diff --git a/.artifactignore b/.artifactignore index 1126a160d97..cbaad306e2e 100644 --- a/.artifactignore +++ b/.artifactignore @@ -1,2 +1,5 @@ **/* !*.deb +!coverage.info +!coverage.xml +!build.info diff --git a/.azure-pipelines/build-docker-sonic-vs-template.yml b/.azure-pipelines/build-docker-sonic-vs-template.yml index 9d1e8065fc0..4ece4cdefe1 100644 --- a/.azure-pipelines/build-docker-sonic-vs-template.yml +++ b/.azure-pipelines/build-docker-sonic-vs-template.yml @@ -21,6 +21,9 @@ parameters: type: string default: '$(BUILD_BRANCH)' +- name: debian_version + type: string + - name: artifact_name type: string @@ -69,7 +72,7 @@ jobs: timeoutInMinutes: ${{ parameters.timeout }} pool: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' steps: - task: DownloadPipelineArtifact@2 @@ -81,6 +84,7 @@ jobs: runVersion: 'latestFromBranch' runBranch: 'refs/heads/${{ parameters.swss_common_branch }}' path: $(Build.ArtifactStagingDirectory)/download + allowPartiallySucceededBuilds: true displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: @@ -90,6 +94,7 @@ jobs: artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}' + allowPartiallySucceededBuilds: true path: $(Build.ArtifactStagingDirectory)/download/sairedis patterns: | ${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb @@ -131,6 +136,29 @@ jobs: path: $(Build.ArtifactStagingDirectory)/download patterns: '**/target/${{ parameters.artifact_name }}.gz' displayName: "Download sonic-buildimage ${{ parameters.artifact_name }}" + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: ${{ parameters.buildimage_artifact_project }} + pipeline: ${{ parameters.buildimage_artifact_pipeline }} + artifact: ${{ parameters.buildimage_artifact_name }} + runVersion: 'latestFromBranch' + runBranch: 'refs/heads/${{ parameters.buildimage_artifact_branch }}' + path: $(Build.ArtifactStagingDirectory)/download + patterns: '**/target/debs/${{ parameters.debian_version }}/framework_*.deb' + displayName: "Download sonic-buildimage sonic-framework package" + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: build + pipeline: sonic-net.sonic-platform-vpp + artifact: vpp + runVersion: 'latestFromBranch' + runBranch: 'refs/heads/main' + allowPartiallySucceededBuilds: true + path: $(Build.ArtifactStagingDirectory)/download + displayName: "Download sonic platform-vpp deb packages" + condition: eq('${{ parameters.arch }}', 'amd64') - script: | set -ex echo $(Build.DefinitionName).$(Build.BuildNumber) @@ -141,12 +169,18 @@ jobs: find $(Build.ArtifactStagingDirectory)/download/sairedis -name '*.deb' -exec cp "{}" .azure-pipelines/docker-sonic-vs/debs \; cp -v $(Build.ArtifactStagingDirectory)/download/*.deb .azure-pipelines/docker-sonic-vs/debs + if [ -f $(Build.ArtifactStagingDirectory)/download/coverage.info ]; then + cp -v $(Build.ArtifactStagingDirectory)/download/coverage.info $(Build.ArtifactStagingDirectory)/ + cp -v $(Build.ArtifactStagingDirectory)/download/coverage.xml $(Build.ArtifactStagingDirectory)/ + fi pushd .azure-pipelines + ls -l docker-sonic-vs/debs - build_args="" + build_dir=$(grep BUILD_DIR $(Build.ArtifactStagingDirectory)/download/build.info | cut -d= -f2) + build_args="--build-arg build_dir=$build_dir" if [ '${{ parameters.asan }}' == True ]; then - build_args="--build-arg need_dbg=y" + build_args="$build_args --build-arg need_dbg=y" fi docker build $build_args --no-cache -t docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} docker-sonic-vs diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index 75666648edf..39232b06882 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -10,9 +10,9 @@ parameters: - name: pool type: string values: - - sonicbld - - sonicbld-armhf - - sonicbld-arm64 + - justForTesting + - sonicso1ES-armhf + - sonicso1ES-arm64 - default default: default @@ -88,7 +88,7 @@ jobs: ${{ if ne(parameters.pool, 'default') }}: name: ${{ parameters.pool }} ${{ else }}: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' container: image: sonicdev-microsoft.azurecr.io:443/${{ parameters.sonic_slave }}:latest @@ -105,6 +105,13 @@ jobs: swig4.0 \ libdbus-1-dev \ libteam-dev + sudo pip3 install lcov_cobertura + sudo apt-get install -y redis-server + sudo sed -i 's/notify-keyspace-events ""/notify-keyspace-events AKE/' /etc/redis/redis.conf + sudo sed -ri 's/^# unixsocket/unixsocket/' /etc/redis/redis.conf + sudo sed -ri 's/^unixsocketperm .../unixsocketperm 777/' /etc/redis/redis.conf + sudo sed -ri 's/redis-server.sock/redis.sock/' /etc/redis/redis.conf + sudo service redis-server restart displayName: "Install dependencies" - task: DownloadPipelineArtifact@2 inputs: @@ -114,6 +121,7 @@ jobs: artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/${{ parameters.swss_common_branch }}' + allowPartiallySucceededBuilds: true path: $(Build.ArtifactStagingDirectory)/download/swsscommon patterns: | libswsscommon_1.0.0_${{ parameters.arch }}.deb @@ -127,6 +135,7 @@ jobs: artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/${{ parameters.sairedis_artifact_branch }}' + allowPartiallySucceededBuilds: true path: $(Build.ArtifactStagingDirectory)/download/sairedis patterns: | ${{ parameters.sairedis_artifact_pattern }}/libsaivs_*.deb @@ -147,18 +156,15 @@ jobs: runBranch: 'refs/heads/${{ parameters.common_lib_artifact_branch }}' path: $(Build.ArtifactStagingDirectory)/download/common patterns: | - target/debs/bullseye/libnl-3-200_*.deb - target/debs/bullseye/libnl-3-dev_*.deb - target/debs/bullseye/libnl-genl-3-200_*.deb - target/debs/bullseye/libnl-genl-3-dev_*.deb - target/debs/bullseye/libnl-route-3-200_*.deb - target/debs/bullseye/libnl-route-3-dev_*.deb - target/debs/bullseye/libnl-nf-3-200_*.deb - target/debs/bullseye/libnl-nf-3-dev_*.deb - target/debs/bullseye/libyang_*.deb - target/debs/bullseye/libprotobuf*.deb - target/debs/bullseye/libprotoc*.deb - target/debs/bullseye/protobuf-compiler*.deb + target/debs/bookworm/libnl-3-200_*.deb + target/debs/bookworm/libnl-3-dev_*.deb + target/debs/bookworm/libnl-genl-3-200_*.deb + target/debs/bookworm/libnl-genl-3-dev_*.deb + target/debs/bookworm/libnl-route-3-200_*.deb + target/debs/bookworm/libnl-route-3-dev_*.deb + target/debs/bookworm/libnl-nf-3-200_*.deb + target/debs/bookworm/libnl-nf-3-dev_*.deb + target/debs/bookworm/libyang_*.deb displayName: "Download common libs" - task: DownloadPipelineArtifact@2 inputs: @@ -179,18 +185,52 @@ jobs: set -ex cd download sudo dpkg -i $(find common -type f -name '*.deb') + cd .. + workingDirectory: $(Build.ArtifactStagingDirectory) + displayName: "Install libnl3" + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: build + pipeline: sonic-net.sonic-platform-vpp + artifact: vpp + runVersion: 'latestFromBranch' + runBranch: 'refs/heads/main' + allowPartiallySucceededBuilds: true + path: $(Build.ArtifactStagingDirectory)/download + displayName: "Download sonic platform-vpp deb packages" + condition: eq('${{ parameters.arch }}', 'amd64') + - script: | + set -ex + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/libvppinfra-dev_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/libvppinfra_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/vpp_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/vpp-crypto-engines_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/vpp-dbg_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/vpp-dev_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/vpp-plugin-core_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/vpp-plugin-devtools_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/vpp-plugin-dpdk_*_${{ parameters.arch }}.deb + sudo env VPP_INSTALL_SKIP_SYSCTL=1 dpkg -i download/python3-vpp-api_*_${{ parameters.arch }}.deb + workingDirectory: $(Build.ArtifactStagingDirectory) + displayName: "Install sonic platform-vpp packages" + condition: eq('${{ parameters.arch }}', 'amd64') + - script: | + set -ex + cd download sudo dpkg -i $(find swsscommon -type f -name '*.deb') sudo dpkg -i $(find sairedis -type f -name '*.deb') cd .. rm -rf download workingDirectory: $(Build.ArtifactStagingDirectory) - displayName: "Install libnl3, sonic swss common and sairedis" + displayName: "Install sonic swss common and sairedis" - script: | set -ex tar czf pytest.tgz tests cp -r pytest.tgz $(Build.ArtifactStagingDirectory)/ if [ '${{ parameters.archive_gcov }}' == True ]; then export ENABLE_GCOV=y + echo BUILD_DIR=$(pwd) > build.info fi if [ '${{ parameters.asan }}' == True ]; then export ENABLE_ASAN=y @@ -198,6 +238,9 @@ jobs: ./autogen.sh dpkg-buildpackage -us -uc -b -j$(nproc) && cp ../*.deb . displayName: "Compile sonic swss" + - script: | + cargo test + displayName: "Test countersyncd" - publish: $(System.DefaultWorkingDirectory)/ artifact: ${{ parameters.artifact_name }} displayName: "Archive swss debian packages" diff --git a/.azure-pipelines/build_and_install_module.sh b/.azure-pipelines/build_and_install_module.sh index 493a2f04e28..2bf880b4560 100755 --- a/.azure-pipelines/build_and_install_module.sh +++ b/.azure-pipelines/build_and_install_module.sh @@ -7,11 +7,21 @@ set -e source /etc/os-release -function build_and_install_kmodule() +trim() { + local var="$*" + # remove leading whitespace characters + var="${var#"${var%%[![:space:]]*}"}" + # remove trailing whitespace characters + var="${var%"${var##*[![:space:]]}"}" + printf '%s' "$var" +} + + +build_and_install_kmodule() { if sudo modprobe team 2>/dev/null && sudo modprobe vrf 2>/dev/null && sudo modprobe macsec 2>/dev/null; then echo "The module team, vrf and macsec exist." - return + return 0 fi [ -z "$WORKDIR" ] && WORKDIR=$(mktemp -d) @@ -26,62 +36,59 @@ function build_and_install_kmodule() SUBLEVEL=$(echo $KERNEL_MAINVERSION | cut -d. -f3) # Install the required debian packages to build the kernel modules + apt-get update apt-get install -y build-essential linux-headers-${KERNEL_RELEASE} autoconf pkg-config fakeroot - apt-get install -y flex bison libssl-dev libelf-dev + apt-get install -y flex bison libssl-dev libelf-dev dwarves apt-get install -y libnl-route-3-200 libnl-route-3-dev libnl-cli-3-200 libnl-cli-3-dev libnl-3-dev # Add the apt source mirrors and download the linux image source code cp /etc/apt/sources.list /etc/apt/sources.list.bk sed -i "s/^# deb-src/deb-src/g" /etc/apt/sources.list apt-get update - apt-get source linux-image-unsigned-$(uname -r) > source.log + KERNEL_PACKAGE_SOURCE=$(trim $(apt-cache show linux-image-unsigned-${KERNEL_RELEASE} | grep ^Source: | cut -d':' -f 2)) + KERNEL_PACKAGE_VERSION=$(trim $(apt-cache show linux-image-unsigned-${KERNEL_RELEASE} | grep ^Version: | cut -d':' -f 2)) + SOURCE_PACKAGE_VERSION=$(apt-cache showsrc "${KERNEL_PACKAGE_SOURCE}" | grep ^Version: | cut -d':' -f 2 | tr '\n' ' ') + if ! echo "${SOURCE_PACKAGE_VERSION}" | grep "\b${KERNEL_PACKAGE_VERSION}\b"; then + echo "WARN: the running kernel version (${KERNEL_PACKAGE_VERSION}) doesn't match any of the available source " \ + "package versions (${SOURCE_PACKAGE_VERSION}) being downloaded. There's no guarantee any of the available " \ + "source packages can be loaded into the kernel or function correctly. Please update your kernel and reboot " \ + "your system so that it's running a matching kernel version." >&2 + fi + apt-get source "linux-image-unsigned-${KERNEL_RELEASE}" # Recover the original apt sources list cp /etc/apt/sources.list.bk /etc/apt/sources.list apt-get update # Build the Linux kernel module drivers/net/team and vrf - cd $(find . -maxdepth 1 -type d | grep -v "^.$") + cd ${KERNEL_PACKAGE_SOURCE}-* + if [ -e debian/debian.env ]; then + source debian/debian.env + if [ -n "${DEBIAN}" -a -e ${DEBIAN}/reconstruct ]; then + bash ${DEBIAN}/reconstruct + fi + fi make allmodconfig mv .config .config.bk cp /boot/config-$(uname -r) .config grep NET_TEAM .config.bk >> .config - echo CONFIG_NET_VRF=m >> .config - echo CONFIG_MACSEC=m >> .config - echo CONFIG_NET_VENDOR_MICROSOFT=y >> .config - echo CONFIG_MICROSOFT_MANA=m >> .config - echo CONFIG_SYSTEM_REVOCATION_LIST=n >> .config make VERSION=$VERSION PATCHLEVEL=$PATCHLEVEL SUBLEVEL=$SUBLEVEL EXTRAVERSION=-${EXTRAVERSION} LOCALVERSION=-${LOCALVERSION} modules_prepare - make M=drivers/net/team + cp /usr/src/linux-headers-$(uname -r)/Module.symvers . + make -j$(nproc) M=drivers/net/team mv drivers/net/Makefile drivers/net/Makefile.bak echo 'obj-$(CONFIG_NET_VRF) += vrf.o' > drivers/net/Makefile echo 'obj-$(CONFIG_MACSEC) += macsec.o' >> drivers/net/Makefile - make M=drivers/net + make -j$(nproc) M=drivers/net # Install the module - TEAM_DIR=$(echo /lib/modules/$(uname -r)/kernel/net/team) - NET_DIR=$(echo /lib/modules/$(uname -r)/kernel/net) - if [ ! -e "$TEAM_DIR/team.ko" ]; then - mkdir -p $TEAM_DIR - cp drivers/net/team/*.ko $TEAM_DIR/ - modinfo $TEAM_DIR/team.ko - depmod - modprobe team - fi - if [ ! -e "$NET_DIR/vrf.ko" ]; then - mkdir -p $NET_DIR - cp drivers/net/vrf.ko $NET_DIR/ - modinfo $NET_DIR/vrf.ko - depmod - modprobe vrf - fi - if [ ! -e "$NET_DIR/macsec.ko" ]; then - mkdir -p $NET_DIR - cp drivers/net/macsec.ko $NET_DIR/ - modinfo $NET_DIR/macsec.ko - depmod - modprobe macsec - fi + SONIC_MODULES_DIR=/lib/modules/$(uname -r)/updates/sonic + mkdir -p $SONIC_MODULES_DIR + cp drivers/net/team/*.ko drivers/net/vrf.ko drivers/net/macsec.ko $SONIC_MODULES_DIR/ + depmod + modinfo team vrf macsec + modprobe team + modprobe vrf + modprobe macsec cd /tmp rm -rf $WORKDIR diff --git a/.azure-pipelines/docker-sonic-vs/Dockerfile b/.azure-pipelines/docker-sonic-vs/Dockerfile index 750d1369579..41733ffda13 100644 --- a/.azure-pipelines/docker-sonic-vs/Dockerfile +++ b/.azure-pipelines/docker-sonic-vs/Dockerfile @@ -2,15 +2,40 @@ FROM docker-sonic-vs ARG docker_container_name ARG need_dbg +ARG build_dir +ENV BUILD_DIR=$build_dir COPY ["debs", "/debs"] -# Remove existing packages first before installing the new/current packages. This is to overcome limitations with +# Remove the libswsscommon package first with force all option. +# Remove the other existing packages before installing the new/current packages. This is to overcome limitations with # Docker's diff detection mechanism, where only the file size and the modification timestamp (which will remain the # same, even though contents have changed) are checked between the previous and current layer. -RUN dpkg --purge libswsscommon python3-swsscommon sonic-db-cli libsaimetadata libsairedis libsaivs syncd-vs swss sonic-eventd libdashapi +RUN dpkg --remove --force-all libswsscommon +RUN apt --fix-broken install -y +RUN dpkg --purge python3-swsscommon sonic-db-cli libsaimetadata libsairedis libsaivs syncd-vs swss sonic-eventd libdashapi framework -RUN dpkg -i /debs/libdashapi_1.0.0_amd64.deb \ +RUN apt-get update + +# vpp package configure requires to set: +# permission denied on key "vm.nr_hugepages" +# permission denied on key "vm.hugetlb_shm_group" +# permission denied on key "fs.protected_fifos" +# permission denied on key "fs.protected_hardlinks" +# permission denied on key "fs.protected_regular" +# permission denied on key "fs.protected_symlinks" + +# which can't be done during "docker build" command +# so let's put "true" command as sysctl, and after install let's bring it back + +RUN cp /usr/sbin/sysctl /usr/sbin/sysctl.org +RUN cp /usr/bin/true /usr/sbin/sysctl + +RUN apt install -y $(ls /debs/*.deb | grep vpp) + +RUN mv /usr/sbin/sysctl.org /usr/sbin/sysctl + +RUN apt install -y /debs/libdashapi_1.0.0_amd64.deb \ /debs/libswsscommon_1.0.0_amd64.deb \ /debs/python3-swsscommon_1.0.0_amd64.deb \ /debs/sonic-db-cli_1.0.0_amd64.deb \ @@ -22,6 +47,16 @@ RUN dpkg -i /debs/libdashapi_1.0.0_amd64.deb \ RUN if [ "$need_dbg" = "y" ] ; then dpkg -i /debs/swss-dbg_1.0.0_amd64.deb ; fi -RUN apt-get update +COPY ["start.sh", "/usr/bin/"] + +RUN pip3 install scapy==2.5.0 + +RUN apt-get -y install software-properties-common libdatetime-perl libcapture-tiny-perl build-essential libcpanel-json-xs-perl git python3-protobuf + +RUN git clone -b v2.0 --single-branch --depth 1 https://github.com/linux-test-project/lcov && cd lcov && make install + +RUN lcov --version + +RUN pip3 install lcov_cobertura -RUN apt-get -y install lcov +RUN if [ -n "$BUILD_DIR" ]; then mkdir -p $BUILD_DIR && tar -xf /tmp/gcov/gcov-source.tar -C $BUILD_DIR; fi diff --git a/.azure-pipelines/docker-sonic-vs/start.sh b/.azure-pipelines/docker-sonic-vs/start.sh new file mode 100755 index 00000000000..752c9ff675f --- /dev/null +++ b/.azure-pipelines/docker-sonic-vs/start.sh @@ -0,0 +1,200 @@ +#!/bin/bash -e + +# Generate configuration + +# NOTE: 'PLATFORM' and 'HWSKU' environment variables are set +# in the Dockerfile so that they persist for the life of the container + +ln -sf /usr/share/sonic/device/$PLATFORM /usr/share/sonic/platform +ln -sf /usr/share/sonic/device/$PLATFORM/$HWSKU /usr/share/sonic/hwsku + +SWITCH_TYPE=switch +PLATFORM_CONF=platform.json +if [[ $HWSKU == "DPU-2P" ]]; then + SWITCH_TYPE=dpu + PLATFORM_CONF=platform-dpu-2p.json +fi + +pushd /usr/share/sonic/hwsku + +# filter available front panel ports in lanemap.ini +[ -f lanemap.ini.orig ] || cp lanemap.ini lanemap.ini.orig +for p in $(ip link show | grep -oE "eth[0-9]+" | grep -v eth0); do + grep ^$p: lanemap.ini.orig +done > lanemap.ini + +# filter available sonic front panel ports in port_config.ini +[ -f port_config.ini.orig ] || cp port_config.ini port_config.ini.orig +grep ^# port_config.ini.orig > port_config.ini +for lanes in $(awk -F ':' '{print $2}' lanemap.ini); do + grep -E "\s$lanes\s" port_config.ini.orig +done >> port_config.ini + +popd + +[ -d /etc/sonic ] || mkdir -p /etc/sonic + +# Note: libswsscommon requires a dabase_config file in /var/run/redis/sonic-db/ +# Prepare this file before any dependent application, such as sonic-cfggen +mkdir -p /var/run/redis/sonic-db +cp /etc/default/sonic-db/database_config.json /var/run/redis/sonic-db/ + +SYSTEM_MAC_ADDRESS=$(ip link show eth0 | grep ether | awk '{print $2}') +sonic-cfggen -t /usr/share/sonic/templates/init_cfg.json.j2 -a "{\"system_mac\": \"$SYSTEM_MAC_ADDRESS\", \"switch_type\": \"$SWITCH_TYPE\"}" > /etc/sonic/init_cfg.json + +if [[ -f /usr/share/sonic/virtual_chassis/default_config.json ]]; then + sonic-cfggen -j /etc/sonic/init_cfg.json -j /usr/share/sonic/virtual_chassis/default_config.json --print-data > /tmp/init_cfg.json + mv /tmp/init_cfg.json /etc/sonic/init_cfg.json +fi + +if [ -f /etc/sonic/config_db.json ]; then + sonic-cfggen -j /etc/sonic/init_cfg.json -j /etc/sonic/config_db.json --print-data > /tmp/config_db.json + mv /tmp/config_db.json /etc/sonic/config_db.json +else + # generate and merge buffers configuration into config file + if [ -f /usr/share/sonic/hwsku/buffers.json.j2 ]; then + sonic-cfggen -k $HWSKU -p /usr/share/sonic/device/$PLATFORM/$PLATFORM_CONF -t /usr/share/sonic/hwsku/buffers.json.j2 > /tmp/buffers.json + buffers_cmd="-j /tmp/buffers.json" + fi + if [ -f /usr/share/sonic/hwsku/qos.json.j2 ]; then + sonic-cfggen -j /etc/sonic/init_cfg.json -t /usr/share/sonic/hwsku/qos.json.j2 > /tmp/qos.json + qos_cmd="-j /tmp/qos.json" + fi + + if [ -f /usr/share/sonic/single_asic_voq_fs/default_config.json ]; then + sonic-cfggen -j /usr/share/sonic/single_asic_voq_fs/default_config.json --print-data > /tmp/voq.json + voq_cmd="-j /tmp/voq.json" + fi + + sonic-cfggen -p /usr/share/sonic/device/$PLATFORM/$PLATFORM_CONF -k $HWSKU --print-data > /tmp/ports.json + # change admin_status from up to down; Test cases dependent + sed -i "s/up/down/g" /tmp/ports.json + sonic-cfggen -j /etc/sonic/init_cfg.json $buffers_cmd $qos_cmd $voq_cmd -j /tmp/ports.json --print-data > /etc/sonic/config_db.json +fi + +sonic-cfggen -t /usr/share/sonic/templates/copp_cfg.j2 > /etc/sonic/copp_cfg.json + +if [ "$HWSKU" == "Mellanox-SN2700" ]; then + cp /usr/share/sonic/hwsku/sai_mlnx.profile /usr/share/sonic/hwsku/sai.profile +elif [ "$HWSKU" == "DPU-2P" ]; then + cp /usr/share/sonic/hwsku/sai_dpu_2p.profile /usr/share/sonic/hwsku/sai.profile +fi + +if [ "$BFDOFFLOAD" == "false" ]; then + if ! grep -q "SAI_VS_BFD_OFFLOAD_SUPPORTED=" /usr/share/sonic/hwsku/sai.profile; then + echo 'SAI_VS_BFD_OFFLOAD_SUPPORTED=false' >> /usr/share/sonic/hwsku/sai.profile + else + sed -i "s/SAI_VS_BFD_OFFLOAD_SUPPORTED.*/SAI_VS_BFD_OFFLOAD_SUPPORTED=false/g" /usr/share/sonic/hwsku/sai.profile + fi +fi + +mkdir -p /etc/swss/config.d/ + +rm -f /var/run/rsyslogd.pid + +supervisorctl start rsyslogd + +supervisord_cfg="/etc/supervisor/conf.d/supervisord.conf" +chassisdb_cfg_file="/usr/share/sonic/virtual_chassis/default_config.json" +chassisdb_cfg_file_default="/etc/default/sonic-db/default_chassis_cfg.json" +host_template="/usr/share/sonic/templates/hostname.j2" +db_cfg_file="/var/run/redis/sonic-db/database_config.json" +db_cfg_file_tmp="/var/run/redis/sonic-db/database_config.json.tmp" + +if [ -r "$chassisdb_cfg_file" ]; then + echo $(sonic-cfggen -j $chassisdb_cfg_file -t $host_template) >> /etc/hosts +else + chassisdb_cfg_file="$chassisdb_cfg_file_default" + echo "10.8.1.200 redis_chassis.server" >> /etc/hosts +fi + +supervisorctl start redis-server + +start_chassis_db=`sonic-cfggen -v DEVICE_METADATA.localhost.start_chassis_db -y $chassisdb_cfg_file` +if [[ "$HOSTNAME" == *"supervisor"* ]] || [ "$start_chassis_db" == "1" ]; then + supervisorctl start redis-chassis +fi + +conn_chassis_db=`sonic-cfggen -v DEVICE_METADATA.localhost.connect_to_chassis_db -y $chassisdb_cfg_file` +if [ "$start_chassis_db" != "1" ] && [ "$conn_chassis_db" != "1" ]; then + cp $db_cfg_file $db_cfg_file_tmp + update_chassisdb_config -j $db_cfg_file_tmp -d + cp $db_cfg_file_tmp $db_cfg_file +fi + +if [ "$conn_chassis_db" == "1" ]; then + if [ -f /usr/share/sonic/virtual_chassis/coreportindexmap.ini ]; then + cp /usr/share/sonic/virtual_chassis/coreportindexmap.ini /usr/share/sonic/hwsku/ + + pushd /usr/share/sonic/hwsku + + # filter available front panel ports in coreportindexmap.ini + [ -f coreportindexmap.ini.orig ] || cp coreportindexmap.ini coreportindexmap.ini.orig + for p in $(ip link show | grep -oE "eth[0-9]+" | grep -v eth0); do + grep ^$p: coreportindexmap.ini.orig + done > coreportindexmap.ini + + popd + fi +fi + +/usr/bin/configdb-load.sh + +if [ "$HWSKU" = "brcm_gearbox_vs" ]; then + supervisorctl start gbsyncd + supervisorctl start gearsyncd +fi + +supervisorctl start syncd + +supervisorctl start portsyncd + +supervisorctl start orchagent + +supervisorctl start coppmgrd + +supervisorctl start neighsyncd + +supervisorctl start fdbsyncd + +supervisorctl start teamsyncd + +supervisorctl start fpmsyncd + +supervisorctl start teammgrd + +supervisorctl start vrfmgrd + +supervisorctl start portmgrd + +supervisorctl start intfmgrd + +supervisorctl start vlanmgrd + +supervisorctl start zebra + +supervisorctl start mgmtd + +supervisorctl start staticd + +supervisorctl start buffermgrd + +supervisorctl start nbrmgrd + +supervisorctl start vxlanmgrd + +supervisorctl start sflowmgrd + +supervisorctl start natmgrd + +supervisorctl start natsyncd + +supervisorctl start tunnelmgrd + +supervisorctl start fabricmgrd + +# Start arp_update when VLAN exists +VLAN=`sonic-cfggen -d -v 'VLAN.keys() | join(" ") if VLAN'` +if [ "$VLAN" != "" ]; then + supervisorctl start arp_update +fi diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index 27129c5611f..4429f29c0de 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -8,7 +8,7 @@ parameters: - name: pool type: string values: - - sonicbld + - justForTesting - default default: default @@ -44,7 +44,7 @@ jobs: ${{ if ne(parameters.pool, 'default') }}: name: ${{ parameters.pool }} ${{ if eq(parameters.pool, 'default') }}: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' variables: DIFF_COVER_CHECK_THRESHOLD: 80 @@ -122,6 +122,8 @@ jobs: codeCoverageTool: Cobertura summaryFileLocation: '$(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/coverage.xml' reportDirectory: '$(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/' + pathToSources: '$(Build.SourcesDirectory)' + failIfCoverageEmpty: true displayName: 'Publish c c++ test coverage' condition: eq('${{ parameters.archive_gcov }}', true) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 263365d8b72..0e826a002f1 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -24,7 +24,7 @@ parameters: type: string default: '$(BUILD_BRANCH)' -- name: sonic_buildimage_ubuntu20_04 +- name: sonic_buildimage_ubuntu22_04 type: string default: '$(BUILD_BRANCH)' @@ -40,15 +40,26 @@ parameters: type: string default: "" +- name: debian_version + type: string + default: bookworm + jobs: - job: displayName: vstest timeoutInMinutes: ${{ parameters.timeout }} + variables: + isAsan: ${{ parameters.asan }} + ${{ if parameters.archive_gcov }}: + DIFF_COVER_CHECK_THRESHOLD: 80 + DIFF_COVER_ENABLE: 'true' + DIFF_COVER_COVERAGE_FILES: Cobertura.xml - pool: sonic-common + pool: sonictest steps: - script: | + ip a show dev eth0 || true ls -A1 | xargs -I{} sudo rm -rf {} displayName: "Clean workspace" - checkout: self @@ -62,36 +73,78 @@ jobs: source: specific project: build pipeline: Azure.sonic-swss-common - artifact: sonic-swss-common.amd64.ubuntu20_04 + artifact: sonic-swss-common.amd64.ubuntu22_04 runVersion: 'latestFromBranch' runBranch: 'refs/heads/${{ parameters.swss_common_branch }}' path: $(Build.ArtifactStagingDirectory)/download + allowPartiallySucceededBuilds: true displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: sonic-net.sonic-buildimage-ubuntu20.04 - artifact: sonic-buildimage.amd64.ubuntu20_04 + pipeline: Azure.sonic-buildimage.common_libs + runVersion: 'latestFromBranch' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + artifact: common-lib + patterns: | + target/debs/${{ parameters.debian_version }}/libyang-*_1.0*.deb + target/debs/${{ parameters.debian_version }}/libyang_1.0*.deb + target/debs/${{ parameters.debian_version }}/libyang-cpp_*.deb + target/debs/${{ parameters.debian_version }}/python3-yang_*.deb + displayName: "Download libyang from common lib" + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: build + pipeline: sonic-net.sonic-buildimage-ubuntu22.04 + artifact: sonic-buildimage.amd64.ubuntu22_04 runVersion: 'latestFromBranch' - runBranch: 'refs/heads/${{ parameters.sonic_buildimage_ubuntu20_04 }}' + runBranch: 'refs/heads/${{ parameters.sonic_buildimage_ubuntu22_04 }}' path: $(Build.ArtifactStagingDirectory)/download - displayName: "Download sonic buildimage ubuntu20.04 deb packages" + displayName: "Download sonic buildimage ubuntu22.04 deb packages" + + - script: | + set -ex + # Install .NET CORE + curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - + sudo apt-add-repository https://packages.microsoft.com/ubuntu/22.04/prod + sudo apt-get update + sudo apt-get install -y dotnet-sdk-8.0 + sudo dotnet tool install dotnet-reportgenerator-globaltool --tool-path /usr/bin 2>&1 | tee log.log || grep 'already installed' log.log + rm log.log + displayName: "Install .NET CORE" - script: | set -ex + # install packages for vs test + sudo pip3 install pytest flaky exabgp docker redis lcov_cobertura + + # install other dependencies + sudo apt-get -o DPkg::Lock::Timeout=600 install -y net-tools \ + bridge-utils \ + vlan \ + libzmq3-dev \ + libzmq5 \ + libboost-serialization1.74.0 \ + libboost1.74-dev \ + libboost-dev \ + libhiredis0.14 \ + libpcre3-dev + sudo .azure-pipelines/build_and_install_module.sh - sudo apt-get install -y libhiredis0.14 libyang0.16 - sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/libprotobuf*_amd64.deb $(Build.ArtifactStagingDirectory)/download/libprotobuf-lite*_amd64.deb $(Build.ArtifactStagingDirectory)/download/python3-protobuf*_amd64.deb + # Install libyang packages from downloaded artifacts + sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/target/debs/${{ parameters.debian_version }}/libyang-*_1.0*.deb \ + $(Build.ArtifactStagingDirectory)/download/target/debs/${{ parameters.debian_version }}/libyang_1.0*.deb \ + $(Build.ArtifactStagingDirectory)/download/target/debs/${{ parameters.debian_version }}/libyang-cpp_*.deb \ + $(Build.ArtifactStagingDirectory)/download/target/debs/${{ parameters.debian_version }}/python3-yang_*.deb + + sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/libprotobuf*_amd64.deb $(Build.ArtifactStagingDirectory)/download/libprotobuf-lite*_amd64.deb $(Build.ArtifactStagingDirectory)/download/python3-protobuf*_amd64.deb sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/libdashapi*.deb sudo dpkg -i --force-confask,confnew $(Build.ArtifactStagingDirectory)/download/libswsscommon_1.0.0_amd64.deb || apt-get install -f sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/python3-swsscommon_1.0.0_amd64.deb - - # install packages for vs test - sudo apt-get install -y net-tools bridge-utils vlan - sudo apt-get install -y python3-pip - sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker>=4.4.1 redis==3.3.4 flaky==3.7.0 displayName: "Install dependencies" - script: | @@ -106,7 +159,9 @@ jobs: params="" if [ '${{ parameters.archive_gcov }}' == True ]; then - params=" ${params} --keeptb " + cp $(Build.ArtifactStagingDirectory)/download/coverage.info ./ + cp $(Build.ArtifactStagingDirectory)/download/coverage.xml ./ + params=" ${params} --enable-coverage --force-recreate-dvs " fi if [ '${{ parameters.asan }}' == True ]; then params=" ${params} --graceful-stop " @@ -115,49 +170,49 @@ jobs: params=" ${params} --num-ports=${{ parameters.num_ports }} " fi - all_tests=$(ls test_*.py) - all_tests="${all_tests} p4rt" + all_tests=$(ls test_*.py | xargs) + all_tests="${all_tests} p4rt dash" if [ -n '${{ parameters.run_tests_pattern }}' ]; then - all_tests=" $(ls ${{ parameters.run_tests_pattern }}) " - fi - - test_set=() - # Run 20 tests as a set. - for test in ${all_tests}; do - test_set+=("${test}") - if [ ${#test_set[@]} -ge 20 ]; then - test_name=$(echo "${test_set[0]}" | cut -d "." -f 1) - echo "${test_set[*]}" | xargs sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" $params --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} - container_count=$(docker ps -q -a | wc -l) - if [ '${{ parameters.archive_gcov }}' == True ] && [ ${container_count} -gt 0 ]; then - ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) - docker stop $(docker ps -q -a) - docker rm $(docker ps -q -a) - fi - test_set=() - fi - done - if [ ${#test_set[@]} -gt 0 ]; then - test_name=$(echo "${test_set[0]}" | cut -d "." -f 1) - echo "${test_set[*]}" | xargs sudo py.test -v $params --force-flaky --junitxml="${test_name}_tr.xml" $params --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} - container_count=$(docker ps -q -a | wc -l) - if [ '${{ parameters.archive_gcov }}' == True ] && [ ${container_count} -gt 0 ]; then - ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) - docker stop $(docker ps -q -a) - docker rm $(docker ps -q -a) - fi + all_tests=" $(ls ${{ parameters.run_tests_pattern }} | xargs) " fi + # Run the tests in parallel and retry + retry=3 + IMAGE_NAME=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} + echo $all_tests | xargs -n 1 | xargs -P 8 -I TEST_MODULE sudo DEFAULT_CONTAINER_REGISTRY=publicmirror.azurecr.io/ ./run-tests.sh "$IMAGE_NAME" "$params" "TEST_MODULE" 3 + single_asic_voq_tests="test_portchannel.py test_neighbor.py test_route.py" + echo $single_asic_voq_tests | xargs -n 1 | xargs -P 3 -I TEST_MODULE sudo ./run-tests.sh "$IMAGE_NAME" "--force-recreate-dvs --switch-mode=single_asic_voq_fs" "TEST_MODULE" 3 rm -rf $(Build.ArtifactStagingDirectory)/download displayName: "Run vs tests" continueOnError: ${{ parameters.asan }} + - script: | + echo "##vso[task.setvariable variable=TestsRun]Yes" + condition: succeededOrFailed() + displayName: 'Record Test Status' + + - script: | + set -ex + reportgenerator -reporttypes:Cobertura -reports:tests/*coverage.xml -targetdir:. + mkdir $(Build.ArtifactStagingDirectory)/gcov + cp Cobertura.xml tests/*coverage.xml $(Build.ArtifactStagingDirectory)/gcov/ + cp tests/*coverage.info $(Build.ArtifactStagingDirectory)/gcov/ + condition: ${{ parameters.archive_gcov }} + displayName: "Generate coverage.xml" + + - task: PublishCodeCoverageResults@1 + condition: ${{ parameters.archive_gcov }} + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: '$(System.DefaultWorkingDirectory)/Cobertura.xml' + displayName: 'Publish test coverage' + - task: PublishTestResults@2 inputs: testResultsFiles: '**/*_tr.xml' testRunTitle: vstest - condition: succeeded() + condition: and(eq(variables['TestsRun'], 'Yes'), ne(variables['isAsan'], 'true')) - script: | cp -r tests/log $(Build.ArtifactStagingDirectory)/ @@ -165,21 +220,9 @@ jobs: if [ '${{ parameters.asan }}' == True ]; then cp -vr tests/log/*/log/asan $(Build.ArtifactStagingDirectory)/ fi - - if [ '${{ parameters.archive_gcov }}' == True ]; then - sudo apt-get install -y lcov - cd $(Build.ArtifactStagingDirectory)/gcov_tmp/ - tar -zcvf sonic-gcov.tar.gz sonic-gcov/ - rm -rf sonic-gcov - fi displayName: "Collect logs" condition: always() - - publish: $(Build.ArtifactStagingDirectory)/gcov_tmp - artifact: ${{ parameters.gcov_artifact_name }} - displayName: "Publish gcov output" - condition: and(succeeded(), eq('${{ parameters.archive_gcov }}', true)) - - publish: $(Build.ArtifactStagingDirectory)/ artifact: ${{ parameters.log_artifact_name }}@$(System.JobAttempt) displayName: "Publish logs" @@ -200,3 +243,8 @@ jobs: displayName: "Check ASAN reports" condition: eq('${{ parameters.asan }}', true) continueOnError: true + + - script: | + sudo apt-get -o DPkg::Lock::Timeout=600 install -y python-is-python3 + + displayName: "Install temporary workaround to add a symlink to python 3" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 46732aa050c..0cf49da89d9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -20,11 +20,18 @@ /cfgmgr/teammgr* @judyjoseph # Buffer Management, PFC -/orchagent/bufferorch* @neethajohn -/orchagent/qosorch* @neethajohn -/orchagent/pfc* @neethajohn -/cfgmgr/buffer* @neethajohn +/orchagent/bufferorch* @kperumalbfn +/orchagent/qosorch* @kperumalbfn +/orchagent/pfc* @kperumalbfn +/cfgmgr/buffer* @kperumalbfn # Chassis /orchagent/fabricportsorch* @abdosi @judyjoseph /tests/test_virtual_chassis.py @abdosi @judyjoseph + +# Mux Orch +/orchagent/mux* @Ndancejic + +# Acl Orch +/orchagent/acl* @bingwang-ms + diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 3c1596eef92..2bdb3e9933c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -7,7 +7,7 @@ on: branches: - 'master' - '202[0-9][0-9][0-9]' - pull_request_target: + pull_request: branches: - 'master' - '202[0-9][0-9][0-9]' @@ -16,7 +16,7 @@ on: jobs: analyze: name: Analyze - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 permissions: actions: read contents: read @@ -50,15 +50,14 @@ jobs: libnl-genl-3-dev \ libnl-route-3-dev \ libnl-nf-3-dev \ - libyang-dev \ libzmq3-dev \ libzmq5 \ - swig3.0 \ - libpython2.7-dev \ + swig \ + libpython3-dev \ libgtest-dev \ libgmock-dev \ - libboost1.71-dev \ - libboost-serialization1.71-dev \ + libboost-dev \ + libboost-serialization-dev \ dh-exec \ doxygen \ cdbs \ @@ -69,7 +68,8 @@ jobs: uuid-dev \ libjansson-dev \ nlohmann-json3-dev \ - python \ + build-essential \ + devscripts \ stgit - if: matrix.language == 'cpp' @@ -79,7 +79,7 @@ jobs: git clone https://github.com/sonic-net/sonic-swss-common pushd sonic-swss-common ./autogen.sh - dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc) + dpkg-buildpackage -rfakeroot -us -uc -b -Pnoyangmod,nopython2 -j$(nproc) popd dpkg-deb -x libswsscommon_${SWSSCOMMON_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) dpkg-deb -x libswsscommon-dev_${SWSSCOMMON_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) @@ -115,31 +115,33 @@ jobs: cd .. git clone https://github.com/sonic-net/sonic-buildimage pushd sonic-buildimage/src/libnl3 - git clone https://github.com/thom311/libnl libnl3-${LIBNL3_VER} + dget -u https://deb.debian.org/debian/pool/main/libn/libnl3/libnl3_${LIBNL3_VER}-${LIBNL3_REV}.dsc pushd libnl3-${LIBNL3_VER} - git checkout tags/libnl${LIBNL3_VER//./_} - git checkout -b sonic + git init git config --local user.name $USER git config --local user.email $USER@microsoft.com + git add -f * + git commit -qm "initial commit" stg init stg import -s ../patch/series git config --local --unset user.name git config --local --unset user.email ln -s ../debian debian - dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc) + DPKG_GENSYMBOLS_CHECK_LEVEL=0 dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc) popd - dpkg-deb -x libnl-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) - dpkg-deb -x libnl-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) - dpkg-deb -x libnl-genl-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) - dpkg-deb -x libnl-genl-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) - dpkg-deb -x libnl-route-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) - dpkg-deb -x libnl-route-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) - dpkg-deb -x libnl-nf-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) - dpkg-deb -x libnl-nf-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-3-200_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-3-dev_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-genl-3-200_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-genl-3-dev_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-route-3-200_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-route-3-dev_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-nf-3-200_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-nf-3-dev_${LIBNL3_VER}-${LIBNL3_REV_SONIC}_amd64.deb $(dirname $GITHUB_WORKSPACE) popd env: - LIBNL3_VER: "3.5.0" - LIBNL3_REV: "1" + LIBNL3_VER: "3.7.0" + LIBNL3_REV: "0.2" + LIBNL3_REV_SONIC: "0.2+b1sonic1" - if: matrix.language == 'cpp' name: Build repository diff --git a/.gitignore b/.gitignore index 001db00e4bc..e115801e48a 100644 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,8 @@ cfgmgr/sflowmgrd cfgmgr/macsecmgrd cfgmgr/coppmgrd cfgmgr/tunnelmgrd +cfgmgr/fabricmgrd +cfgmgr/stpmgrd fpmsyncd/fpmsyncd gearsyncd/gearsyncd mclagsyncd/mclagsyncd @@ -83,6 +85,8 @@ tests/mock_tests/tests_portsyncd # Test Files # ############## +*gcda +*gcno tests/log tests/mock_tests/test-suite.log tests/mock_tests/tests.log @@ -92,5 +96,4 @@ tests/tests.log tests/tests.trs tests/mock_tests/**/*log tests/mock_tests/**/*trs -orchagent/p4orch/tests/**/*gcda -orchagent/p4orch/tests/**/*gcno +orchagent/p4orch/tests/p4orch_tests diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 00000000000..7547acd8930 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,3115 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "getrandom 0.3.3", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" + +[[package]] +name = "anstyle-parse" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" +dependencies = [ + "windows-sys 0.60.2", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.60.2", +] + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "array-init" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "async-trait" +version = "0.1.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit 0.7.3", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bindgen" +version = "0.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.104", +] + +[[package]] +name = "binrw" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab81d22cbd2d745852348b2138f3db2103afa8ce043117a374581926a523e267" +dependencies = [ + "array-init", + "binrw_derive 0.11.2", + "bytemuck", +] + +[[package]] +name = "binrw" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d4bca59c20d6f40c2cc0802afbe1e788b89096f61bdf7aeea6bf00f10c2909b" +dependencies = [ + "array-init", + "binrw_derive 0.14.1", + "bytemuck", +] + +[[package]] +name = "binrw_derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6b019a3efebe7f453612083202887b6f1ace59e20d010672e336eea4ed5be97" +dependencies = [ + "either", + "owo-colors 3.5.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "binrw_derive" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8ba42866ce5bced2645bfa15e97eef2c62d2bdb530510538de8dd3d04efff3c" +dependencies = [ + "either", + "owo-colors 3.5.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "bitflags" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "bytemuck" +version = "1.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cc" +version = "1.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2352e5597e9c544d5e6d9c95190d5d27738ade584fa8db0a16e130e5c2b5296e" +dependencies = [ + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" + +[[package]] +name = "chrono" +version = "0.4.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50fd97c9dc2399518aa331917ac6f274280ec5eb34e555dd291899745c48ec6f" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c35b5830294e1fa0462034af85cc95225a4cb07092c088c55bda3147cfcd8f65" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim 0.11.1", + "terminal_size", + "unicase", + "unicode-width", +] + +[[package]] +name = "clap_derive" +version = "4.5.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "clap_lex" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" + +[[package]] +name = "color-eyre" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5920befb47832a6d61ee3a3a846565cfa39b331331e68a3b1d1116630f2f26d" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors 4.2.2", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8b88ea9df13354b55bc7234ebcce36e6ef896aca2e42a15de9e10edce01b427" +dependencies = [ + "once_cell", + "owo-colors 4.2.2", + "tracing-core", + "tracing-error", +] + +[[package]] +name = "colorchoice" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "countersyncd" +version = "0.1.0" +dependencies = [ + "ahash", + "async-trait", + "binrw 0.14.1", + "byteorder", + "chrono", + "clap", + "color-eyre", + "env_logger", + "ipfixrw", + "log", + "neli", + "once_cell", + "opentelemetry 0.25.0", + "opentelemetry-http", + "opentelemetry-otlp", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry-stdout", + "opentelemetry_sdk 0.25.0", + "prost", + "prost-types", + "rand", + "reqwest", + "reqwest-middleware 0.3.3", + "reqwest-tracing", + "serial_test", + "swss-common", + "tempfile", + "tokio", + "tonic", + "tonic-health", + "tracing", + "tracing-opentelemetry 0.25.0", + "tracing-subscriber", + "yaml-rust", +] + +[[package]] +name = "csv" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d" +dependencies = [ + "memchr", +] + +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_macro" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" +dependencies = [ + "derive_builder_core", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "0.99.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "env_filter" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "jiff", + "log", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.12.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1744436df46f0bde35af3eda22aeaba453aada65d8f1c171cd8a5f59030bd69f" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indenter" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +dependencies = [ + "equivalent", + "hashbrown 0.16.0", +] + +[[package]] +name = "ipfixrw" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e18277dde2a264cf269ab1090a9e003b5b323ffb3d02011bdbce697e6aaff18" +dependencies = [ + "ahash", + "binrw 0.11.2", + "csv", + "derive_more", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jiff" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" +dependencies = [ + "jiff-static", + "log", + "portable-atomic", + "portable-atomic-util", + "serde", +] + +[[package]] +name = "jiff-static" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.174" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" + +[[package]] +name = "libloading" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +dependencies = [ + "cfg-if", + "windows-targets 0.53.3", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "matchit" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f926ade0c4e170215ae43342bf13b9310a437609c81f29f86c5df6657582ef9" + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + +[[package]] +name = "neli" +version = "0.7.0-rc2" +source = "git+https://github.com/jbaublitz/neli.git?tag=neli-v0.7.0-rc2#73528ae1fb0b2af177711f1a7c6228349d770dfb" +dependencies = [ + "bitflags", + "byteorder", + "derive_builder", + "getset", + "libc", + "log", + "neli-proc-macros", + "parking_lot", +] + +[[package]] +name = "neli-proc-macros" +version = "0.2.0-rc2" +source = "git+https://github.com/jbaublitz/neli.git?tag=neli-v0.7.0-rc2#73528ae1fb0b2af177711f1a7c6228349d770dfb" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + +[[package]] +name = "opentelemetry" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c365a63eec4f55b7efeceb724f1336f26a9cf3427b70e59e2cd2a5b947fba96" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", +] + +[[package]] +name = "opentelemetry" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "803801d3d3b71cd026851a53f974ea03df3d179cb758b260136a6c9e22e196af" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", +] + +[[package]] +name = "opentelemetry-http" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d8c2b76e5f7848a289aa9666dbe56b16f8a22a4c5246ef37a14941818d2913" +dependencies = [ + "async-trait", + "bytes", + "http", + "opentelemetry 0.25.0", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "596b1719b3cab83addb20bcbffdf21575279d9436d9ccccfe651a3bf0ab5ab06" +dependencies = [ + "async-trait", + "futures-core", + "http", + "opentelemetry 0.25.0", + "opentelemetry-proto", + "opentelemetry_sdk 0.25.0", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c43620e8f93359eb7e627a3b16ee92d8585774986f24f2ab010817426c5ce61" +dependencies = [ + "hex", + "opentelemetry 0.25.0", + "opentelemetry_sdk 0.25.0", + "prost", + "serde", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b8e442487022a943e2315740e443dc5ee95fd541c18f509a5a6251b408a9f95" + +[[package]] +name = "opentelemetry-stdout" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76e2ded3f6e5d8b51e7aefc267022e7586975c00763aab70f807ad2cc156e94" +dependencies = [ + "async-trait", + "chrono", + "futures-util", + "opentelemetry 0.25.0", + "opentelemetry_sdk 0.25.0", + "ordered-float", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692eac490ec80f24a17828d49b40b60f5aeaccdfe6a503f939713afd22bc28df" +dependencies = [ + "async-trait", + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "once_cell", + "opentelemetry 0.24.0", + "percent-encoding", + "rand", + "thiserror", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0da0d6b47a3dbc6e9c9e36a0520e25cf943e046843818faaa3f87365a548c82" +dependencies = [ + "async-trait", + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "once_cell", + "opentelemetry 0.25.0", + "percent-encoding", + "rand", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "ordered-float" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" +dependencies = [ + "num-traits", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + +[[package]] +name = "owo-colors" +version = "4.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48dd4f4a2c8405440fd0462561f0e5806bd0f77e86f51c761481bdd4018b545e" + +[[package]] +name = "parking_lot" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff24dfcda44452b9816fff4cd4227e1bb73ff5a2f1bc1105aa92fb8565ce44d2" +dependencies = [ + "proc-macro2", + "syn 2.0.104", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "redox_syscall" +version = "0.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +dependencies = [ + "base64", + "bytes", + "futures-core", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "js-sys", + "log", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower 0.5.2", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "reqwest-middleware" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562ceb5a604d3f7c885a792d42c199fd8af239d0a51b2fa6a78aafa092452b04" +dependencies = [ + "anyhow", + "async-trait", + "http", + "reqwest", + "serde", + "thiserror", + "tower-service", +] + +[[package]] +name = "reqwest-middleware" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57f17d28a6e6acfe1733fe24bcd30774d13bffa4b8a22535b4c8c98423088d4e" +dependencies = [ + "anyhow", + "async-trait", + "http", + "reqwest", + "serde", + "thiserror", + "tower-service", +] + +[[package]] +name = "reqwest-tracing" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d70ea85f131b2ee9874f0b160ac5976f8af75f3c9badfe0d955880257d10bd83" +dependencies = [ + "anyhow", + "async-trait", + "getrandom 0.2.16", + "http", + "matchit 0.8.6", + "opentelemetry 0.25.0", + "reqwest", + "reqwest-middleware 0.4.2", + "tracing", + "tracing-opentelemetry 0.26.0", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "scc" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22b2d775fb28f245817589471dd49c5edf64237f4a19d10ce9a92ff4651a27f4" +dependencies = [ + "sdd", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "serde_json" +version = "1.0.143" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "swss-common" +version = "0.1.0" +source = "git+https://github.com/sonic-net/sonic-swss-common.git?branch=master#1484a851dbfdd4b122c361cd7ea03eca0afe5d63" +dependencies = [ + "bindgen", + "getset", + "lazy_static", + "libc", + "serde", + "tracing-subscriber", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "terminal_size" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.38.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68722da18b0fc4a05fdc1120b302b82051265792a1e1b399086e9b204b10ad3d" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.5.10", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-health" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1eaf34ddb812120f5c601162d5429933c9b527d901ab0e7f930d3147e33a09b2" +dependencies = [ + "async-stream", + "prost", + "tokio", + "tokio-stream", + "tonic", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-error" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" +dependencies = [ + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9784ed4da7d921bc8df6963f8c80a0e4ce34ba6ba76668acadd3edbd985ff3b" +dependencies = [ + "js-sys", + "once_cell", + "opentelemetry 0.24.0", + "opentelemetry_sdk 0.24.1", + "smallvec", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", + "web-time", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eabc56d23707ad55ba2a0750fc24767125d5a0f51993ba41ad2c441cc7b8dea" +dependencies = [ + "js-sys", + "once_cell", + "opentelemetry 0.25.0", + "opentelemetry_sdk 0.25.0", + "smallvec", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", + "web-time", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-width" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", + "synstructure", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000000..a2db894e139 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,78 @@ +[workspace] +resolver = '2' +members = [ + "crates/countersyncd", +] +exclude = [] + +[workspace.package] +version = "0.1.0" +authors = ["SONiC"] +license = "Apache-2.0" +repository = "https://github.com/sonic-net/sonic-swss" +documentation = "https://github.com/sonic-net/SONiC/tree/master/doc" +keywords = ["sonic", "swss", "network", "switch"] +edition = "2021" + +[workspace.lints.rust] +unused_extern_crates = 'warn' +trivial_numeric_casts = 'warn' +unstable_features = 'warn' +unused_import_braces = 'warn' + +[workspace.dependencies] +# Async runtime +tokio = { version = "1.37", features = ["full"] } +tokio-util = { version = "0.7", features = ["rt"] } +tokio-stream = "0.1" + +# Netlink for network operations +neli = { git = "https://github.com/jbaublitz/neli.git", tag = "neli-v0.7.0-rc2" } + +# IPFIX parser for traffic flow analysis +ipfixrw = "0.1.0" +ahash = "0.8.11" +binrw = "0.14.1" +byteorder = "1.5.0" + +# Configuration and serialization +yaml-rust = "0.4" +serde = { version = "1", features = ["derive", "rc"] } +serde_json = "1" +serde_yaml = "0.9" + +# Logging and error handling +log = "0.4.22" +env_logger = "0.11.6" +tracing = { version = "0.1", features = ["log"] } +tracing-subscriber = { version = "0.3", features = ["env-filter", "serde"] } +thiserror = "1" +anyhow = "1" +chrono = { version = "0.4", features = ["serde"] } + +# Command line utilities +clap = { version = "4", features = ["derive", "cargo", "wrap_help", "unicode", "string", "unstable-styles"] } +color-eyre = "0.6" + +# Utilities +rand = "0.8.5" +once_cell = "1.18.0" +lazy_static = "1.4" +regex = "1" +dashmap = "6" +itertools = "0.13" +uuid = { version = "1.15", features = ["v4"] } + +# SONiC specific dependencies +swss-common = { git = "https://github.com/sonic-net/sonic-swss-common.git", branch = "master" } + +# Development dependencies +tempfile = "3.12" +serial_test = "3.1" +async-trait = "0.1" +criterion = "0.5" +pretty_assertions = "1" + +# Build dependencies +tonic-build = "0.12" +vergen = { version = "8.2", features = ["build", "git", "gitoxide", "cargo", "rustc", "si"] } \ No newline at end of file diff --git a/README.md b/README.md index e627f043178..fb3dee47afd 100644 --- a/README.md +++ b/README.md @@ -21,55 +21,97 @@ The SWitch State Service (SWSS) is a collection of software that provides a data ## Getting Started -### Install +### Prerequisites + +Install the following dependencies: +``` +sudo apt install redis-server +sudo apt install libhiredis0.14 +sudo apt install libzmq5 libzmq3-dev +sudo apt install libboost-serialization1.74.0 +sudo apt install libboost1.71-dev +sudo apt install libasan6 +``` +**Note:** If your are using Ubuntu 18.04, install `libhiredis0.13` instead. + +Visit the [official sonic-buildimage Azure pipeline for the VS platform](https://dev.azure.com/mssonic/build/_build?definitionId=142&view=branches) and choose the branch that matches the sonic-swss branch you are trying to build or install. Then select the latest successful build. +From the Summary tab, access build artifacts. +image +Download the folder `sonic-buildimage.vs/target/debs/{your host machine's Debian code name}`. You can check the Debian code name of your machine by running `cat /etc/debian_version`. +image +Extract the downloaded zip file using `unzip sonic-buildimage.vs.zip`. Then navigate to `sonic-buildimage.vs/target/debs/{Debian code name}/` and install the following Debian packages: +``` +sudo dpkg -i libdashapi_1.0.0_amd64.deb libnl-3-200_3.5.0-1_amd64.deb libnl-3-dev_3.5.0-1_amd64.deb libnl-cli-3-200_3.5.0-1_amd64.deb libnl-cli-3-dev_3.5.0-1_amd64.deb libnl-genl-3-200_3.5.0-1_amd64.deb libnl-genl-3-dev_3.5.0-1_amd64.deb libnl-nf-3-200_3.5.0-1_amd64.deb libnl-nf-3-dev_3.5.0-1_amd64.deb libnl-route-3-200_3.5.0-1_amd64.deb libnl-route-3-dev_3.5.0-1_amd64.deb libprotobuf32_3.21.12-3_amd64.deb libsaimetadata_1.0.0_amd64.deb libsaimetadata-dev_1.0.0_amd64.deb libsairedis_1.0.0_amd64.deb libsairedis-dev_1.0.0_amd64.deb libsaivs_1.0.0_amd64.deb libsaivs-dev_1.0.0_amd64.deb libswsscommon_1.0.0_amd64.deb libswsscommon-dev_1.0.0_amd64.deb libteam5_1.31-1_amd64.deb libteamdctl0_1.31-1_amd64.deb libyang_1.0.73_amd64.deb libyang-dev_1.0.73_amd64.deb python3-swsscommon_1.0.0_amd64.deb +``` +**Note:** You can also [build these packages yourself (for the VS platform)](https://github.com/sonic-net/sonic-buildimage/blob/master/README.md). + +Now, you can either directly install the SONiC SWSS package or you can build it from source and then install it. To install the SONiC SWSS package that is already in `sonic-buildimage.vs/target/debs/{Debian code name}/`, simply run the following command: +``` +sudo dpkg -i swss_1.0.0_amd64.deb +``` -Before installing, add key and package sources: +#### Install from Source - sudo apt-key adv --keyserver apt-mo.trafficmanager.net --recv-keys 417A0893 - echo 'deb http://apt-mo.trafficmanager.net/repos/sonic/ trusty main' | sudo tee -a /etc/apt/sources.list.d/sonic.list - sudo apt-get update +Install build dependencies: +``` +sudo apt install libtool +sudo apt install autoconf automake +sudo apt install dh-exec +sudo apt install nlohmann-json3-dev +sudo apt install libgmock-dev +``` -Install dependencies: +Clone the `sonic-swss` repository on your host machine: `git clone https://github.com/sonic-net/sonic-swss.git`. - sudo apt-get install redis-server -t trusty - sudo apt-get install libhiredis0.13 -t trusty - sudo apt-get install libzmq5 libzmq3-dev - -Install building dependencies: +Make sure that SAI header files exist in `/usr/include/sai`. Since you have already installed `libsairedis-dev`, `libsaimetadata-dev`, and `libsaivs-dev`, this should already be the case. If you have compiled `libsairedis` yourself, make sure that the SAI header files are copied to `/usr/include/sai`. - sudo apt-get install libtool - sudo apt-get install autoconf automake - sudo apt-get install dh-exec +You can compile and install from source using: +``` +./autogen.sh +./configure +make && sudo make install +``` +**Note:** This will NOT run the mock tests located under `tests/mock_tests`. -There are a few different ways you can install SONiC-SWSS. +You can also build a debian package using: +``` +./autogen.sh +fakeroot debian/rules binary +``` +## Common issues -#### Install from Debian Repo +#### Cannot find `libboost-serialization1.74.0` -For your convenience, you can install prepared packages on Debian Jessie: +Unfortunately, `libboost-serialization1.74.0` is not officially supported on Ubuntu 20.04 (focal) even though it is supported on Debian 11 (bullseye). Therefore, you must build this package from source. You can use a script similar to [this one](https://github.com/ulikoehler/deb-buildscripts/blob/master/deb-boost.sh), but you only need to create a package for the Boost serialization library. You should also make sure that the generated package is named `libboost-serialization1.74.0`. After the package is created, you can install it by running `sudo dpkg -i libboost-serialization1.74.0_1.74.0_amd64.deb`. - sudo apt-get install swss +#### Dependency issue when installing `libzmq3-dev` -#### Install from Source +If you cannot install `libzmq3-dev` because of dependency issues, please check the version of `libkrb5` packages installed on your host machine: +``` + sudo dpkg -l | grep "libkrb5" +``` +If the version is not `1.17-6ubuntu4.7`, then you need to install the correct version: -Checkout the source: `git clone https://github.com/sonic-net/sonic-swss.git` and install it yourself. + sudo apt install libkrb5support0=1.17-6ubuntu4.7 + sudo apt install libzmq3-dev -Get SAI header files into /usr/include/sai. Put the SAI header files that you use to compile -libsairedis into /usr/include/sai +**Warning:** This may remove many packages that are already installed on your system. Please take note of what is being removed. -Install prerequisite packages: +**Note:** Do NOT install `*krb5*` packages that are located in the `sonic-buildimage.vs` folder that you downloaded. These packages have a higher version and will cause dependency issues. - sudo apt-get install libswsscommon libswsscommon-dev libsairedis libsairedis-dev +#### Dependency issues when installing some package -You can compile and install from source using: +If you run into dependency issues during the installation of a package, you can run `sudo apt -f install` to fix the issue. But note that if `apt` is unable to fix the dependency problem, it will attempt to remove the broken package(s). - ./autogen.sh - ./configure - make && sudo make install +#### Too many open files -You can also build a debian package using: +If you get a C++ exception with the description "Too many open files" during the mock tests, you should check the maximum number of open files that are permitted on your system: +``` +ulimit -a | grep "open files" +``` +You can increase it by executing this command: `ulimit -n 8192`. Feel free to change `8192`. This value worked fine for me. - ./autogen.sh - fakeroot debian/rules binary +**Note:** This change is only valid for the current terminal session. If you want a persistent change, append `ulimit -n 8192` to `~/.bashrc`. ## Need Help? diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 362a1062256..aec1ac6471b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,6 +27,11 @@ schedules: - 201??? always: true +parameters: + - name: debian_version + type: string + default: bookworm + variables: - name: BUILD_BRANCH ${{ if eq(variables['Build.Reason'], 'PullRequest') }}: @@ -41,11 +46,12 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - sonic_slave: sonic-slave-bullseye + pool: justForTesting + sonic_slave: sonic-slave-${{ parameters.debian_version }} common_lib_artifact_name: common-lib - swss_common_artifact_name: sonic-swss-common - sairedis_artifact_name: sonic-sairedis - artifact_name: sonic-swss + swss_common_artifact_name: sonic-swss-common-${{ parameters.debian_version }} + sairedis_artifact_name: sonic-sairedis-${{ parameters.debian_version }} + artifact_name: sonic-swss-${{ parameters.debian_version }} archive_pytests: true archive_gcov: true @@ -55,11 +61,12 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - sonic_slave: sonic-slave-bullseye + pool: justForTesting + sonic_slave: sonic-slave-${{ parameters.debian_version }} common_lib_artifact_name: common-lib - swss_common_artifact_name: sonic-swss-common - sairedis_artifact_name: sonic-sairedis - artifact_name: sonic-swss-asan + swss_common_artifact_name: sonic-swss-common-${{ parameters.debian_version }} + sairedis_artifact_name: sonic-sairedis-${{ parameters.debian_version }} + artifact_name: sonic-swss-asan-${{ parameters.debian_version }} asan: true - stage: BuildArm @@ -70,24 +77,24 @@ stages: parameters: arch: armhf timeout: 240 - pool: sonicbld-armhf - sonic_slave: sonic-slave-bullseye-armhf + pool: sonicso1ES-armhf + sonic_slave: sonic-slave-${{ parameters.debian_version }}-armhf common_lib_artifact_name: common-lib.armhf - swss_common_artifact_name: sonic-swss-common.armhf - sairedis_artifact_name: sonic-sairedis.armhf - artifact_name: sonic-swss.armhf + swss_common_artifact_name: sonic-swss-common-${{ parameters.debian_version }}.armhf + sairedis_artifact_name: sonic-sairedis-${{ parameters.debian_version }}.armhf + artifact_name: sonic-swss-${{ parameters.debian_version }}.armhf archive_gcov: false - template: .azure-pipelines/build-template.yml parameters: arch: arm64 timeout: 240 - pool: sonicbld-arm64 - sonic_slave: sonic-slave-bullseye-arm64 + pool: sonicso1ES-arm64 + sonic_slave: sonic-slave-${{ parameters.debian_version }}-arm64 common_lib_artifact_name: common-lib.arm64 - swss_common_artifact_name: sonic-swss-common.arm64 - sairedis_artifact_name: sonic-sairedis.arm64 - artifact_name: sonic-swss.arm64 + swss_common_artifact_name: sonic-swss-common-${{ parameters.debian_version }}.arm64 + sairedis_artifact_name: sonic-sairedis-${{ parameters.debian_version }}.arm64 + artifact_name: sonic-swss-${{ parameters.debian_version }}.arm64 archive_gcov: false - stage: BuildDocker @@ -96,9 +103,10 @@ stages: jobs: - template: .azure-pipelines/build-docker-sonic-vs-template.yml parameters: - swss_common_artifact_name: sonic-swss-common - sairedis_artifact_name: sonic-sairedis - swss_artifact_name: sonic-swss + swss_common_artifact_name: sonic-swss-common-${{ parameters.debian_version }} + sairedis_artifact_name: sonic-sairedis-${{ parameters.debian_version }} + swss_artifact_name: sonic-swss-${{ parameters.debian_version }} + debian_version: ${{ parameters.debian_version }} artifact_name: docker-sonic-vs - stage: BuildDockerAsan @@ -107,10 +115,11 @@ stages: jobs: - template: .azure-pipelines/build-docker-sonic-vs-template.yml parameters: - swss_common_artifact_name: sonic-swss-common - sairedis_artifact_name: sonic-sairedis - swss_artifact_name: sonic-swss-asan + swss_common_artifact_name: sonic-swss-common-${{ parameters.debian_version }} + sairedis_artifact_name: sonic-sairedis-${{ parameters.debian_version }} + swss_artifact_name: sonic-swss-asan-${{ parameters.debian_version }} artifact_name: docker-sonic-vs-asan + debian_version: ${{ parameters.debian_version }} asan: true - stage: Test @@ -121,8 +130,9 @@ stages: parameters: log_artifact_name: log gcov_artifact_name: sonic-gcov - sonic_slave: sonic-slave-bullseye + sonic_slave: sonic-slave-${{ parameters.debian_version }} archive_gcov: true + debian_version: ${{ parameters.debian_version }} - stage: TestAsan dependsOn: BuildDockerAsan @@ -132,20 +142,21 @@ stages: parameters: log_artifact_name: log-asan gcov_artifact_name: sonic-gcov - sonic_slave: sonic-slave-bullseye + sonic_slave: sonic-slave-${{ parameters.debian_version }} docker_sonic_vs_name: docker-sonic-vs-asan asan: true + debian_version: ${{ parameters.debian_version }} - stage: Gcov + condition: false dependsOn: Test - condition: in(dependencies.Test.result, 'Succeeded', 'SucceededWithIssues') jobs: - template: .azure-pipelines/gcov.yml parameters: arch: amd64 - sonic_slave: sonic-slave-bullseye - swss_common_artifact_name: sonic-swss-common - sairedis_artifact_name: sonic-sairedis - swss_artifact_name: sonic-swss + sonic_slave: sonic-slave-${{ parameters.debian_version }} + swss_common_artifact_name: sonic-swss-common-${{ parameters.debian_version }} + sairedis_artifact_name: sonic-sairedis-${{ parameters.debian_version }} + swss_artifact_name: sonic-swss-${{ parameters.debian_version }} artifact_name: sonic-gcov archive_gcov: true diff --git a/cfgmgr/Makefile.am b/cfgmgr/Makefile.am index 09fda145fce..0f71ad7b0bb 100644 --- a/cfgmgr/Makefile.am +++ b/cfgmgr/Makefile.am @@ -3,9 +3,9 @@ CFLAGS_SAI = -I /usr/include/sai LIBNL_CFLAGS = -I/usr/include/libnl3 LIBNL_LIBS = -lnl-genl-3 -lnl-route-3 -lnl-3 SAIMETA_LIBS = -lsaimeta -lsaimetadata -lzmq -COMMON_LIBS = -lswsscommon +COMMON_LIBS = -lswsscommon -lpthread -bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd fabricmgrd +bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd fabricmgrd stpmgrd cfgmgrdir = $(datadir)/swss @@ -96,26 +96,33 @@ tunnelmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CF tunnelmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) tunnelmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) -macsecmgrd_SOURCES = macsecmgrd.cpp macsecmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +macsecmgrd_SOURCES = macsecmgrd.cpp macsecmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h $(top_srcdir)/orchagent/macsecpost.cpp macsecmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) macsecmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) macsecmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +stpmgrd_SOURCES = stpmgrd.cpp stpmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +stpmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +stpmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +stpmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + if GCOV_ENABLED -vlanmgrd_LDADD += -lgcovpreload -teammgrd_LDADD += -lgcovpreload -portmgrd_LDADD += -lgcovpreload -intfmgrd_LDADD+= -lgcovpreload -buffermgrd_LDADD += -lgcovpreload -vrfmgrd_LDADD += -lgcovpreload -nbrmgrd_LDADD += -lgcovpreload -vxlanmgrd_LDADD += -lgcovpreload -sflowmgrd_LDADD += -lgcovpreload -natmgrd_LDADD += -lgcovpreload -coppmgrd_LDADD += -lgcovpreload -tunnelmgrd_LDADD += -lgcovpreload -macsecmgrd_LDADD += -lgcovpreload -fabricmgrd_LDADD += -lgcovpreload +vlanmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +teammgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +portmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +fabricmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +intfmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +buffermgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +vrfmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +nbrmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +vxlanmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +sflowmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +natmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +coppmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +tunnelmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +macsecmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp +stpmgrd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED @@ -133,5 +140,6 @@ coppmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp tunnelmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp macsecmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp fabricmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +stpmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp endif diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index 1b6851f77dc..7bb9729cedb 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -1,12 +1,16 @@ -- KEYS - port name -- ARGV[1] - profile name -- ARGV[2] - new size --- ARGV[3] - pg to add +-- ARGV[3] - new xon +-- ARGV[4] - new xoff +-- ARGV[5] - pg to add local port = KEYS[1] local input_profile_name = ARGV[1] local input_profile_size = tonumber(ARGV[2]) -local new_pg = ARGV[3] +local input_profile_xon = tonumber(ARGV[3]) +local input_profile_xoff = tonumber(ARGV[4]) +local new_pg = ARGV[5] local function is_port_with_8lanes(lanes) -- On Spectrum 3, ports with 8 lanes have doubled pipeline latency @@ -55,17 +59,31 @@ end local asic_keys = redis.call('KEYS', 'ASIC_TABLE*') local pipeline_latency = tonumber(redis.call('HGET', asic_keys[1], 'pipeline_latency')) +local cell_size = tonumber(redis.call('HGET', asic_keys[1], 'cell_size')) +local port_reserved_shp = tonumber(redis.call('HGET', asic_keys[1], 'port_reserved_shp')) +local port_max_shp = tonumber(redis.call('HGET', asic_keys[1], 'port_max_shp')) if is_port_with_8lanes(lanes) then -- The pipeline latency should be adjusted accordingly for ports with 2 buffer units pipeline_latency = pipeline_latency * 2 - 1 egress_mirror_size = egress_mirror_size * 2 + port_reserved_shp = port_reserved_shp * 2 end + local lossy_pg_size = pipeline_latency * 1024 accumulative_size = accumulative_size + lossy_pg_size + egress_mirror_size -- Fetch all keys in BUFFER_PG according to the port redis.call('SELECT', appl_db) +local is_shp_enabled +local shp_size = tonumber(redis.call('HGET', 'BUFFER_POOL_TABLE:ingress_lossless_pool', 'xoff')) +if shp_size == nil or shp_size == 0 then + is_shp_enabled = false +else + is_shp_enabled = true +end +local accumulative_shared_headroom = 0 + local debuginfo = {} local function get_number_of_pgs(keyname) @@ -122,26 +140,50 @@ end table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) for pg_key, profile in pairs(all_pgs) do local current_profile_size + local current_profile_xon + local current_profile_xoff + local buffer_profile_table_name = 'BUFFER_PROFILE_TABLE:' if profile ~= input_profile_name then - local referenced_profile_size = redis.call('HGET', 'BUFFER_PROFILE_TABLE:' .. profile, 'size') + local referenced_profile_size = redis.call('HGET', buffer_profile_table_name .. profile, 'size') if not referenced_profile_size then - referenced_profile_size = redis.call('HGET', '_BUFFER_PROFILE_TABLE:' .. profile, 'size') + buffer_profile_table_name = '_BUFFER_PROFILE_TABLE:' + referenced_profile_size = redis.call('HGET', buffer_profile_table_name .. profile, 'size') table.insert(debuginfo, 'debug:pending profile: ' .. profile) end current_profile_size = tonumber(referenced_profile_size) + current_profile_xon = tonumber(redis.call('HGET', buffer_profile_table_name .. profile, 'xon')) + current_profile_xoff = tonumber(redis.call('HGET', buffer_profile_table_name .. profile, 'xoff')) else current_profile_size = input_profile_size + current_profile_xon = input_profile_xon + current_profile_xoff = input_profile_xoff end if current_profile_size == 0 then current_profile_size = lossy_pg_size end accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key) - table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. accumulative_size) + + if is_shp_enabled and current_profile_xon and current_profile_xoff then + if current_profile_size < current_profile_xon + current_profile_xoff then + accumulative_shared_headroom = accumulative_shared_headroom + (current_profile_xon + current_profile_xoff - current_profile_size) * get_number_of_pgs(pg_key) + end + end + table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. accumulative_size .. ':accu_shp:' .. accumulative_shared_headroom) end if max_headroom_size > accumulative_size then - table.insert(ret, "result:true") - table.insert(ret, "debug:Accumulative headroom on port " .. accumulative_size .. ", the maximum available headroom " .. max_headroom_size) + if is_shp_enabled then + local max_shp = (port_max_shp + port_reserved_shp) * cell_size + if accumulative_shared_headroom > max_shp then + table.insert(ret, "result:false") + else + table.insert(ret, "result:true") + end + table.insert(ret, "debug:Accumulative headroom on port " .. accumulative_size .. ", the maximum available headroom " .. max_headroom_size .. ", the port SHP " .. accumulative_shared_headroom .. ", max SHP " .. max_shp) + else + table.insert(ret, "result:true") + table.insert(ret, "debug:Accumulative headroom on port " .. accumulative_size .. ", the maximum available headroom " .. max_headroom_size) + end else table.insert(ret, "result:false") table.insert(ret, "debug:Accumulative headroom on port " .. accumulative_size .. " exceeds the maximum available headroom which is " .. max_headroom_size) diff --git a/cfgmgr/buffer_headroom_mellanox.lua b/cfgmgr/buffer_headroom_mellanox.lua index d99cd02816a..d1c1a097c0e 100644 --- a/cfgmgr/buffer_headroom_mellanox.lua +++ b/cfgmgr/buffer_headroom_mellanox.lua @@ -79,6 +79,13 @@ for i = 1, #asic_table_content, 2 do end end +local kb_on_tile = 0 +if asic_keys[1]:sub(-1) == '4' or asic_keys[1]:sub(-1) == '5' then + -- Calculate kB on tile for Spectrum-4 and Spectrum-5 + -- The last digit of ASIC table key (with the name convention of "MELLANOX-SPECTRUM-N") represents the generation of the ASIC. + kb_on_tile = port_speed / 1000 * 120 / 8 +end + -- Fetch lossless traffic info from CONFIG_DB redis.call('SELECT', config_db) local lossless_traffic_keys = redis.call('KEYS', 'LOSSLESS_TRAFFIC_PATTERN*') @@ -123,7 +130,7 @@ local speed_overhead -- Adjustment for 8-lane port if is_8lane ~= nil and is_8lane then - pipeline_latency = pipeline_latency * 2 - 1024 + pipeline_latency = pipeline_latency * 2 speed_overhead = port_mtu else speed_overhead = 0 @@ -134,8 +141,10 @@ if cell_size > 2 * minimal_packet_size then else worst_case_factor = (2 * cell_size) / (1 + cell_size) end +worst_case_factor = math.ceil(worst_case_factor) -cell_occupancy = (100 - small_packet_percentage + small_packet_percentage * worst_case_factor) / 100 +local small_packet_percentage_by_byte = 100 * minimal_packet_size / ((small_packet_percentage * minimal_packet_size + (100 - small_packet_percentage) * lossless_mtu) / 100) +cell_occupancy = (100 - small_packet_percentage_by_byte + small_packet_percentage_by_byte * worst_case_factor) / 100 if (gearbox_delay == 0) then bytes_on_gearbox = 0 @@ -148,8 +157,8 @@ if pause_quanta ~= nil then peer_response_time = (pause_quanta) * 512 / 8 end -bytes_on_cable = 2 * cable_length * port_speed * 1000000000 / speed_of_light / (8 * 1024) -propagation_delay = port_mtu + bytes_on_cable + 2 * bytes_on_gearbox + mac_phy_delay + peer_response_time +bytes_on_cable = 2 * cable_length * port_speed * 1000000000 / speed_of_light / (8 * 1000) +propagation_delay = port_mtu + bytes_on_cable + 2 * bytes_on_gearbox + mac_phy_delay + peer_response_time + kb_on_tile -- Calculate the xoff and xon and then round up at 1024 bytes xoff_value = lossless_mtu + propagation_delay * cell_occupancy diff --git a/cfgmgr/buffer_pool_mellanox.lua b/cfgmgr/buffer_pool_mellanox.lua index ee48fe0403f..f0d43991082 100644 --- a/cfgmgr/buffer_pool_mellanox.lua +++ b/cfgmgr/buffer_pool_mellanox.lua @@ -22,6 +22,7 @@ local total_port = 0 local mgmt_pool_size = 256 * 1024 local egress_mirror_headroom = 10 * 1024 +local modification_descriptors_pool_size = 0 -- The set of ports with 8 lanes local port_set_8lanes = {} @@ -69,7 +70,9 @@ local function iterate_all_items(all_items, check_lossless) if string.len(range) == 1 then size = 1 else - size = 1 + tonumber(string.sub(range, -1)) - tonumber(string.sub(range, 1, 1)) + -- Extract start and end numbers from the range (e.g., "8-15") + local start_num, end_num = string.match(range, "(%d+)-(%d+)") + size = tonumber(end_num) - tonumber(start_num) + 1 end profiles[profile_name] = profile_ref_count + size if port_set_8lanes[port] and ingress_profile_is_lossless[profile_name] == false then @@ -133,7 +136,7 @@ local function iterate_profile_list(all_items) return 0 end -local function fetch_buffer_pool_size_from_appldb() +local function fetch_buffer_pool_size_from_appldb(shp_enabled) local buffer_pools = {} redis.call('SELECT', config_db) local buffer_pool_keys = redis.call('KEYS', 'BUFFER_POOL|*') @@ -158,7 +161,22 @@ local function fetch_buffer_pool_size_from_appldb() end xoff = redis.call('HGET', 'BUFFER_POOL_TABLE:' .. buffer_pools[i], 'xoff') if not xoff then - table.insert(result, buffer_pools[i] .. ':' .. size) + if shp_enabled and size == "0" and buffer_pools[i] == "ingress_lossless_pool" then + -- During initialization, if SHP is enabled + -- 1. the buffer pool sizes, xoff have initialized to 0, which means the shared headroom pool is disabled + -- 2. but the buffer profiles already indicate the shared headroom pool is enabled + -- 3. later on the buffer pool sizes are updated with xoff being non-zero + -- In case the orchagent starts handling buffer configuration between 2 and 3, + -- It is inconsistent between buffer pools and profiles, which fails Mellanox SAI sanity check + -- To avoid it, it indicates the shared headroom pool is enabled by setting a very small buffer pool and shared headroom pool sizes + if size == "0" then + table.insert(result, buffer_pools[i] .. ':2048:1024') + else + table.insert(result, buffer_pools[i] .. ":" .. size .. ':1024') + end + else + table.insert(result, buffer_pools[i] .. ':' .. size) + end else table.insert(result, buffer_pools[i] .. ':' .. size .. ':' .. xoff) end @@ -169,6 +187,23 @@ end -- Connect to CONFIG_DB redis.call('SELECT', config_db) +-- Check if platform is SPC6 or later and set modification descriptors pool size +-- Extract model number from platform string (e.g., "sn6600" -> 6600, "sn5800" -> 5800, "sn10600" -> 10600) +-- Use (%d+) pattern to capture one or more digits for extensibility (handles future multi-digit series like sn10xxx, sn11xxx) +local platform = redis.call('HGET', 'DEVICE_METADATA|localhost', 'platform') +if platform then + local model_str = string.match(platform, "sn(%d+)") + if model_str then + local model_number = tonumber(model_str) + -- SPC6 or later models (>= 6000 excludes SPC5 models like 5400/5800, includes SPC6+ like 6600/7xxx/10xxx) + -- Reserve 32MB for modification descriptors pool + if model_number and model_number >= 6000 then + modification_descriptors_pool_size = 32 * 1024 * 1024 + egress_mirror_headroom = 0 + end + end +end + -- Parse all the pools and seperate them according to the direction local ipools = {} local epools = {} @@ -295,7 +330,7 @@ local fail_count = 0 fail_count = fail_count + iterate_all_items(all_pgs, true) fail_count = fail_count + iterate_all_items(all_tcs, false) if fail_count > 0 then - fetch_buffer_pool_size_from_appldb() + fetch_buffer_pool_size_from_appldb(shp_enabled) return result end @@ -305,7 +340,7 @@ local all_egress_profile_lists = redis.call('KEYS', 'BUFFER_PORT_EGRESS_PROFILE_ fail_count = fail_count + iterate_profile_list(all_ingress_profile_lists) fail_count = fail_count + iterate_profile_list(all_egress_profile_lists) if fail_count > 0 then - fetch_buffer_pool_size_from_appldb() + fetch_buffer_pool_size_from_appldb(shp_enabled) return result end @@ -367,7 +402,7 @@ accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_manag -- Accumulate sizes for egress mirror and management pool local accumulative_egress_mirror_overhead = admin_up_port * egress_mirror_headroom -accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_egress_mirror_overhead + mgmt_pool_size +accumulative_occupied_buffer = accumulative_occupied_buffer + accumulative_egress_mirror_overhead + mgmt_pool_size + modification_descriptors_pool_size -- Switch to CONFIG_DB redis.call('SELECT', config_db) @@ -406,10 +441,12 @@ local pool_size if shp_size then accumulative_occupied_buffer = accumulative_occupied_buffer + shp_size end + +local available_buffer = mmu_size - accumulative_occupied_buffer if ingress_pool_count == 1 then - pool_size = mmu_size - accumulative_occupied_buffer + pool_size = available_buffer else - pool_size = (mmu_size - accumulative_occupied_buffer) / 2 + pool_size = available_buffer / 2 end if pool_size > ceiling_mmu_size then @@ -418,12 +455,19 @@ end local shp_deployed = false for i = 1, #pools_need_update, 1 do + local percentage = tonumber(redis.call('HGET', pools_need_update[i], 'percentage')) + local effective_pool_size + if percentage ~= nil and percentage >= 0 then + effective_pool_size = available_buffer * percentage / 100 + else + effective_pool_size = pool_size + end local pool_name = string.match(pools_need_update[i], "BUFFER_POOL|([^%s]+)$") if shp_size ~= 0 and pool_name == "ingress_lossless_pool" then - table.insert(result, pool_name .. ":" .. math.ceil(pool_size) .. ":" .. math.ceil(shp_size)) + table.insert(result, pool_name .. ":" .. math.ceil(effective_pool_size) .. ":" .. math.ceil(shp_size)) shp_deployed = true else - table.insert(result, pool_name .. ":" .. math.ceil(pool_size)) + table.insert(result, pool_name .. ":" .. math.ceil(effective_pool_size)) end end @@ -449,5 +493,6 @@ table.insert(result, "debug:shp_enabled:" .. tostring(shp_enabled)) table.insert(result, "debug:shp_size:" .. shp_size) table.insert(result, "debug:total port:" .. total_port .. " ports with 8 lanes:" .. port_count_8lanes) table.insert(result, "debug:admin up port:" .. admin_up_port .. " admin up ports with 8 lanes:" .. admin_up_8lanes_port) +table.insert(result, "debug:modification_descriptors_pool_size:" .. modification_descriptors_pool_size) return result diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index ba247197c19..339c9197566 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -531,6 +531,8 @@ void BufferMgr::doTask(Consumer &consumer) } else if (m_pgfile_processed && table_name == CFG_PORT_TABLE_NAME) { + bool admin_status_found = false; + for (auto i : kfvFieldsValues(t)) { if (fvField(i) == "speed") @@ -540,8 +542,17 @@ void BufferMgr::doTask(Consumer &consumer) if (fvField(i) == "admin_status") { m_portStatusLookup[port] = fvValue(i); + admin_status_found = true; } } + + // Ensure admin_status is set to "down" if not received + if (!admin_status_found) + { + /* CONFIG_DB producer may not always generate admin_status field for down ports. */ + SWSS_LOG_INFO("admin_status is not available for port %s, assuming default down", port.c_str()); + m_portStatusLookup[port] = "down"; + } if (m_speedLookup.count(port) != 0) { @@ -549,24 +560,23 @@ void BufferMgr::doTask(Consumer &consumer) task_status = doSpeedUpdateTask(port); } } - - switch (task_status) - { - case task_process_status::task_failed: - SWSS_LOG_ERROR("Failed to process table update"); - return; - case task_process_status::task_need_retry: - SWSS_LOG_INFO("Unable to process table update. Will retry..."); - ++it; - break; - case task_process_status::task_invalid_entry: - SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); - it = consumer.m_toSync.erase(it); - break; - default: - it = consumer.m_toSync.erase(it); - break; - } + } + switch (task_status) + { + case task_process_status::task_failed: + SWSS_LOG_ERROR("Failed to process table update"); + return; + case task_process_status::task_need_retry: + SWSS_LOG_INFO("Unable to process table update. Will retry..."); + ++it; + break; + case task_process_status::task_invalid_entry: + SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); + it = consumer.m_toSync.erase(it); + break; + default: + it = consumer.m_toSync.erase(it); + break; } } } diff --git a/cfgmgr/buffermgrd.cpp b/cfgmgr/buffermgrd.cpp index e88280eb56b..08810b0bd3c 100644 --- a/cfgmgr/buffermgrd.cpp +++ b/cfgmgr/buffermgrd.cpp @@ -46,7 +46,7 @@ void dump_db_item(KeyOpFieldsValuesTuple &db_item) void write_to_state_db(shared_ptr> db_items_ptr) { - DBConnector db("STATE_DB", 0, true); + DBConnector db("STATE_DB", 0); auto &db_items = *db_items_ptr; for (auto &db_item : db_items) { diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 6c9a1e831e9..de5596446a8 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -14,6 +14,8 @@ #include "schema.h" #include "warm_restart.h" +#include "buffer/bufferschema.h" + /* * Some Tips * 1. All keys in this file are in format of APPL_DB key. @@ -852,8 +854,16 @@ void BufferMgrDynamic::checkSharedBufferPoolSize(bool force_update_during_initia } } - if (!m_mmuSize.empty()) + // Execute recalculateSharedBufferPool when MMU size is available, and avoid extra recalculation in startup. + // Logic: + // - Non-warm start: execute as soon as MMU size is available. + // - Warm start: execute if buffer is completely initialized OR buffer pools are not ready. + if (!m_mmuSize.empty() && + (!WarmStart::isWarmStart() || + (WarmStart::isWarmStart() && (m_bufferCompletelyInitialized || !m_bufferPoolReady)))) + { recalculateSharedBufferPool(); + } } // For buffer pool, only size can be updated on-the-fly @@ -896,6 +906,10 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_ } fvVector.emplace_back("xoff", profile.xoff); } + if (!profile.packet_discard_action.empty()) + { + fvVector.emplace_back(BUFFER_PROFILE_PACKET_DISCARD_ACTION, profile.packet_discard_action); + } fvVector.emplace_back("size", profile.size); fvVector.emplace_back("pool", profile.pool_name); fvVector.emplace_back(mode, profile.threshold); @@ -934,15 +948,6 @@ void BufferMgrDynamic::updateBufferObjectToDb(const string &key, const string &p void BufferMgrDynamic::updateBufferObjectListToDb(const string &key, const string &profileList, buffer_direction_t dir) { auto &table = m_applBufferProfileListTables[dir]; - const auto &direction = m_bufferDirectionNames[dir]; - - if (!m_bufferPoolReady) - { - SWSS_LOG_NOTICE("Buffer pools are not ready when configuring buffer %s profile list %s, pending", direction.c_str(), key.c_str()); - m_bufferObjectsPending = true; - return; - } - vector fvVector; fvVector.emplace_back(buffer_profile_list_field_name, profileList); @@ -1051,6 +1056,15 @@ bool BufferMgrDynamic::isHeadroomResourceValid(const string &port, const buffer_ // profile: the profile referenced by the new_pg (if provided) or all PGs // new_pg: which pg is newly added? + // Skip headroom validation only during warm start while initialization is incomplete. + // - Non-warm start: never skip. + // - Warm start: skip only if initialization has not completed. + if (WarmStart::isWarmStart() && + !m_bufferCompletelyInitialized) + { + return true; + } + if (!profile.lossless && new_pg.empty()) { SWSS_LOG_INFO("No need to check headroom for lossy PG port %s profile %s size %s without a PG specified", @@ -1065,14 +1079,16 @@ bool BufferMgrDynamic::isHeadroomResourceValid(const string &port, const buffer_ argv.emplace_back(profile.name); argv.emplace_back(profile.size); + argv.emplace_back(profile.xon); + argv.emplace_back(profile.xoff); if (!new_pg.empty()) { argv.emplace_back(new_pg); } - SWSS_LOG_INFO("Checking headroom for port %s with profile %s size %s pg %s", - port.c_str(), profile.name.c_str(), profile.size.c_str(), new_pg.c_str()); + SWSS_LOG_INFO("Checking headroom for port %s with profile %s size %s xon %s xoff %s pg %s", + port.c_str(), profile.name.c_str(), profile.size.c_str(), profile.xon.c_str(), profile.xoff.c_str(), new_pg.c_str()); try { @@ -1469,6 +1485,26 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons continue; } + // If cable len is 0m, remove lossless PG, keep lossy PG. + if (cable_length == "0m" && portPg.lossless) + { + if (oldProfile.empty()) + { + SWSS_LOG_INFO("No lossless profile found for port %s when cable length is set to '0m'.", port.c_str()); + continue; + } + + if (m_bufferProfileLookup.find(oldProfile) != m_bufferProfileLookup.end()) + { + m_bufferProfileLookup[oldProfile].port_pgs.erase(key); + } + + updateBufferObjectToDb(key, oldProfile, false); + profilesToBeReleased.insert(oldProfile); + portPg.running_profile_name.clear(); + continue; + } + string threshold; // Calculate new headroom size if (portPg.static_configured) @@ -2638,6 +2674,10 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues profileApp.direction = BUFFER_INGRESS; } } + else if (field == BUFFER_PROFILE_PACKET_DISCARD_ACTION) + { + profileApp.packet_discard_action = value; + } SWSS_LOG_INFO("Inserting BUFFER_PROFILE table field %s value %s", field.c_str(), value.c_str()); } @@ -3245,6 +3285,15 @@ task_process_status BufferMgrDynamic::handleSingleBufferPortProfileListEntry(con } } + if (!m_bufferPoolReady) + { + const auto &direction = m_bufferDirectionNames[dir]; + + SWSS_LOG_NOTICE("Buffer pools are not ready when configuring buffer %s profile list %s, pending", direction.c_str(), key.c_str()); + m_bufferObjectsPending = true; + return task_process_status::task_success; + } + auto &portInfo = m_portInfoLookup[port]; if (PORT_ADMIN_DOWN != portInfo.state) { diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index b50b0ced694..b0b3e875d64 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -76,6 +76,8 @@ typedef struct { // port_pgs - stores pgs referencing this profile // An element will be added or removed when a PG added or removed port_pg_set_t port_pgs; + // packet trimming control + std::string packet_discard_action; } buffer_profile_t; typedef struct { diff --git a/cfgmgr/coppmgr.cpp b/cfgmgr/coppmgr.cpp index cfa94988d90..65c154349b9 100644 --- a/cfgmgr/coppmgr.cpp +++ b/cfgmgr/coppmgr.cpp @@ -21,10 +21,11 @@ static set g_copp_init_set; void CoppMgr::parseInitFile(void) { - std::ifstream ifs(COPP_INIT_FILE); + std::ifstream ifs(m_coppCfgfile); + if (ifs.fail()) { - SWSS_LOG_ERROR("COPP init file %s not found", COPP_INIT_FILE); + SWSS_LOG_ERROR("COPP init file %s not found", m_coppCfgfile.c_str()); return; } json j = json::parse(ifs); @@ -182,14 +183,13 @@ bool CoppMgr::isTrapIdDisabled(string trap_id) { return false; } - break; + if (isFeatureEnabled(trap_name)) + { + return false; + } } } - if (isFeatureEnabled(trap_name)) - { - return false; - } return true; } @@ -293,7 +293,7 @@ bool CoppMgr::isDupEntry(const std::string &key, std::vector &f return true; } -CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames) : +CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames, const string copp_init_file) : Orch(cfgDb, tableNames), m_cfgCoppTrapTable(cfgDb, CFG_COPP_TRAP_TABLE_NAME), m_cfgCoppGroupTable(cfgDb, CFG_COPP_GROUP_TABLE_NAME), @@ -301,7 +301,8 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_appCoppTable(appDb, APP_COPP_TABLE_NAME), m_stateCoppTrapTable(stateDb, STATE_COPP_TRAP_TABLE_NAME), m_stateCoppGroupTable(stateDb, STATE_COPP_GROUP_TABLE_NAME), - m_coppTable(appDb, APP_COPP_TABLE_NAME) + m_coppTable(appDb, APP_COPP_TABLE_NAME), + m_coppCfgfile(copp_init_file) { SWSS_LOG_ENTER(); parseInitFile(); @@ -939,7 +940,9 @@ void CoppMgr::doFeatureTask(Consumer &consumer) { if (m_featuresCfgTable.find(key) == m_featuresCfgTable.end()) { - m_featuresCfgTable.emplace(key, kfvFieldsValues(t)); + // Init with empty feature state which will be updated in setFeatureTrapIdsStatus + FieldValueTuple fv("state", ""); + m_featuresCfgTable[key].push_back(fv); } for (auto i : kfvFieldsValues(t)) { diff --git a/cfgmgr/coppmgr.h b/cfgmgr/coppmgr.h index 44549d3bec7..86f1b0e4e28 100644 --- a/cfgmgr/coppmgr.h +++ b/cfgmgr/coppmgr.h @@ -62,7 +62,7 @@ class CoppMgr : public Orch { public: CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, - const std::vector &tableNames); + const std::vector &tableNames, const std::string copp_init_file = COPP_INIT_FILE); using Orch::doTask; private: @@ -75,6 +75,7 @@ class CoppMgr : public Orch CoppCfg m_coppGroupInitCfg; CoppCfg m_coppTrapInitCfg; CoppCfg m_featuresCfgTable; + std::string m_coppCfgfile; void doTask(Consumer &consumer); diff --git a/cfgmgr/fabricmgr.cpp b/cfgmgr/fabricmgr.cpp index bcbaa5726af..bb2420387ce 100644 --- a/cfgmgr/fabricmgr.cpp +++ b/cfgmgr/fabricmgr.cpp @@ -41,6 +41,7 @@ void FabricMgr::doTask(Consumer &consumer) string monPollThreshRecovery, monPollThreshIsolation; string isolateStatus; string alias, lanes; + string enable; std::vector field_values; string value; @@ -66,6 +67,12 @@ void FabricMgr::doTask(Consumer &consumer) monPollThreshIsolation = fvValue(i); writeConfigToAppDb(key, "monPollThreshIsolation", monPollThreshIsolation); } + else if (fvField(i) == "monState") + { + SWSS_LOG_INFO("Enable fabric monitoring setting in appl_db."); + enable = fvValue(i); + writeConfigToAppDb(key, "monState", enable); + } else if (fvField(i) == "alias") { alias = fvValue(i); @@ -105,12 +112,12 @@ bool FabricMgr::writeConfigToAppDb(const std::string &key, const std::string &fi if (key == "FABRIC_MONITOR_DATA") { m_appFabricMonitorTable.set(key, fvs); - SWSS_LOG_NOTICE("Write FABRIC_MONITOR:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + SWSS_LOG_INFO("Write FABRIC_MONITOR:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); } else { m_appFabricPortTable.set(key, fvs); - SWSS_LOG_NOTICE("Write FABRIC_PORT:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + SWSS_LOG_INFO("Write FABRIC_PORT:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); } return true; diff --git a/cfgmgr/fabricmgr.h b/cfgmgr/fabricmgr.h index dbe2fd0d897..1fd399fef9c 100644 --- a/cfgmgr/fabricmgr.h +++ b/cfgmgr/fabricmgr.h @@ -21,7 +21,7 @@ class FabricMgr : public Orch Table m_cfgFabricMonitorTable; Table m_cfgFabricPortTable; Table m_appFabricMonitorTable; - Table m_appFabricPortTable; + ProducerStateTable m_appFabricPortTable; void doTask(Consumer &consumer); bool writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value); diff --git a/cfgmgr/intfmgr.cpp b/cfgmgr/intfmgr.cpp index 78c90308071..b0dbc84e078 100644 --- a/cfgmgr/intfmgr.cpp +++ b/cfgmgr/intfmgr.cpp @@ -198,8 +198,7 @@ void IntfMgr::addLoopbackIntf(const string &alias) stringstream cmd; string res; - cmd << IP_CMD << " link add " << alias << " mtu " << LOOPBACK_DEFAULT_MTU_STR << " type dummy && "; - cmd << IP_CMD << " link set " << alias << " up"; + cmd << IP_CMD << " link add " << alias << " mtu " << LOOPBACK_DEFAULT_MTU_STR << " type dummy"; int ret = swss::exec(cmd.str(), res); if (ret) { @@ -487,28 +486,43 @@ void IntfMgr::updateSubIntfAdminStatus(const string &alias, const string &admin) } } -std::string IntfMgr::setHostSubIntfAdminStatus(const string &alias, const string &admin_status, const string &parent_admin_status) +bool IntfMgr::setIntfAdminStatus(const string &alias, const string &admin_status) { stringstream cmd; string res, cmd_str; + SWSS_LOG_INFO("intf %s admin_status: %s", alias.c_str(), admin_status.c_str()); + cmd << IP_CMD " link set " << shellquote(alias) << " " << shellquote(admin_status); + cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (ret && !isIntfStateOk(alias)) + { + // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET notification + SWSS_LOG_WARN("Setting admin_status to %s netdev failed with cmd:%s, rc:%d, error:%s", + alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + return false; + } + else if (ret) + { + throw runtime_error(cmd_str + " : " + res); + } + return true; +} + +std::string IntfMgr::setHostSubIntfAdminStatus(const string &alias, const string &admin_status, const string &parent_admin_status) +{ if (parent_admin_status == "up" || admin_status == "down") { - SWSS_LOG_INFO("subintf %s admin_status: %s", alias.c_str(), admin_status.c_str()); - cmd << IP_CMD " link set " << shellquote(alias) << " " << shellquote(admin_status); - cmd_str = cmd.str(); - int ret = swss::exec(cmd_str, res); - if (ret && !isIntfStateOk(alias)) + try { - // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET notification - SWSS_LOG_WARN("Setting admin_status to %s netdev failed with cmd:%s, rc:%d, error:%s", - alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + setIntfAdminStatus(alias, admin_status); + return admin_status; } - else if (ret) + catch (const std::runtime_error &e) { - throw runtime_error(cmd_str + " : " + res); + SWSS_LOG_NOTICE("Set Host subinterface %s admin_status set failure %s failure. Runtime error: %s", alias.c_str(), admin_status.c_str(), e.what()); + throw; } - return admin_status; } else { @@ -843,6 +857,29 @@ bool IntfMgr::doIntfGeneralTask(const vector& keys, m_loopbackIntfList.insert(alias); SWSS_LOG_INFO("Added %s loopback interface", alias.c_str()); } + + if (adminStatus.empty()) + { + adminStatus = "up"; + } + else if (adminStatus != "up" && adminStatus != "down") + { + SWSS_LOG_WARN("Got incorrect value for admin_status as %s for intf %s, defaulting as up", adminStatus.c_str(), alias.c_str()); + adminStatus = "up"; + } + + try + { + if (setIntfAdminStatus(alias, adminStatus)) + { + FieldValueTuple newAdminFvTuple("admin_status", adminStatus); + data.push_back(newAdminFvTuple); + } + } + catch (const std::runtime_error &e) + { + SWSS_LOG_WARN("Lo interface ip link set admin status %s failure. Runtime error: %s", adminStatus.c_str(), e.what()); + } } else { diff --git a/cfgmgr/intfmgr.h b/cfgmgr/intfmgr.h index 4eca2402cee..b2afbd31ce8 100644 --- a/cfgmgr/intfmgr.h +++ b/cfgmgr/intfmgr.h @@ -64,6 +64,7 @@ class IntfMgr : public Orch std::string getIntfMtu(const std::string &alias); void addHostSubIntf(const std::string&intf, const std::string &subIntf, const std::string &vlan); std::string setHostSubIntfMtu(const std::string &alias, const std::string &mtu, const std::string &parent_mtu); + bool setIntfAdminStatus(const std::string &alias, const std::string &admin_status); std::string setHostSubIntfAdminStatus(const std::string &alias, const std::string &admin_status, const std::string &parent_admin_status); void removeHostSubIntf(const std::string &subIntf); void setSubIntfStateOk(const std::string &alias); diff --git a/cfgmgr/macsecmgr.cpp b/cfgmgr/macsecmgr.cpp index 42e06731cc2..5d418e1400b 100644 --- a/cfgmgr/macsecmgr.cpp +++ b/cfgmgr/macsecmgr.cpp @@ -503,14 +503,11 @@ task_process_status MACsecMgr::enableMACsec( return task_need_retry; } - // Create MKA Session object - auto port = m_macsec_ports.emplace( - std::piecewise_construct, - std::make_tuple(port_name), - std::make_tuple()); - if (!port.second) + // Handle existing macsec profile + auto port_itr = m_macsec_ports.find(port_name); + if (port_itr != m_macsec_ports.end()) { - if (port.first->second.profile_name == profile_name) + if (port_itr->second.profile_name == profile_name) { SWSS_LOG_NOTICE( "The MACsec profile '%s' on the port '%s' has been loaded", @@ -523,7 +520,7 @@ task_process_status MACsecMgr::enableMACsec( SWSS_LOG_NOTICE( "The MACsec profile '%s' on the port '%s' " "will be replaced by the MACsec profile '%s'", - port.first->second.profile_name.c_str(), + port_itr->second.profile_name.c_str(), port_name.c_str(), profile_name.c_str()); auto result = disableMACsec(port_name, port_attr); @@ -533,6 +530,11 @@ task_process_status MACsecMgr::enableMACsec( } } } + // Create MKA Session object + auto port = m_macsec_ports.emplace( + std::piecewise_construct, + std::make_tuple(port_name), + std::make_tuple()); auto & session = port.first->second; session.profile_name = profile_name; ostringstream ostream; diff --git a/cfgmgr/macsecmgrd.cpp b/cfgmgr/macsecmgrd.cpp index 263c5b43959..2a390fbee90 100644 --- a/cfgmgr/macsecmgrd.cpp +++ b/cfgmgr/macsecmgrd.cpp @@ -17,6 +17,7 @@ #include #include "macsecmgr.h" +#include "macsecpost.h" using namespace std; using namespace swss; @@ -76,9 +77,28 @@ int main(int argc, char **argv) s.addSelectables(o->getSelectables()); } + bool isPostStateReady = false; + SWSS_LOG_NOTICE("starting main loop"); while (!received_sigterm) { + /* Don't process any config until POST state is ready */ + if (!isPostStateReady) + { + std::string state = getMacsecPostState(&stateDb); + if (state == "pass" || state == "disabled") + { + SWSS_LOG_NOTICE("FIPS MACSec POST ready: state %s", state.c_str()); + isPostStateReady = true; + } + else + { + /* Yield before retry */ + sleep(1); + continue; + } + } + Selectable *sel; int ret; diff --git a/cfgmgr/portmgr.cpp b/cfgmgr/portmgr.cpp index 19ba41dc909..e3ac0e8590d 100644 --- a/cfgmgr/portmgr.cpp +++ b/cfgmgr/portmgr.cpp @@ -45,7 +45,14 @@ bool PortMgr::setPortMtu(const string &alias, const string &mtu) } else { - throw runtime_error(cmd_str + " : " + res); + // This failure can happen on PortChannels during system startup. A PortChannel enslaves + // members before a default MTU is set on the port (set in this file, not via the config!). + // Therefore this error is always emitted on startup for portchannel members. + // In theory we shouldn't log in this case, the correct fix is to detect the + // port is part of a portchannel and not even try this but that is rejected for + // possible performance implications. + SWSS_LOG_WARN("Setting mtu to alias:%s netdev failed (isPortStateOk=true) with cmd:%s, rc:%d, error:%s", alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + return false; } return true; } @@ -192,6 +199,15 @@ void PortMgr::doTask(Consumer &consumer) } } + if (!portOk) + { + // Port configuration is handled by the orchagent. If the configuration is written to the APP DB using + // multiple Redis write commands, the orchagent may receive a partial configuration and create a port + // with incorrect settings. + field_values.emplace_back("mtu", mtu); + field_values.emplace_back("admin_status", admin_status); + } + if (field_values.size()) { writeConfigToAppDb(alias, field_values); @@ -201,8 +217,6 @@ void PortMgr::doTask(Consumer &consumer) { SWSS_LOG_INFO("Port %s is not ready, pending...", alias.c_str()); - writeConfigToAppDb(alias, "mtu", mtu); - writeConfigToAppDb(alias, "admin_status", admin_status); /* Retry setting these params after the netdev is created */ field_values.clear(); field_values.emplace_back("mtu", mtu); diff --git a/cfgmgr/stpmgr.cpp b/cfgmgr/stpmgr.cpp new file mode 100644 index 00000000000..f0cb631644e --- /dev/null +++ b/cfgmgr/stpmgr.cpp @@ -0,0 +1,1494 @@ +#include "exec.h" +#include "stpmgr.h" +#include "logger.h" +#include "tokenize.h" +#include "warm_restart.h" +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +using namespace std; +using namespace swss; + +StpMgr::StpMgr(DBConnector *confDb, DBConnector *applDb, DBConnector *statDb, + const vector &tables) : + Orch(tables), + m_cfgStpGlobalTable(confDb, CFG_STP_GLOBAL_TABLE_NAME), + m_cfgStpVlanTable(confDb, CFG_STP_VLAN_TABLE_NAME), + m_cfgStpVlanPortTable(confDb, CFG_STP_VLAN_PORT_TABLE_NAME), + m_cfgStpPortTable(confDb, CFG_STP_PORT_TABLE_NAME), + m_cfgLagMemberTable(confDb, CFG_LAG_MEMBER_TABLE_NAME), + m_cfgVlanMemberTable(confDb, CFG_VLAN_MEMBER_TABLE_NAME), + m_stateVlanTable(statDb, STATE_VLAN_TABLE_NAME), + m_stateLagTable(statDb, STATE_LAG_TABLE_NAME), + m_stateStpTable(statDb, STATE_STP_TABLE_NAME), + m_stateVlanMemberTable(statDb, STATE_VLAN_MEMBER_TABLE_NAME), + m_cfgMstGlobalTable(confDb, "STP_MST"), + m_cfgMstInstTable(confDb, "STP_MST_INST"), + m_cfgMstInstPortTable(confDb, "STP_MST_PORT") +{ + SWSS_LOG_ENTER(); + l2ProtoEnabled = L2_NONE; + + stpGlobalTask = stpVlanTask = stpVlanPortTask = stpPortTask = stpMstInstTask = false; + + // Initialize all VLANs to Invalid instance + fill_n(m_vlanInstMap, MAX_VLANS, INVALID_INSTANCE); + + int ret = system("ebtables -D FORWARD -d 01:00:0c:cc:cc:cd -j DROP"); + SWSS_LOG_DEBUG("ebtables ret %d", ret); +} + +void StpMgr::doTask(Consumer &consumer) +{ + auto table = consumer.getTableName(); + + SWSS_LOG_INFO("Get task from table %s", table.c_str()); + + if (table == CFG_STP_GLOBAL_TABLE_NAME) + doStpGlobalTask(consumer); + else if (table == CFG_STP_VLAN_TABLE_NAME) + doStpVlanTask(consumer); + else if (table == CFG_STP_VLAN_PORT_TABLE_NAME) + doStpVlanPortTask(consumer); + else if (table == CFG_STP_PORT_TABLE_NAME) + doStpPortTask(consumer); + else if (table == CFG_LAG_MEMBER_TABLE_NAME) + doLagMemUpdateTask(consumer); + else if (table == STATE_VLAN_MEMBER_TABLE_NAME) + doVlanMemUpdateTask(consumer); + else if (table == "STP_MST") + doStpMstGlobalTask(consumer); + else if (table == "STP_MST_INST") + doStpMstInstTask(consumer); + else if (table == "STP_MST_PORT") + doStpMstInstPortTask(consumer); + else if (table == CFG_STP_PORT_TABLE_NAME) + doStpPortTask(consumer); + else + SWSS_LOG_ERROR("Invalid table %s", table.c_str()); +} + +void StpMgr::doStpGlobalTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (stpGlobalTask == false) + stpGlobalTask = true; + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + STP_BRIDGE_CONFIG_MSG msg; + memset(&msg, 0, sizeof(STP_BRIDGE_CONFIG_MSG)); + + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + SWSS_LOG_INFO("STP global key %s op %s", key.c_str(), op.c_str()); + if (op == SET_COMMAND) + { + msg.opcode = STP_SET_COMMAND; + for (auto i : kfvFieldsValues(t)) + { + SWSS_LOG_DEBUG("Field: %s Val %s", fvField(i).c_str(), fvValue(i).c_str()); + if (fvField(i) == "mode") + { + if (fvValue(i) == "pvst") + { + if (l2ProtoEnabled == L2_NONE) + { + const std::string cmd = std::string("") + + " ebtables -A FORWARD -d 01:00:0c:cc:cc:cd -j DROP"; + std::string res; + int ret = swss::exec(cmd, res); + if (ret != 0) + SWSS_LOG_ERROR("ebtables add failed for PVST %d", ret); + + l2ProtoEnabled = L2_PVSTP; + } + msg.stp_mode = L2_PVSTP; + } + else if (fvValue(i) == "mst") + { + if (l2ProtoEnabled == L2_NONE) + { + l2ProtoEnabled = L2_MSTP; + } + msg.stp_mode = L2_MSTP; + + // Assign all VLANs to zero instance for MSTP + fill_n(m_vlanInstMap, MAX_VLANS, 0); + } + else + { + SWSS_LOG_ERROR("Error: Invalid mode %s", fvValue(i).c_str()); + } + } + else if (fvField(i) == "rootguard_timeout") + { + msg.rootguard_timeout = stoi(fvValue(i).c_str()); + } + } + + memcpy(msg.base_mac_addr, macAddress.getMac(), 6); + } + else if (op == DEL_COMMAND) + { + msg.opcode = STP_DEL_COMMAND; + + // Free Up all instances + FREE_ALL_INST_ID(); + + // Initialize all VLANs to Invalid instance + fill_n(m_vlanInstMap, MAX_VLANS, INVALID_INSTANCE); + + // Remove ebtables rule based on protocol mode + if (l2ProtoEnabled == L2_PVSTP) + { + const std::string pvst_cmd = + "ebtables -D FORWARD -d 01:00:0c:cc:cc:cd -j DROP"; + std::string res_pvst; + int ret_pvst = swss::exec(pvst_cmd, res_pvst); + if (ret_pvst != 0) + SWSS_LOG_ERROR("ebtables del failed for PVST %d", ret_pvst); + } + l2ProtoEnabled = L2_NONE; + } + + // Send the message to the daemon + sendMsgStpd(STP_BRIDGE_CONFIG, sizeof(msg), (void *)&msg); + + // Move to the next item + it = consumer.m_toSync.erase(it); + } +} + + +void StpMgr::doStpVlanTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (stpGlobalTask == false || (stpPortTask == false && !isStpPortEmpty())) + return; + + if (stpVlanTask == false) + stpVlanTask = true; + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + STP_VLAN_CONFIG_MSG *msg = NULL; + uint32_t len = 0; + bool stpEnable = false; + uint8_t newInstance = 0; + int instId, forwardDelay, helloTime, maxAge, priority, portCnt = 0; + instId = forwardDelay = helloTime = maxAge = priority = portCnt = 0; + + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + string vlanKey = key.substr(4); // Remove Vlan prefix + int vlan_id = stoi(vlanKey.c_str()); + + SWSS_LOG_INFO("STP vlan key %s op %s", key.c_str(), op.c_str()); + if (op == SET_COMMAND) + { + if (l2ProtoEnabled == L2_NONE || !isVlanStateOk(key)) + { + // Wait till STP is configured + it++; + continue; + } + + for (auto i : kfvFieldsValues(t)) + { + SWSS_LOG_DEBUG("Field: %s Val: %s", fvField(i).c_str(), fvValue(i).c_str()); + + if (fvField(i) == "enabled") + { + stpEnable = (fvValue(i) == "true") ? true : false; + } + else if (fvField(i) == "forward_delay") + { + forwardDelay = stoi(fvValue(i).c_str()); + } + else if (fvField(i) == "hello_time") + { + helloTime = stoi(fvValue(i).c_str()); + } + else if (fvField(i) == "max_age") + { + maxAge = stoi(fvValue(i).c_str()); + } + else if (fvField(i) == "priority") + { + priority = stoi(fvValue(i).c_str()); + } + } + } + else if (op == DEL_COMMAND) + { + stpEnable = false; + if (l2ProtoEnabled == L2_NONE) + { + it = consumer.m_toSync.erase(it); + continue; + } + } + + len = sizeof(STP_VLAN_CONFIG_MSG); + if (stpEnable == true) + { + vector port_list; + if (m_vlanInstMap[vlan_id] == INVALID_INSTANCE) + { + /* VLAN is being added to the instance. Get all members for VLAN Mapping*/ + if (l2ProtoEnabled == L2_PVSTP) + { + newInstance = 1; + instId = allocL2Instance(vlan_id); + if (instId == -1) + { + SWSS_LOG_ERROR("Couldnt allocate instance to VLAN %d", vlan_id); + it = consumer.m_toSync.erase(it); + continue; + } + + portCnt = getAllVlanMem(key, port_list); + SWSS_LOG_DEBUG("Port count %d", portCnt); + } + + len += (uint32_t)(portCnt * sizeof(PORT_ATTR)); + } + + msg = (STP_VLAN_CONFIG_MSG *)calloc(1, len); + if (!msg) + { + SWSS_LOG_ERROR("mem failed for vlan %d", vlan_id); + return; + } + + msg->opcode = STP_SET_COMMAND; + msg->vlan_id = vlan_id; + msg->newInstance = newInstance; + msg->inst_id = m_vlanInstMap[vlan_id]; + msg->forward_delay = forwardDelay; + msg->hello_time = helloTime; + msg->max_age = maxAge; + msg->priority = priority; + msg->count = portCnt; + + if(msg->count) + { + int i = 0; + PORT_ATTR *attr = msg->port_list; + for (auto p = port_list.begin(); p != port_list.end(); p++) + { + attr[i].mode = p->mode; + attr[i].enabled = p->enabled; + strncpy(attr[i].intf_name, p->intf_name, IFNAMSIZ); + SWSS_LOG_DEBUG("MemIntf: %s", p->intf_name); + i++; + } + } + } + else + { + if (m_vlanInstMap[vlan_id] == INVALID_INSTANCE) + { + // Already deallocated. NoOp. This can happen when STP + // is disabled on a VLAN more than once + it = consumer.m_toSync.erase(it); + continue; + } + + msg = (STP_VLAN_CONFIG_MSG *)calloc(1, len); + if (!msg) + { + SWSS_LOG_ERROR("mem failed for vlan %d", vlan_id); + return; + } + + msg->opcode = STP_DEL_COMMAND; + msg->inst_id = m_vlanInstMap[vlan_id]; + + deallocL2Instance(vlan_id); + } + + sendMsgStpd(STP_VLAN_CONFIG, len, (void *)msg); + if (msg) + free(msg); + + it = consumer.m_toSync.erase(it); + } +} + +void StpMgr::doStpMstGlobalTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (stpGlobalTask == false) + return; + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string key = kfvKey(t); + string op = kfvOp(t); + + SWSS_LOG_INFO("STP MST global key %s op %s", key.c_str(), op.c_str()); + + STP_MST_GLOBAL_CONFIG_MSG msg; + memset(&msg, 0, sizeof(msg)); // Initialize message structure to zero + + if (op == SET_COMMAND) + { + msg.opcode = STP_SET_COMMAND; + + for (auto i : kfvFieldsValues(t)) + { + SWSS_LOG_DEBUG("Field: %s Val: %s", fvField(i).c_str(), fvValue(i).c_str()); + + if (fvField(i) == "name") + { + strncpy(msg.name, fvValue(i).c_str(), sizeof(msg.name) - 1); + } + else if (fvField(i) == "revision") + { + msg.revision_number = static_cast(stoi(fvValue(i))); + } + else if (fvField(i) == "forward_delay") + { + msg.forward_delay = static_cast(stoi(fvValue(i))); + } + else if (fvField(i) == "hello_time") + { + msg.hello_time = static_cast(stoi(fvValue(i))); + } + else if (fvField(i) == "max_age") + { + msg.max_age = static_cast(stoi(fvValue(i))); + } + else if (fvField(i) == "max_hops") + { + msg.max_hops = static_cast(stoi(fvValue(i))); + } + else + { + SWSS_LOG_ERROR("Invalid field: %s", fvField(i).c_str()); + } + } + } + else if (op == DEL_COMMAND) + { + msg.opcode = STP_DEL_COMMAND; + } + + sendMsgStpd(STP_MST_GLOBAL_CONFIG, sizeof(msg), (void *)&msg); + + it = consumer.m_toSync.erase(it); + } +} + +void StpMgr::processStpVlanPortAttr(const string op, uint32_t vlan_id, const string intfName, + vector&tupEntry) +{ + STP_VLAN_PORT_CONFIG_MSG msg; + memset(&msg, 0, sizeof(STP_VLAN_PORT_CONFIG_MSG)); + + msg.vlan_id = vlan_id; + msg.inst_id = m_vlanInstMap[vlan_id]; + strncpy(msg.intf_name, intfName.c_str(), IFNAMSIZ-1); + + if (op == SET_COMMAND) + { + msg.opcode = STP_SET_COMMAND; + msg.priority = -1; + + for (auto i : tupEntry) + { + SWSS_LOG_DEBUG("Field: %s Val: %s", fvField(i).c_str(), fvValue(i).c_str()); + if (fvField(i) == "path_cost") + { + msg.path_cost = stoi(fvValue(i).c_str()); + } + else if (fvField(i) == "priority") + { + msg.priority = stoi(fvValue(i).c_str()); + } + } + } + else if (op == DEL_COMMAND) + { + msg.opcode = STP_DEL_COMMAND; + } + + sendMsgStpd(STP_VLAN_PORT_CONFIG, sizeof(msg), (void *)&msg); +} + +void StpMgr::doStpVlanPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (stpGlobalTask == false || stpVlanTask == false || stpPortTask == false) + return; + + if (stpVlanPortTask == false) + stpVlanPortTask = true; + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + STP_VLAN_PORT_CONFIG_MSG msg; + memset(&msg, 0, sizeof(STP_VLAN_PORT_CONFIG_MSG)); + + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + string vlanKey = key.substr(4); // Remove VLAN keyword + size_t found = vlanKey.find(CONFIGDB_KEY_SEPARATOR); + + int vlan_id; + string intfName; + if (found != string::npos) + { + vlan_id = stoi(vlanKey.substr(0, found)); + intfName = vlanKey.substr(found+1); + } + else + { + SWSS_LOG_ERROR("Invalid key format %s", kfvKey(t).c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + SWSS_LOG_INFO("STP vlan intf key:%s op:%s", key.c_str(), op.c_str()); + + if (op == SET_COMMAND) + { + if ((l2ProtoEnabled == L2_NONE) || (m_vlanInstMap[vlan_id] == INVALID_INSTANCE)) + { + // Wait till STP/VLAN is configured + it++; + continue; + } + } + else + { + if (l2ProtoEnabled == L2_NONE || (m_vlanInstMap[vlan_id] == INVALID_INSTANCE)) + { + it = consumer.m_toSync.erase(it); + continue; + } + } + + if (isLagEmpty(intfName)) + { + // Lag has no member. Process when first member is added/deleted + it = consumer.m_toSync.erase(it); + continue; + } + + processStpVlanPortAttr(op, vlan_id, intfName, kfvFieldsValues(t)); + + it = consumer.m_toSync.erase(it); + } +} + +void StpMgr::processStpPortAttr(const string op, + vector &tupEntry, + const string intfName) +{ + STP_PORT_CONFIG_MSG *msg = nullptr; + uint32_t len = 0; + int vlanCnt = 0; + vector vlan_list; + + // If we're setting this port's attributes, retrieve the list of VLANs for it. + if (op == SET_COMMAND) + { + vlanCnt = getAllPortVlan(intfName, vlan_list); + } + + // Allocate enough space for STP_PORT_CONFIG_MSG + all VLAN_ATTR entries. + len = static_cast( + sizeof(STP_PORT_CONFIG_MSG) + (vlanCnt * sizeof(VLAN_ATTR)) + ); + msg = static_cast(calloc(1, len)); + if (!msg) + { + SWSS_LOG_ERROR("calloc failed for interface %s", intfName.c_str()); + return; + } + // Copy interface name and VLAN count into the message. + strncpy(msg->intf_name, intfName.c_str(), IFNAMSIZ - 1); + msg->count = vlanCnt; + SWSS_LOG_INFO("VLAN count for %s is %d", intfName.c_str(), vlanCnt); + SWSS_LOG_INFO("VLAN count for %s is %d", intfName.c_str(), vlanCnt); + + // If there are VLANs, copy them into the message structure. + if (msg->count > 0) + // If there are VLANs, copy them into the message structure. + if (msg->count > 0) + { + for (int i = 0; i < msg->count; i++) + { + msg->vlan_list[i].inst_id = vlan_list[i].inst_id; + msg->vlan_list[i].mode = vlan_list[i].mode; + msg->vlan_list[i].vlan_id = vlan_list[i].vlan_id; + SWSS_LOG_DEBUG("Inst:%d Mode:%d", + vlan_list[i].inst_id, + vlan_list[i].mode); + } + } + + // Populate message fields based on the operation (SET or DEL). + if (op == SET_COMMAND) + { + msg->opcode = STP_SET_COMMAND; + msg->priority = -1; // Default priority unless specified + + for (auto &fvt : tupEntry) + { + const auto &field = fvField(fvt); + const auto &value = fvValue(fvt); + + SWSS_LOG_DEBUG("Field: %s, Value: %s", field.c_str(), value.c_str()); + + if (field == "enabled") + { + msg->enabled = (value == "true") ? 1 : 0; + } + else if (field == "root_guard") + { + msg->root_guard = (value == "true") ? 1 : 0; + } + else if (field == "bpdu_guard") + { + msg->bpdu_guard = (value == "true") ? 1 : 0; + } + else if (field == "bpdu_guard_do_disable") + { + msg->bpdu_guard_do_disable = (value == "true") ? 1 : 0; + } + else if (field == "path_cost") + { + msg->path_cost = stoi(value); + } + else if (field == "priority") + { + msg->priority = stoi(value); + } + else if (field == "portfast" && l2ProtoEnabled == L2_PVSTP) + { + msg->portfast = (value == "true") ? 1 : 0; + } + else if (field == "uplink_fast" && l2ProtoEnabled ==L2_PVSTP) + { + msg->uplink_fast = (value == "true") ? 1 : 0; + } + else if (field == "edge_port" && l2ProtoEnabled ==L2_MSTP) + { + msg->edge_port = (value == "true") ? 1 : 0; + } + else if (field== "link_type" && l2ProtoEnabled == L2_MSTP) + { + msg->link_type = static_cast(stoi(field.c_str())); + } + } + } + else if (op == DEL_COMMAND) + { + msg->opcode = STP_DEL_COMMAND; + msg->enabled = 0; + } + + // Send the fully prepared message to the STP daemon. + sendMsgStpd(STP_PORT_CONFIG, len, reinterpret_cast(msg)); + + // Clean up. + free(msg); +} + +void StpMgr::doStpPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (stpGlobalTask == false) + return; + + if (stpPortTask == false) + stpPortTask = true; + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + if (isLagEmpty(key)) + { + it = consumer.m_toSync.erase(it); + continue; + } + + if (op == SET_COMMAND) + { + if (l2ProtoEnabled == L2_NONE) + { + // Wait till STP is configured + it++; + continue; + } + } + else + { + if (l2ProtoEnabled == L2_NONE) + { + it = consumer.m_toSync.erase(it); + continue; + } + } + + SWSS_LOG_INFO("STP port key:%s op:%s", key.c_str(), op.c_str()); + processStpPortAttr(op, kfvFieldsValues(t), key); + + it = consumer.m_toSync.erase(it); + } +} + +void StpMgr::doVlanMemUpdateTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + STP_VLAN_MEM_CONFIG_MSG msg; + memset(&msg, 0, sizeof(STP_VLAN_MEM_CONFIG_MSG)); + + KeyOpFieldsValuesTuple t = it->second; + + auto key = kfvKey(t); + auto op = kfvOp(t); + + string vlanKey = key.substr(4); // Remove Vlan prefix + size_t found = vlanKey.find(CONFIGDB_KEY_SEPARATOR); + + int vlan_id; + string intfName; + if (found != string::npos) + { + vlan_id = stoi(vlanKey.substr(0, found)); + intfName = vlanKey.substr(found+1); + } + else + { + SWSS_LOG_ERROR("Invalid key format. No member port is presented: %s", kfvKey(t).c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + SWSS_LOG_INFO("STP vlan mem key:%s op:%s inst:%d", key.c_str(), op.c_str(), m_vlanInstMap[vlan_id]); + // If STP is running on this VLAN, notify STPd + if (m_vlanInstMap[vlan_id] != INVALID_INSTANCE && !isLagEmpty(intfName)) + { + int8_t tagging_mode = TAGGED_MODE; + + if (op == SET_COMMAND) + { + tagging_mode = getVlanMemMode(key); + if (tagging_mode == INVALID_MODE) + { + SWSS_LOG_ERROR("invalid mode %s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + SWSS_LOG_DEBUG("mode %d key %s", tagging_mode, key.c_str()); + + msg.enabled = isStpEnabled(intfName); + + vector stpVlanPortEntry; + if (m_cfgStpVlanPortTable.get(key, stpVlanPortEntry)) + { + for (auto entry : stpVlanPortEntry) + { + if (entry.first == "priority") + msg.priority = stoi(entry.second); + else if (entry.first == "path_cost") + msg.path_cost = stoi(entry.second); + } + } + } + + msg.opcode = (op == SET_COMMAND) ? STP_SET_COMMAND : STP_DEL_COMMAND; + msg.vlan_id = vlan_id; + msg.inst_id = m_vlanInstMap[vlan_id]; + msg.mode = tagging_mode; + msg.priority = -1; + msg.path_cost = 0; + + strncpy(msg.intf_name, intfName.c_str(), IFNAMSIZ-1); + + sendMsgStpd(STP_VLAN_MEM_CONFIG, sizeof(msg), (void *)&msg); + } + + it = consumer.m_toSync.erase(it); + } +} + +void StpMgr::doLagMemUpdateTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + bool notifyStpd = false; + + auto key = kfvKey(t); + auto op = kfvOp(t); + + string po_name; + string po_mem; + size_t found = key.find(CONFIGDB_KEY_SEPARATOR); + + if (found != string::npos) + { + po_name = key.substr(0, found); + po_mem = key.substr(found+1); + } + else + { + SWSS_LOG_ERROR("Invalid key format %s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (op == SET_COMMAND) + { + if (!isLagStateOk(po_name)) + { + it++; + continue; + } + + auto elm = m_lagMap.find(po_name); + if (elm == m_lagMap.end()) + { + // First Member added to the LAG + m_lagMap[po_name] = 1; + notifyStpd = true; + } + else + { + elm->second++; + } + } + else if (op == DEL_COMMAND) + { + auto elm = m_lagMap.find(po_name); + if (elm != m_lagMap.end()) + { + elm->second--; + + if (elm->second == 0) + { + // Last Member deleted from the LAG + m_lagMap.erase(po_name); + //notifyStpd = true; + } + } + else + SWSS_LOG_ERROR("PO not found %s", po_name.c_str()); + } + + if (notifyStpd && l2ProtoEnabled != L2_NONE) + { + vector vlan_list; + vector tupEntry; + + if (m_cfgStpPortTable.get(po_name, tupEntry)) + { + //Push STP_PORT configs for this port + processStpPortAttr(op, tupEntry, po_name); + + getAllPortVlan(po_name, vlan_list); + //Push STP_VLAN_PORT configs for this port + for (auto p = vlan_list.begin(); p != vlan_list.end(); p++) + { + vector vlanPortTup; + + string vlanPortKey = "Vlan" + to_string(p->vlan_id) + "|" + po_name; + if (m_cfgStpVlanPortTable.get(vlanPortKey, vlanPortTup)) + processStpVlanPortAttr(op, p->vlan_id, po_name, vlanPortTup); + } + } + } + + SWSS_LOG_DEBUG("LagMap"); + for (auto itr = m_lagMap.begin(); itr != m_lagMap.end(); ++itr) { + SWSS_LOG_DEBUG("PO: %s Cnt:%d", itr->first.c_str(), itr->second); + } + + it = consumer.m_toSync.erase(it); + } +} + +void StpMgr::ipcInitStpd() +{ + int ret; + struct sockaddr_un addr; + + unlink(STPMGRD_SOCK_NAME); + // create socket + stpd_fd = socket(AF_UNIX, SOCK_DGRAM, 0); + if (!stpd_fd) { + SWSS_LOG_ERROR("socket error %s", strerror(errno)); + return; + } + + // setup socket address structure + bzero(&addr, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, STPMGRD_SOCK_NAME, sizeof(addr.sun_path)-1); + + ret = (int)bind(stpd_fd, (struct sockaddr *)&addr, sizeof(struct sockaddr_un)); + if (ret == -1) + { + SWSS_LOG_ERROR("ipc bind error %s", strerror(errno)); + close(stpd_fd); + return; + } +} + +int StpMgr::allocL2Instance(uint32_t vlan_id) +{ + int idx = 0; + + if (!IS_INST_ID_AVAILABLE()) + { + SWSS_LOG_ERROR("No instance available"); + return -1; + } + + if (l2ProtoEnabled == L2_PVSTP) + { + GET_FIRST_FREE_INST_ID(idx); + } + else + { + SWSS_LOG_ERROR("invalid proto %d for vlan %d", l2ProtoEnabled, vlan_id); + return -1; + } + + //Set VLAN to Instance mapping + m_vlanInstMap[vlan_id] = idx; + SWSS_LOG_INFO("Allocated Id: %d Vlan %d", m_vlanInstMap[vlan_id], vlan_id); + + return idx; +} + +void StpMgr::deallocL2Instance(uint32_t vlan_id) +{ + int idx = 0; + + if (l2ProtoEnabled == L2_PVSTP) + { + idx = m_vlanInstMap[vlan_id]; + FREE_INST_ID(idx); + } + else + { + SWSS_LOG_ERROR("invalid proto %d for vlan %d", l2ProtoEnabled, vlan_id); + } + + m_vlanInstMap[vlan_id] = INVALID_INSTANCE; + SWSS_LOG_INFO("Deallocated Id: %d Vlan %d", m_vlanInstMap[vlan_id], vlan_id); +} + + +int StpMgr::getAllVlanMem(const string &vlanKey, vector&port_list) +{ + PORT_ATTR port_id; + vector vmEntry; + + vector vmKeys; + m_stateVlanMemberTable.getKeys(vmKeys); + + SWSS_LOG_INFO("VLAN Key: %s", vlanKey.c_str()); + for (auto key : vmKeys) + { + size_t found = key.find(CONFIGDB_KEY_SEPARATOR); //split VLAN and interface + + string vlanName; + string intfName; + if (found != string::npos) + { + vlanName = key.substr(0, found); + intfName = key.substr(found+1); + } + else + { + SWSS_LOG_ERROR("Invalid Key: %s", key.c_str()); + continue; + } + + if (vlanKey == vlanName && !isLagEmpty(intfName)) + { + port_id.mode = getVlanMemMode(key); + if (port_id.mode == INVALID_MODE) + { + SWSS_LOG_ERROR("invalid mode %s", key.c_str()); + continue; + } + port_id.enabled = isStpEnabled(intfName); + strncpy(port_id.intf_name, intfName.c_str(), IFNAMSIZ-1); + port_list.push_back(port_id); + SWSS_LOG_DEBUG("MemIntf: %s", intfName.c_str()); + } + } + + return (int)port_list.size(); +} + + + +int StpMgr::getAllPortVlan(const string &intfKey, vector&vlan_list) +{ + VLAN_ATTR vlan; + vector vmEntry; + + vector vmKeys; + m_stateVlanMemberTable.getKeys(vmKeys); + + SWSS_LOG_INFO("Intf Key: %s", intfKey.c_str()); + for (auto key : vmKeys) + { + string vlanKey = key.substr(4); // Remove Vlan prefix + size_t found = vlanKey.find(CONFIGDB_KEY_SEPARATOR); //split VLAN and interface + SWSS_LOG_DEBUG("Vlan mem Key: %s", key.c_str()); + + int vlan_id; + string intfName; + if (found != string::npos) + { + vlan_id = stoi(vlanKey.substr(0, found)); + intfName = vlanKey.substr(found+1); + + if (intfName == intfKey) + { + if (m_vlanInstMap[vlan_id] != INVALID_INSTANCE) + { + vlan.mode = getVlanMemMode(key); + if (vlan.mode == INVALID_MODE) + { + SWSS_LOG_ERROR("invalid mode %s", key.c_str()); + continue; + } + + vlan.vlan_id = vlan_id; + vlan.inst_id = m_vlanInstMap[vlan_id]; + vlan_list.push_back(vlan); + SWSS_LOG_DEBUG("Matched vlan key: %s intf key %s", intfName.c_str(), intfKey.c_str()); + } + } + } + } + + return (int)vlan_list.size(); +} + +void StpMgr::doStpMstInstTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (stpGlobalTask == false || (stpPortTask == false && !isStpPortEmpty())) + return; + + if (stpMstInstTask == false) + stpMstInstTask = true; + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + STP_MST_INST_CONFIG_MSG *msg = NULL; + uint32_t len = 0; + + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + string instance = key.substr(13); // Remove "MST_INSTANCE|" prefix + uint16_t instance_id = static_cast(stoi(instance.c_str())); + + uint16_t priority = 32768; // Default bridge priority + string vlan_list_str; + vector vlan_ids; + + SWSS_LOG_INFO("STP_MST instance key %s op %s", key.c_str(), op.c_str()); + if (op == SET_COMMAND) + { + for (auto i : kfvFieldsValues(t)) + { + SWSS_LOG_DEBUG("Field: %s Val: %s", fvField(i).c_str(), fvValue(i).c_str()); + + if (fvField(i) == "bridge_priority") + { + priority = static_cast(stoi((fvValue(i).c_str()))); + } + else if (fvField(i) == "vlan_list") + { + vlan_list_str = fvValue(i); + vlan_ids = parseVlanList(vlan_list_str); + } + updateVlanInstanceMap(instance_id, vlan_ids, true); + } + + uint32_t vlan_count = static_cast(vlan_ids.size()); + len = sizeof(STP_MST_INST_CONFIG_MSG) + static_cast(vlan_count * sizeof(VLAN_LIST)); + + msg = (STP_MST_INST_CONFIG_MSG *)calloc(1, len); + if (!msg) + { + SWSS_LOG_ERROR("Memory allocation failed for STP_MST_INST_CONFIG_MSG"); + return; + } + + msg->opcode = STP_SET_COMMAND; + msg->mst_id = instance_id; + msg->priority = priority; + msg->vlan_count = static_cast(vlan_ids.size()); + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Waddress-of-packed-member" + VLAN_LIST *vlan_attr = (VLAN_LIST *)&msg->vlan_list; + #pragma GCC diagnostic pop + for (size_t i = 0; i < vlan_ids.size(); i++) + { + vlan_attr[i].vlan_id = vlan_ids[i]; + } + } + else if (op == DEL_COMMAND) + { + len = sizeof(STP_MST_INST_CONFIG_MSG); + msg = (STP_MST_INST_CONFIG_MSG *)calloc(1, len); + if (!msg) + { + SWSS_LOG_ERROR("Memory allocation failed for MST_INST_CONFIG_MSG"); + return; + } + + msg->opcode = STP_DEL_COMMAND; + msg->mst_id = instance_id; + updateVlanInstanceMap(instance_id, vlan_ids, false); + } + + sendMsgStpd(STP_MST_INST_CONFIG, len, (void *)msg); + if (msg) + free(msg); + + it = consumer.m_toSync.erase(it); + } +} + +void StpMgr::processStpMstInstPortAttr(const string op, uint16_t mst_id, const string intfName, + vector& tupEntry) +{ + STP_MST_INST_PORT_CONFIG_MSG msg; + memset(&msg, 0, sizeof(STP_MST_INST_PORT_CONFIG_MSG)); + + // Populate the message fields + msg.mst_id = mst_id; + strncpy(msg.intf_name, intfName.c_str(), IFNAMSIZ - 1); + + // Set opcode and process the fields from the tuple + if (op == SET_COMMAND) + { + msg.opcode = STP_SET_COMMAND; + msg.priority = -1; + + for (auto i : tupEntry) + { + SWSS_LOG_DEBUG("Field: %s Val: %s", fvField(i).c_str(), fvValue(i).c_str()); + + if (fvField(i) == "path_cost") + { + msg.path_cost = stoi(fvValue(i).c_str()); + } + else if (fvField(i) == "priority") + { + msg.priority = stoi(fvValue(i).c_str()); + } + } + } + else if (op == DEL_COMMAND) + { + msg.opcode = STP_DEL_COMMAND; + } + + // Send the message to the daemon + sendMsgStpd(STP_MST_INST_PORT_CONFIG, sizeof(msg), (void *)&msg); +} + + +void StpMgr::doStpMstInstPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (stpGlobalTask == false || stpMstInstTask == false || stpPortTask == false) + return; + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + STP_MST_INST_PORT_CONFIG_MSG msg; + memset(&msg, 0, sizeof(STP_MST_INST_PORT_CONFIG_MSG)); + + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + string mstKey = key.substr(9);//Remove INSTANCE keyword + size_t found = mstKey.find(CONFIGDB_KEY_SEPARATOR); + + uint16_t mst_id; + string intfName; + if (found != string::npos) + { + mst_id = static_cast(stoi(mstKey.substr(0, found))); + intfName = mstKey.substr(found + 1); + } + else + { + SWSS_LOG_ERROR("Invalid key format %s", kfvKey(t).c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + SWSS_LOG_INFO("STP MST intf key:%s op:%s", key.c_str(), op.c_str()); + + if (op == SET_COMMAND) + { + if ((l2ProtoEnabled == L2_NONE)) + { + // Wait till STP/MST instance is configured + it++; + continue; + } + } + else + { + if (l2ProtoEnabled == L2_NONE || !(isInstanceMapped(mst_id))) + { + it = consumer.m_toSync.erase(it); + continue; + } + } + + processStpMstInstPortAttr(op, mst_id, intfName, kfvFieldsValues(t)); + + it = consumer.m_toSync.erase(it); + } +} + +// Send Message to STPd +int StpMgr::sendMsgStpd(STP_MSG_TYPE msgType, uint32_t msgLen, void *data) +{ + STP_IPC_MSG *tx_msg; + size_t len = 0; + struct sockaddr_un addr; + int rc; + + len = msgLen + (offsetof(struct STP_IPC_MSG, data)); + SWSS_LOG_INFO("tx_msg len %d msglen %d", (int)len, msgLen); + + tx_msg = (STP_IPC_MSG *)calloc(1, len); + if (tx_msg == NULL) + { + SWSS_LOG_ERROR("tx_msg mem alloc error\n"); + return -1; + } + + tx_msg->msg_type = msgType; + tx_msg->msg_len = msgLen; + memcpy(tx_msg->data, data, msgLen); + + bzero(&addr, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, STPD_SOCK_NAME, sizeof(addr.sun_path)-1); + + rc = (int)sendto(stpd_fd, (void*)tx_msg, len, 0, (struct sockaddr *)&addr, sizeof(addr)); + if (rc == -1) + { + SWSS_LOG_ERROR("tx_msg send error\n"); + } + else + { + SWSS_LOG_INFO("tx_msg sent %d", rc); + } + + free(tx_msg); + return rc; +} + +bool StpMgr::isPortInitDone(DBConnector *app_db) +{ + bool portInit = 0; + long cnt = 0; + + while(!portInit) { + Table portTable(app_db, APP_PORT_TABLE_NAME); + std::vector tuples; + portInit = portTable.get("PortInitDone", tuples); + + if(portInit) + break; + sleep(1); + cnt++; + } + SWSS_LOG_NOTICE("PORT_INIT_DONE : %d %ld", portInit, cnt); + return portInit; +} + +bool StpMgr::isVlanStateOk(const string &alias) +{ + vector temp; + + if (!alias.compare(0, strlen(VLAN_PREFIX), VLAN_PREFIX)) + { + if (m_stateVlanTable.get(alias, temp)) + { + SWSS_LOG_DEBUG("%s is ready", alias.c_str()); + return true; + } + } + SWSS_LOG_DEBUG("%s is not ready", alias.c_str()); + return false; +} + +bool StpMgr::isLagStateOk(const string &alias) +{ + vector temp; + + if (m_stateLagTable.get(alias, temp)) + { + SWSS_LOG_DEBUG("%s is ready", alias.c_str()); + return true; + } + + SWSS_LOG_DEBUG("%s is not ready", alias.c_str()); + return false; +} + +bool StpMgr::isLagEmpty(const string &key) +{ + size_t po_find = key.find("PortChannel"); + if (po_find != string::npos) + { + // If Lag, check if members present + auto elm = m_lagMap.find(key); + if (elm == m_lagMap.end()) + { + // Lag has no member + SWSS_LOG_DEBUG("%s empty", key.c_str()); + return true; + } + SWSS_LOG_DEBUG("%s not empty", key.c_str()); + } + // Else: Interface not PO + + return false; +} + +bool StpMgr::isStpPortEmpty() +{ + vector portKeys; + m_cfgStpPortTable.getKeys(portKeys); + + if (portKeys.empty()) + { + SWSS_LOG_NOTICE("stp port empty"); + return true; + } + + SWSS_LOG_NOTICE("stp port not empty"); + return false; +} + +bool StpMgr::isStpEnabled(const string &intf_name) +{ + vector temp; + + if (m_cfgStpPortTable.get(intf_name, temp)) + { + for (auto entry : temp) + { + if (entry.first == "enabled" && entry.second == "true") + { + SWSS_LOG_NOTICE("STP enabled on %s", intf_name.c_str()); + return true; + } + } + } + + SWSS_LOG_NOTICE("STP NOT enabled on %s", intf_name.c_str()); + return false; +} + +int8_t StpMgr::getVlanMemMode(const string &key) +{ + int8_t mode = -1; + vector vmEntry; + + if (m_cfgVlanMemberTable.get(key, vmEntry)) + { + for (auto entry : vmEntry) + { + if (entry.first == "tagging_mode") + mode = (entry.second == "untagged") ? UNTAGGED_MODE : TAGGED_MODE; + SWSS_LOG_INFO("mode %d for %s", mode, key.c_str()); + } + } + else + SWSS_LOG_ERROR("config vlan_member table fetch failed %s", key.c_str()); + + return mode; +} + +uint16_t StpMgr::getStpMaxInstances(void) +{ + vector vmEntry; + uint16_t max_delay = 60; + string key; + + key = "GLOBAL"; + + while(max_delay) + { + if (m_stateStpTable.get(key, vmEntry)) + { + for (auto entry : vmEntry) + { + if (entry.first == "max_stp_inst") + { + max_stp_instances = (uint16_t)stoi(entry.second.c_str()); + SWSS_LOG_NOTICE("max stp instance %d count %d", max_stp_instances, (60-max_delay)); + } + } + break; + } + sleep(1); + max_delay--; + } + + if(max_stp_instances == 0) + { + max_stp_instances = STP_DEFAULT_MAX_INSTANCES; + SWSS_LOG_NOTICE("set default max stp instance %d", max_stp_instances); + } + + return max_stp_instances; +} + +std::vector StpMgr::getVlanAliasesForInstance(uint16_t instance) { + std::vector vlanAliases; + + for (uint16_t vlanId = 0; vlanId < MAX_VLANS; ++vlanId) { + if (m_vlanInstMap[vlanId] == instance) { + vlanAliases.push_back("VLAN" + std::to_string(vlanId)); + } + } + + return vlanAliases; +} + +//Function to parse the VLAN list and handle ranges +std::vector StpMgr::parseVlanList(const std::string &vlanStr) { + std::vector vlanList; + std::stringstream ss(vlanStr); + std::string segment; + + // Split the string by commas + while (std::getline(ss, segment, ',')) { + size_t dashPos = segment.find('-'); + if (dashPos != std::string::npos) { + // If a dash is found, it's a range like "22-25" + int start = std::stoi(segment.substr(0, dashPos)); + int end = std::stoi(segment.substr(dashPos + 1)); + + // Add all VLANs in the range to the list + for (int i = start; i <= end; ++i) { + vlanList.push_back(static_cast(i)); + } + } else { + // Single VLAN, add it to the list + vlanList.push_back(static_cast(std::stoi(segment))); + } + } + return vlanList; +} + +void StpMgr::updateVlanInstanceMap(int instance, const std::vector& newVlanList, bool operation) { + if (!operation) { + // Delete instance: Reset all VLANs mapped to this instance + for (int vlan = 0; vlan < MAX_VLANS; ++vlan) { + if (m_vlanInstMap[vlan] == instance) { + m_vlanInstMap[vlan] = 0; // Reset to default instance + } + } + } + else { + // Add/Update instance: Handle additions and deletions + // Use an unordered_set for efficient lookup of new VLAN list + std::unordered_set newVlanSet(newVlanList.begin(), newVlanList.end()); + + // Iterate over the current mapping to handle deletions + for (int vlan = 0; vlan < MAX_VLANS; ++vlan) { + if (m_vlanInstMap[vlan] == instance) { + // If a VLAN is mapped to this instance but not in the new list, reset it to 0 + if (newVlanSet.find(vlan) == newVlanSet.end()) { + m_vlanInstMap[vlan] = 0; + } + } + } + + // Handle additions + for (int vlan : newVlanList) { + if (vlan >= 0 && vlan < MAX_VLANS) { + m_vlanInstMap[vlan] = instance; + } + } + } +} + +bool StpMgr::isInstanceMapped(uint16_t instance) { + for (int i = 0; i < MAX_VLANS; ++i) { + if (m_vlanInstMap[i] == static_cast(instance)) { + return true; // Instance found + } + } + return false; // Instance not, found +} \ No newline at end of file diff --git a/cfgmgr/stpmgr.h b/cfgmgr/stpmgr.h new file mode 100644 index 00000000000..263bac46fc6 --- /dev/null +++ b/cfgmgr/stpmgr.h @@ -0,0 +1,308 @@ +#ifndef __STPMGR__ +#define __STPMGR__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dbconnector.h" +#include "netmsg.h" +#include "orch.h" +#include "producerstatetable.h" +#include +#include + +// We remove PACKED definitions, only keep ALIGNED +#if defined(__GNUC__) +#define ALIGNED(x) __attribute__((aligned(x))) +#else +#define ALIGNED(x) +#endif + +#define STPMGRD_SOCK_NAME "/var/run/stpmgrd.sock" + +#define TAGGED_MODE 1 +#define UNTAGGED_MODE 0 +#define INVALID_MODE -1 + +#define MAX_VLANS 4096 + +// Maximum number of instances supported +#define L2_INSTANCE_MAX MAX_VLANS +#define STP_DEFAULT_MAX_INSTANCES 255 +#define INVALID_INSTANCE -1 + +#define GET_FIRST_FREE_INST_ID(_idx) \ + while (_idx < (int)l2InstPool.size() && l2InstPool.test(_idx)) ++_idx; \ + l2InstPool.set(_idx) + +#define FREE_INST_ID(_idx) l2InstPool.reset(_idx) +#define FREE_ALL_INST_ID() l2InstPool.reset() +#define IS_INST_ID_AVAILABLE() (l2InstPool.count() < max_stp_instances) + +#define STPD_SOCK_NAME "/var/run/stpipc.sock" + +// Enumerations must match stp_ipc.h +typedef enum L2_PROTO_MODE { + L2_NONE, + L2_PVSTP, + L2_MSTP +}L2_PROTO_MODE; + + +typedef enum LinkType { + AUTO = 0, // Auto + POINT_TO_POINT = 1, // Point-to-point + SHARED = 2 // Shared +} LinkType; + +typedef enum STP_MSG_TYPE { + STP_INVALID_MSG, + STP_INIT_READY, + STP_BRIDGE_CONFIG, + STP_VLAN_CONFIG, + STP_VLAN_PORT_CONFIG, + STP_PORT_CONFIG, + STP_VLAN_MEM_CONFIG, + STP_STPCTL_MSG, + STP_MST_GLOBAL_CONFIG, + STP_MST_INST_CONFIG, + STP_MST_INST_PORT_CONFIG, + STP_MAX_MSG +} STP_MSG_TYPE; + +typedef enum STP_CTL_TYPE { + STP_CTL_HELP, + STP_CTL_DUMP_ALL, + STP_CTL_DUMP_GLOBAL, + STP_CTL_DUMP_VLAN_ALL, + STP_CTL_DUMP_VLAN, + STP_CTL_DUMP_INTF, + STP_CTL_SET_LOG_LVL, + STP_CTL_DUMP_NL_DB, + STP_CTL_DUMP_NL_DB_INTF, + STP_CTL_DUMP_LIBEV_STATS, + STP_CTL_SET_DBG, + STP_CTL_CLEAR_ALL, + STP_CTL_CLEAR_VLAN, + STP_CTL_CLEAR_INTF, + STP_CTL_CLEAR_VLAN_INTF, + STP_CTL_MAX +} STP_CTL_TYPE; + +// Remove PACKED, add ALIGNED(4) +typedef struct STP_IPC_MSG { + int msg_type; + unsigned int msg_len; + L2_PROTO_MODE proto_mode; + char data[0]; +} ALIGNED(4) STP_IPC_MSG; + +#define STP_SET_COMMAND 1 +#define STP_DEL_COMMAND 0 + +// Add padding for alignment if needed (compare to stp_ipc.h) +typedef struct STP_INIT_READY_MSG { + uint8_t opcode; // enable/disable + uint16_t max_stp_instances; + // Example: potential extra padding if alignment warnings arise + // uint8_t padding[1]; +} ALIGNED(4) STP_INIT_READY_MSG; + +// Add padding for alignment if needed +typedef struct STP_BRIDGE_CONFIG_MSG { + uint8_t opcode; // enable/disable + uint8_t stp_mode; + int rootguard_timeout; + uint8_t base_mac_addr[6]; + // Potential padding for alignment: + // uint8_t padding[2]; +} ALIGNED(4) STP_BRIDGE_CONFIG_MSG; + +// Must match the version in stp_ipc.h exactly +typedef struct PORT_ATTR { + char intf_name[IFNAMSIZ]; // 16 bytes typically + int8_t mode; + uint8_t enabled; + // Add padding to align to 4 bytes + uint16_t padding; +} ALIGNED(4) PORT_ATTR; + +// Must match the version in stp_ipc.h exactly +typedef struct STP_VLAN_CONFIG_MSG { + uint8_t opcode; // enable/disable + uint8_t newInstance; + int vlan_id; + int inst_id; + int forward_delay; + int hello_time; + int max_age; + int priority; + int count; + PORT_ATTR port_list[0]; +} ALIGNED(4) STP_VLAN_CONFIG_MSG; + +typedef struct STP_VLAN_PORT_CONFIG_MSG { + uint8_t opcode; // enable/disable + int vlan_id; + char intf_name[IFNAMSIZ]; + int inst_id; + int path_cost; + int priority; +} ALIGNED(4) STP_VLAN_PORT_CONFIG_MSG; + +typedef struct VLAN_ATTR { + int inst_id; + int vlan_id; + int8_t mode; + // Add padding to align to 4 bytes + uint8_t padding[3]; +} ALIGNED(4) VLAN_ATTR; + +typedef struct VLAN_LIST{ + uint16_t vlan_id; +}VLAN_LIST; + +typedef struct STP_PORT_CONFIG_MSG { + uint8_t opcode; // enable/disable + char intf_name[IFNAMSIZ]; + uint8_t enabled; + uint8_t root_guard; + uint8_t bpdu_guard; + uint8_t bpdu_guard_do_disable; + uint8_t portfast; // PVST only + uint8_t uplink_fast; // PVST only + uint8_t edge_port; // MSTP only + LinkType link_type; // MSTP only + int path_cost; + int priority; + int count; + VLAN_ATTR vlan_list[0]; +} STP_PORT_CONFIG_MSG;; + +typedef struct STP_VLAN_MEM_CONFIG_MSG { + uint8_t opcode; // enable/disable + int vlan_id; + int inst_id; + char intf_name[IFNAMSIZ]; + uint8_t enabled; + int8_t mode; + // Add 1 byte padding + uint8_t padding; + int path_cost; + int priority; +} ALIGNED(4) STP_VLAN_MEM_CONFIG_MSG; + +typedef struct STP_MST_GLOBAL_CONFIG_MSG { + uint8_t opcode; // enable/disable + uint32_t revision_number; + char name[32]; + uint8_t forward_delay; + uint8_t hello_time; + uint8_t max_age; + uint8_t max_hops; +}__attribute__ ((packed))STP_MST_GLOBAL_CONFIG_MSG; + +typedef struct STP_MST_INST_CONFIG_MSG { + uint8_t opcode; // enable/disable + uint16_t mst_id; // MST instance ID + int priority; // Bridge priority + uint16_t vlan_count; // Number of VLANs in this instance + VLAN_LIST vlan_list[0]; // Flexible array for VLAN IDs +}__attribute__((packed)) STP_MST_INST_CONFIG_MSG; + +typedef struct STP_MST_INST_PORT_CONFIG_MSG { + uint8_t opcode; // enable/disable + char intf_name[IFNAMSIZ]; // Interface name + uint16_t mst_id; // MST instance ID + int path_cost; // Path cost + int priority; // Port priority +} __attribute__((packed)) STP_MST_INST_PORT_CONFIG_MSG; + +namespace swss { + +class StpMgr : public Orch +{ +public: + StpMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, + const std::vector &tables); + + using Orch::doTask; + void ipcInitStpd(); + int sendMsgStpd(STP_MSG_TYPE msgType, uint32_t msgLen, void *data); + MacAddress macAddress; + bool isPortInitDone(DBConnector *app_db); + uint16_t getStpMaxInstances(void); + +private: + Table m_cfgStpGlobalTable; + Table m_cfgStpVlanTable; + Table m_cfgStpVlanPortTable; + Table m_cfgStpPortTable; + Table m_cfgLagMemberTable; + Table m_cfgVlanMemberTable; + Table m_stateVlanTable; + Table m_stateVlanMemberTable; + Table m_stateLagTable; + Table m_stateStpTable; + Table m_cfgMstGlobalTable; + Table m_cfgMstInstTable; + Table m_cfgMstInstPortTable; + + std::bitset l2InstPool; + int stpd_fd; + enum L2_PROTO_MODE l2ProtoEnabled; + int m_vlanInstMap[MAX_VLANS]; + bool portCfgDone; + uint16_t max_stp_instances; + std::map m_lagMap; + + bool stpGlobalTask; + bool stpVlanTask; + bool stpVlanPortTask; + bool stpPortTask; + bool stpMstInstTask; + + void doTask(Consumer &consumer); + void doStpGlobalTask(Consumer &consumer); + void doStpVlanTask(Consumer &consumer); + void doStpVlanPortTask(Consumer &consumer); + void doStpPortTask(Consumer &consumer); + void doVlanMemUpdateTask(Consumer &consumer); + void doLagMemUpdateTask(Consumer &consumer); + void doStpMstGlobalTask(Consumer &consumer); + void doStpMstInstTask(Consumer &consumer); + void doStpMstInstPortTask(Consumer &consumer); + + bool isVlanStateOk(const std::string &alias); + bool isLagStateOk(const std::string &alias); + bool isStpPortEmpty(); + bool isStpEnabled(const std::string &intf_name); + int getAllVlanMem(const std::string &vlanKey, std::vector& port_list); + int getAllPortVlan(const std::string &intfKey, std::vector& vlan_list); + int8_t getVlanMemMode(const std::string &key); + int allocL2Instance(uint32_t vlan_id); + void deallocL2Instance(uint32_t vlan_id); + bool isLagEmpty(const std::string &key); + void processStpPortAttr(const std::string op, std::vector&tupEntry, const std::string intfName); + void processStpVlanPortAttr(const std::string op, uint32_t vlan_id, const std::string intfName, + std::vector&tupEntry); + void processStpMstInstPortAttr(const std::string op, uint16_t mst_id, const std::string intfName, + std::vector&tupEntry); + std::vector parseVlanList(const std::string &vlanStr); + void updateVlanInstanceMap(int instance, const std::vector&newVlanList, bool operation); + bool isInstanceMapped(uint16_t instance); + std::vector getVlanAliasesForInstance(uint16_t instance); + + +}; + +} +#endif + diff --git a/cfgmgr/stpmgrd.cpp b/cfgmgr/stpmgrd.cpp new file mode 100644 index 00000000000..52b9f5a9dd9 --- /dev/null +++ b/cfgmgr/stpmgrd.cpp @@ -0,0 +1,123 @@ +#include + +#include "stpmgr.h" +#include "netdispatcher.h" +#include "netlink.h" +#include "select.h" +#include "warm_restart.h" + +using namespace std; +using namespace swss; + +bool gSwssRecord = false; +bool gLogRotate = false; +ofstream gRecordOfs; +string gRecordFile; + +#define SELECT_TIMEOUT 1000 + +int main(int argc, char **argv) +{ + Logger::linkToDbNative("stpmgrd"); + SWSS_LOG_ENTER(); + + SWSS_LOG_NOTICE("--- Starting stpmgrd ---"); + + if (fopen("/stpmgrd_dbg_reload", "r")) + { + Logger::setMinPrio(Logger::SWSS_DEBUG); + } + + try + { + DBConnector conf_db(CONFIG_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector app_db(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector state_db(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + + WarmStart::initialize("stpmgrd", "stpd"); + WarmStart::checkWarmStart("stpmgrd", "stpd"); + + // Config DB Tables + TableConnector conf_stp_global_table(&conf_db, CFG_STP_GLOBAL_TABLE_NAME); + TableConnector conf_stp_vlan_table(&conf_db, CFG_STP_VLAN_TABLE_NAME); + TableConnector conf_stp_vlan_port_table(&conf_db, CFG_STP_VLAN_PORT_TABLE_NAME); + TableConnector conf_stp_port_table(&conf_db, CFG_STP_PORT_TABLE_NAME); + TableConnector conf_mst_global_table(&conf_db, "STP_MST"); + TableConnector conf_mst_inst_table(&conf_db, "STP_MST_INST"); + TableConnector conf_mst_inst_port_table(&conf_db, "STP_MST_PORT"); + // VLAN DB Tables + TableConnector state_vlan_member_table(&state_db, STATE_VLAN_MEMBER_TABLE_NAME); + + // LAG Tables + TableConnector conf_lag_member_table(&conf_db, CFG_LAG_MEMBER_TABLE_NAME); + vector tables = { + conf_stp_global_table, + conf_stp_vlan_table, + conf_stp_vlan_port_table, + conf_stp_port_table, + conf_lag_member_table, + state_vlan_member_table, + conf_mst_global_table, + conf_mst_inst_table, + conf_mst_inst_port_table + }; + + + StpMgr stpmgr(&conf_db, &app_db, &state_db, tables); + + // Open a Unix Domain Socket with STPd for communication + stpmgr.ipcInitStpd(); + stpmgr.isPortInitDone(&app_db); + + // Get max STP instances from state DB and send to stpd + STP_INIT_READY_MSG msg; + memset(&msg, 0, sizeof(STP_INIT_READY_MSG)); + msg.max_stp_instances = stpmgr.getStpMaxInstances(); + stpmgr.sendMsgStpd(STP_INIT_READY, sizeof(msg), (void *)&msg); + + // Get Base MAC + Table table(&conf_db, "DEVICE_METADATA"); + std::vector ovalues; + table.get("localhost", ovalues); + auto it = std::find_if( ovalues.begin(), ovalues.end(), [](const FieldValueTuple& t){ return t.first == "mac";} ); + if ( it == ovalues.end() ) { + throw runtime_error("couldn't find MAC address of the device from config DB"); + } + stpmgr.macAddress = MacAddress(it->second); + + vector cfgOrchList = {&stpmgr}; + + Select s; + for (Orch *o: cfgOrchList) + { + s.addSelectables(o->getSelectables()); + } + + while (true) + { + Selectable *sel; + int ret; + + ret = s.select(&sel, SELECT_TIMEOUT); + if (ret == Select::ERROR) + { + SWSS_LOG_NOTICE("Error: %s!", strerror(errno)); + continue; + } + if (ret == Select::TIMEOUT) + { + stpmgr.doTask(); + continue; + } + + auto *c = (Executor *)sel; + c->execute(); + } + } + catch (const exception &e) + { + SWSS_LOG_ERROR("Runtime error: %s", e.what()); + } + + return -1; +} \ No newline at end of file diff --git a/cfgmgr/teammgr.cpp b/cfgmgr/teammgr.cpp index 36c9d134e14..f6c6394cdb2 100644 --- a/cfgmgr/teammgr.cpp +++ b/cfgmgr/teammgr.cpp @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include @@ -171,18 +173,29 @@ void TeamMgr::cleanTeamProcesses() SWSS_LOG_ENTER(); SWSS_LOG_NOTICE("Cleaning up LAGs during shutdown..."); - std::unordered_map aliasPidMap; + std::unordered_map aliasPidMap; for (const auto& alias: m_lagList) { - std::string res; pid_t pid; + // Sleep for 10 milliseconds so as to not overwhelm the netlink + // socket buffers with events about interfaces going down + std::this_thread::sleep_for(std::chrono::milliseconds(10)); try { - std::stringstream cmd; - cmd << "cat " << shellquote("/var/run/teamd/" + alias + ".pid"); - EXEC_WITH_ERROR_THROW(cmd.str(), res); + ifstream pidFile("/var/run/teamd/" + alias + ".pid"); + if (pidFile.is_open()) + { + pidFile >> pid; + aliasPidMap[alias] = pid; + SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); + } + else + { + SWSS_LOG_NOTICE("Unable to read pid file for %s, skipping...", alias.c_str()); + continue; + } } catch (const std::exception &e) { @@ -191,32 +204,15 @@ void TeamMgr::cleanTeamProcesses() continue; } - try - { - pid = static_cast(std::stoul(res, nullptr, 10)); - aliasPidMap[alias] = pid; - - SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); - } - catch (const std::exception &e) + if (kill(pid, SIGTERM)) { - SWSS_LOG_ERROR("Failed to read port channel %s pid: %s", alias.c_str(), e.what()); - continue; + SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, strerror(errno)); + aliasPidMap.erase(alias); } - - try + else { - std::stringstream cmd; - cmd << "kill -TERM " << pid; - EXEC_WITH_ERROR_THROW(cmd.str(), res); - SWSS_LOG_NOTICE("Sent SIGTERM to port channel %s pid %d", alias.c_str(), pid); } - catch (const std::exception &e) - { - SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, e.what()); - aliasPidMap.erase(alias); - } } for (const auto& cit: aliasPidMap) @@ -224,13 +220,12 @@ void TeamMgr::cleanTeamProcesses() const auto &alias = cit.first; const auto &pid = cit.second; - std::stringstream cmd; - std::string res; - SWSS_LOG_NOTICE("Waiting for port channel %s pid %d to stop...", alias.c_str(), pid); - cmd << "tail -f --pid=" << pid << " /dev/null"; - EXEC_WITH_ERROR_THROW(cmd.str(), res); + while (!kill(pid, 0)) + { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } } SWSS_LOG_NOTICE("LAGs cleanup is done"); @@ -396,11 +391,15 @@ bool TeamMgr::checkPortIffUp(const string &port) if (fd == -1 || ioctl(fd, SIOCGIFFLAGS, &ifr) == -1) { SWSS_LOG_ERROR("Failed to get port %s flags", port.c_str()); + if (fd != -1) + { + close(fd); + } return false; } SWSS_LOG_INFO("Get port %s flags %i", port.c_str(), ifr.ifr_flags); - + close(fd); return ifr.ifr_flags & IFF_UP; } @@ -654,42 +653,25 @@ bool TeamMgr::removeLag(const string &alias) { SWSS_LOG_ENTER(); - stringstream cmd; - string res; pid_t pid; - try - { - std::stringstream cmd; - cmd << "cat " << shellquote("/var/run/teamd/" + alias + ".pid"); - EXEC_WITH_ERROR_THROW(cmd.str(), res); - } - catch (const std::exception &e) - { - SWSS_LOG_NOTICE("Failed to remove non-existent port channel %s pid...", alias.c_str()); - return false; - } - - try { - pid = static_cast(std::stoul(res, nullptr, 10)); - SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); - } - catch (const std::exception &e) - { - SWSS_LOG_ERROR("Failed to read port channel %s pid: %s", alias.c_str(), e.what()); - return false; + ifstream pidfile("/var/run/teamd/" + alias + ".pid"); + if (pidfile.is_open()) + { + pidfile >> pid; + SWSS_LOG_INFO("Read port channel %s pid %d", alias.c_str(), pid); + } + else + { + SWSS_LOG_NOTICE("Failed to remove non-existent port channel %s pid...", alias.c_str()); + return false; + } } - try - { - std::stringstream cmd; - cmd << "kill -TERM " << pid; - EXEC_WITH_ERROR_THROW(cmd.str(), res); - } - catch (const std::exception &e) + if (kill(pid, SIGTERM)) { - SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, e.what()); + SWSS_LOG_ERROR("Failed to send SIGTERM to port channel %s pid %d: %s", alias.c_str(), pid, strerror(errno)); return false; } diff --git a/cfgmgr/tunnelmgr.cpp b/cfgmgr/tunnelmgr.cpp index a81438470fe..96ab7a70aee 100644 --- a/cfgmgr/tunnelmgr.cpp +++ b/cfgmgr/tunnelmgr.cpp @@ -108,6 +108,7 @@ static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res) TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames) : Orch(cfgDb, tableNames), m_appIpInIpTunnelTable(appDb, APP_TUNNEL_DECAP_TABLE_NAME), + m_appIpInIpTunnelDecapTermTable(appDb, APP_TUNNEL_DECAP_TERM_TABLE_NAME), m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME), m_cfgTunnelTable(cfgDb, CFG_TUNNEL_TABLE_NAME) { @@ -223,6 +224,7 @@ bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) const std::string & tunnelName = kfvKey(t); const std::string & op = kfvOp(t); + std::string src_ip; TunnelInfo tunInfo; for (auto fieldValue : kfvFieldsValues(t)) @@ -237,6 +239,10 @@ bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) { tunInfo.type = value; } + else if (field == "src_ip") + { + src_ip = value; + } } if (op == SET_COMMAND) @@ -260,7 +266,27 @@ bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) */ if (m_tunnelReplay.find(tunnelName) == m_tunnelReplay.end()) { - m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t)); + /* Create the tunnel */ + std::vector fvs; + std::copy_if(kfvFieldsValues(t).cbegin(), kfvFieldsValues(t).cend(), + std::back_inserter(fvs), + [](const FieldValueTuple & fv) { + return fvField(fv) != "dst_ip"; + }); + m_appIpInIpTunnelTable.set(tunnelName, fvs); + + /* Create the decap term */ + fvs.clear(); + if (!src_ip.empty()) + { + fvs.emplace_back("src_ip", src_ip); + fvs.emplace_back("term_type", "P2P"); + } + else + { + fvs.emplace_back("term_type", "P2MP"); + } + m_appIpInIpTunnelDecapTermTable.set(tunnelName + DEFAULT_KEY_SEPARATOR + tunInfo.dst_ip, fvs); } } m_tunnelReplay.erase(tunnelName); @@ -279,6 +305,7 @@ bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) tunInfo = it->second; if (tunInfo.type == IPINIP) { + m_appIpInIpTunnelDecapTermTable.del(tunnelName + DEFAULT_KEY_SEPARATOR + tunInfo.dst_ip); m_appIpInIpTunnelTable.del(tunnelName); } else diff --git a/cfgmgr/tunnelmgr.h b/cfgmgr/tunnelmgr.h index 53d2f272786..1854e05f290 100644 --- a/cfgmgr/tunnelmgr.h +++ b/cfgmgr/tunnelmgr.h @@ -33,6 +33,7 @@ class TunnelMgr : public Orch void finalizeWarmReboot(); ProducerStateTable m_appIpInIpTunnelTable; + ProducerStateTable m_appIpInIpTunnelDecapTermTable; Table m_cfgPeerTable; Table m_cfgTunnelTable; diff --git a/cfgmgr/vlanmgr.cpp b/cfgmgr/vlanmgr.cpp index ee5b7a70674..96ee596958c 100644 --- a/cfgmgr/vlanmgr.cpp +++ b/cfgmgr/vlanmgr.cpp @@ -21,8 +21,9 @@ using namespace swss; extern MacAddress gMacAddress; -VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames) : - Orch(cfgDb, tableNames), +VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames, + const vector &stateTableNames) : + Orch(cfgDb, stateDb, tableNames, stateTableNames), m_cfgVlanTable(cfgDb, CFG_VLAN_TABLE_NAME), m_cfgVlanMemberTable(cfgDb, CFG_VLAN_MEMBER_TABLE_NAME), m_statePortTable(stateDb, STATE_PORT_TABLE_NAME), @@ -31,6 +32,8 @@ VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_stateVlanMemberTable(stateDb, STATE_VLAN_MEMBER_TABLE_NAME), m_appVlanTableProducer(appDb, APP_VLAN_TABLE_NAME), m_appVlanMemberTableProducer(appDb, APP_VLAN_MEMBER_TABLE_NAME), + m_appFdbTableProducer(appDb, APP_FDB_TABLE_NAME), + m_appPortTableProducer(appDb, APP_PORT_TABLE_NAME), replayDone(false) { SWSS_LOG_ENTER(); @@ -79,7 +82,12 @@ VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c // /sbin/bridge vlan del vid 1 dev Bridge self; // /sbin/ip link del dummy 2>/dev/null; // /sbin/ip link add dummy type dummy && - // /sbin/ip link set dummy master Bridge" + // /sbin/ip link set dummy master Bridge && + // /sbin/ip link set dummy up; + // /sbin/ip link set Bridge down && + // /sbin/ip link set Bridge up" + // Note: We shutdown and start-up the Bridge at the end to ensure that its + // link-local IPv6 address matches its MAC address. const std::string cmds = std::string("") + BASH_CMD + " -c \"" @@ -90,29 +98,21 @@ VlanMgr::VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c + BRIDGE_CMD + " vlan del vid " + DEFAULT_VLAN_ID + " dev " + DOT1Q_BRIDGE_NAME + " self; " + IP_CMD + " link del dev dummy 2>/dev/null; " + IP_CMD + " link add dummy type dummy && " - + IP_CMD + " link set dummy master " + DOT1Q_BRIDGE_NAME + "\""; + + IP_CMD + " link set dummy master " + DOT1Q_BRIDGE_NAME + " && " + + IP_CMD + " link set dummy up; " + + IP_CMD + " link set " + DOT1Q_BRIDGE_NAME + " down && " + + IP_CMD + " link set " + DOT1Q_BRIDGE_NAME + " up\""; std::string res; EXEC_WITH_ERROR_THROW(cmds, res); - // The generated command is: - // /bin/echo 1 > /sys/class/net/Bridge/bridge/vlan_filtering - const std::string echo_cmd = std::string("") - + ECHO_CMD + " 1 > /sys/class/net/" + DOT1Q_BRIDGE_NAME + "/bridge/vlan_filtering"; - - int ret = swss::exec(echo_cmd, res); - /* echo will fail in virtual switch since /sys directory is read-only. - * need to use ip command to setup the vlan_filtering which is not available in debian 8. - * Once we move sonic to debian 9, we can use IP command by default - * ip command available in Debian 9 to create a bridge with a vlan filtering: - * /sbin/ip link add Bridge up type bridge vlan_filtering 1 */ - if (ret != 0) - { - const std::string echo_cmd_backup = std::string("") - + IP_CMD + " link set " + DOT1Q_BRIDGE_NAME + " type bridge vlan_filtering 1"; + // /sbin/ip link set Bridge type bridge vlan_filtering 1 + const std::string vlan_filtering_cmd = std::string(IP_CMD) + " link set " + DOT1Q_BRIDGE_NAME + " type bridge vlan_filtering 1"; + EXEC_WITH_ERROR_THROW(vlan_filtering_cmd, res); - EXEC_WITH_ERROR_THROW(echo_cmd_backup, res); - } + // /sbin/ip link set Bridge type bridge no_linklocal_learn 1 + const std::string no_ll_learn_cmd = std::string(IP_CMD) + " link set " + DOT1Q_BRIDGE_NAME + " type bridge no_linklocal_learn 1"; + EXEC_WITH_ERROR_THROW(no_ll_learn_cmd, res); } bool VlanMgr::addHostVlan(int vlan_id) @@ -199,15 +199,34 @@ bool VlanMgr::setHostVlanMac(int vlan_id, const string &mac) { SWSS_LOG_ENTER(); + std::string res; + + /* + * Bring down the bridge before changing MAC addresses of the bridge and the VLAN interface. + * This is done so that the IPv6 link-local addresses of the bridge and the VLAN interface + * are updated after MAC change. + * /sbin/ip link set Bridge down + */ + string bridge_down(IP_CMD " link set " DOT1Q_BRIDGE_NAME " down"); + EXEC_WITH_ERROR_THROW(bridge_down, res); + // The command should be generated as: - // /sbin/ip link set Vlan{{vlan_id}} address {{mac}} + // /sbin/ip link set Vlan{{vlan_id}} address {{mac}} && + // /sbin/ip link set Bridge address {{mac}} ostringstream cmds; cmds << IP_CMD " link set " VLAN_PREFIX + std::to_string(vlan_id) + " address " << shellquote(mac) << " && " IP_CMD " link set " DOT1Q_BRIDGE_NAME " address " << shellquote(mac); - - std::string res; + res.clear(); EXEC_WITH_ERROR_THROW(cmds.str(), res); + /* + * Start up the bridge again. + * /sbin/ip link set Bridge up + */ + string bridge_up(IP_CMD " link set " DOT1Q_BRIDGE_NAME " up"); + res.clear(); + EXEC_WITH_ERROR_THROW(bridge_up, res); + return true; } @@ -232,7 +251,23 @@ bool VlanMgr::addHostVlanMember(int vlan_id, const string &port_alias, const str cmds << BASH_CMD " -c " << shellquote(inner.str()); std::string res; - EXEC_WITH_ERROR_THROW(cmds.str(), res); + try + { + EXEC_WITH_ERROR_THROW(cmds.str(), res); + } + catch (const std::runtime_error& e) + { + // Race conidtion can happen with portchannel removal might happen + // but state db is not updated yet so we can do retry instead of sending exception + if (!port_alias.compare(0, strlen(LAG_PREFIX), LAG_PREFIX)) + { + return false; + } + else + { + EXEC_WITH_ERROR_THROW(cmds.str(), res); + } + } return true; } @@ -642,6 +677,13 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) m_stateVlanMemberTable.set(kfvKey(t), fvVector); m_vlanMemberReplay.erase(kfvKey(t)); + m_PortVlanMember[port_alias][vlan_alias] = tagging_mode; + } + else + { + SWSS_LOG_INFO("Netdevice for %s not ready, delaying", kfvKey(t).c_str()); + it++; + continue; } } else if (op == DEL_COMMAND) @@ -654,6 +696,7 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) key += port_alias; m_appVlanMemberTableProducer.del(key); m_stateVlanMemberTable.del(kfvKey(t)); + m_PortVlanMember[port_alias].erase(vlan_alias); } else { @@ -680,6 +723,257 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) } } +void VlanMgr::doVlanPacPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto &t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + + SWSS_LOG_DEBUG("processing %s operation %s", alias.c_str(), + op.empty() ? "none" : op.c_str()); + + if (op == SET_COMMAND) + { + string learn_mode; + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "learn_mode") + { + learn_mode = fvValue(i); + } + } + if (!learn_mode.empty()) + { + SWSS_LOG_NOTICE("set port learn mode port %s learn_mode %s\n", alias.c_str(), learn_mode.c_str()); + vector fvVector; + FieldValueTuple portLearnMode("learn_mode", learn_mode); + fvVector.push_back(portLearnMode); + m_appPortTableProducer.set(alias, fvVector); + } + } + else if (op == DEL_COMMAND) + { + if (isMemberStateOk(alias)) + { + vector fvVector; + FieldValueTuple portLearnMode("learn_mode", "hardware"); + fvVector.push_back(portLearnMode); + m_appPortTableProducer.set(alias, fvVector); + } + } + it = consumer.m_toSync.erase(it); + } +} + +void VlanMgr::doVlanPacFdbTask(Consumer &consumer) +{ + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + /* format: | */ + vector keys = tokenize(kfvKey(t), config_db_key_delimiter, 1); + /* keys[0] is vlan as (Vlan10) and keys[1] is mac as (00-00-00-00-00-00) */ + string op = kfvOp(t); + + SWSS_LOG_NOTICE("VlanMgr process static MAC vlan: %s mac: %s ", keys[0].c_str(), keys[1].c_str()); + + int vlan_id; + vlan_id = stoi(keys[0].substr(4)); + + if (!m_vlans.count(keys[0])) + { + SWSS_LOG_NOTICE("Vlan %s not available yet, mac %s", keys[0].c_str(), keys[1].c_str()); + it++; + continue; + } + + MacAddress mac = MacAddress(keys[1]); + + string key = VLAN_PREFIX + to_string(vlan_id); + key += DEFAULT_KEY_SEPARATOR; + key += mac.to_string(); + + if (op == SET_COMMAND) + { + string port, discard = "false", type = "static"; + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "port") + { + port = fvValue(i); + } + if (fvField(i) == "discard") + { + discard = fvValue(i); + } + if (fvField(i) == "type") + { + type = fvValue(i); + } + } + SWSS_LOG_NOTICE("PAC FDB SET %s port %s discard %s type %s\n", + key.c_str(), port.c_str(), discard.c_str(), type.c_str()); + vector fvVector; + FieldValueTuple p("port", port); + fvVector.push_back(p); + FieldValueTuple t("type", type); + fvVector.push_back(t); + FieldValueTuple d("discard", discard); + fvVector.push_back(d); + + m_appFdbTableProducer.set(key, fvVector); + } + else if (op == DEL_COMMAND) + { + m_appFdbTableProducer.del(key); + } + it = consumer.m_toSync.erase(it); + } +} + +void VlanMgr::doVlanPacVlanMemberTask(Consumer &consumer) +{ + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto &t = it->second; + + string key = kfvKey(t); + + key = key.substr(4); + size_t found = key.find(CONFIGDB_KEY_SEPARATOR); + int vlan_id = 0; + string vlan_alias, port_alias; + if (found != string::npos) + { + vlan_id = stoi(key.substr(0, found)); + port_alias = key.substr(found+1); + } + + vlan_alias = VLAN_PREFIX + to_string(vlan_id); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + /* Don't proceed if member port/lag is not ready yet */ + if (!isMemberStateOk(port_alias) || !isVlanStateOk(vlan_alias)) + { + SWSS_LOG_DEBUG("%s not ready, delaying", kfvKey(t).c_str()); + it++; + continue; + } + string tagging_mode = "untagged"; + auto vlans = m_PortVlanMember[port_alias]; + for (const auto& vlan : vlans) + { + string vlan_alias = vlan.first; + removePortFromVlan(port_alias, vlan_alias); + } + SWSS_LOG_NOTICE("Add Vlan Member key: %s", kfvKey(t).c_str()); + if (addHostVlanMember(vlan_id, port_alias, tagging_mode)) + { + key = VLAN_PREFIX + to_string(vlan_id); + key += DEFAULT_KEY_SEPARATOR; + key += port_alias; + vector fvVector = kfvFieldsValues(t); + FieldValueTuple s("dynamic", "yes"); + fvVector.push_back(s); + m_appVlanMemberTableProducer.set(key, fvVector); + + vector fvVector1; + FieldValueTuple s1("state", "ok"); + fvVector.push_back(s1); + m_stateVlanMemberTable.set(kfvKey(t), fvVector); + } + } + else if (op == DEL_COMMAND) + { + if (isVlanMemberStateOk(kfvKey(t))) + { + SWSS_LOG_NOTICE("Remove Vlan Member key: %s", kfvKey(t).c_str()); + removeHostVlanMember(vlan_id, port_alias); + key = VLAN_PREFIX + to_string(vlan_id); + key += DEFAULT_KEY_SEPARATOR; + key += port_alias; + m_appVlanMemberTableProducer.del(key); + m_stateVlanMemberTable.del(kfvKey(t)); + } + + auto vlans = m_PortVlanMember[port_alias]; + for (const auto& vlan : vlans) + { + string vlan_alias = vlan.first; + string tagging_mode = vlan.second; + SWSS_LOG_NOTICE("Add Vlan Member vlan: %s port %s tagging_mode %s", + vlan_alias.c_str(), port_alias.c_str(), tagging_mode.c_str()); + addPortToVlan(port_alias, vlan_alias, tagging_mode); + } + } + /* Other than the case of member port/lag is not ready, no retry will be performed */ + it = consumer.m_toSync.erase(it); + } +} + +void VlanMgr::addPortToVlan(const std::string& membername, const std::string& vlan_alias, + const std::string& tagging_mode) +{ + SWSS_LOG_NOTICE("member %s vlan %s tagging_mode %s", + membername.c_str(), vlan_alias.c_str(), tagging_mode.c_str()); + int vlan_id = stoi(vlan_alias.substr(4)); + if (addHostVlanMember(vlan_id, membername, tagging_mode)) + { + std::string key = VLAN_PREFIX + to_string(vlan_id); + key += DEFAULT_KEY_SEPARATOR; + key += membername; + vector fvVector; + FieldValueTuple s("tagging_mode", tagging_mode); + fvVector.push_back(s); + FieldValueTuple s1("dynamic", "no"); + fvVector.push_back(s1); + SWSS_LOG_INFO("key: %s\n", key.c_str()); + m_appVlanMemberTableProducer.set(key, fvVector); + + vector fvVector1; + FieldValueTuple s2("state", "ok"); + fvVector1.push_back(s2); + key = VLAN_PREFIX + to_string(vlan_id); + key += '|'; + key += membername; + m_stateVlanMemberTable.set(key, fvVector1); + } +} + +void VlanMgr::removePortFromVlan(const std::string& membername, const std::string& vlan_alias) +{ + SWSS_LOG_NOTICE("member %s vlan %s", + membername.c_str(), vlan_alias.c_str()); + int vlan_id = stoi(vlan_alias.substr(4)); + std::string key = VLAN_PREFIX + to_string(vlan_id); + key += '|'; + key += membername; + if (isVlanMemberStateOk(key)) + { + key = VLAN_PREFIX + to_string(vlan_id); + key += ':'; + key += membername; + SWSS_LOG_INFO("key: %s\n", key.c_str()); + m_appVlanMemberTableProducer.del(key); + + key = VLAN_PREFIX + to_string(vlan_id); + key += '|'; + key += membername; + m_stateVlanMemberTable.del(key); + } +} + void VlanMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -694,6 +988,18 @@ void VlanMgr::doTask(Consumer &consumer) { doVlanMemberTask(consumer); } + else if (table_name == STATE_OPER_PORT_TABLE_NAME) + { + doVlanPacPortTask(consumer); + } + else if (table_name == STATE_OPER_FDB_TABLE_NAME) + { + doVlanPacFdbTask(consumer); + } + else if (table_name == STATE_OPER_VLAN_MEMBER_TABLE_NAME) + { + doVlanPacVlanMemberTask(consumer); + } else { SWSS_LOG_ERROR("Unknown config table %s ", table_name.c_str()); diff --git a/cfgmgr/vlanmgr.h b/cfgmgr/vlanmgr.h index 8cf467f41c2..7fce59ce65f 100644 --- a/cfgmgr/vlanmgr.h +++ b/cfgmgr/vlanmgr.h @@ -14,11 +14,13 @@ namespace swss { class VlanMgr : public Orch { public: - VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const std::vector &tableNames); + VlanMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const std::vector &tableNames, + const std::vector &stateTableNames); using Orch::doTask; private: ProducerStateTable m_appVlanTableProducer, m_appVlanMemberTableProducer; + ProducerStateTable m_appFdbTableProducer, m_appPortTableProducer; Table m_cfgVlanTable, m_cfgVlanMemberTable; Table m_statePortTable, m_stateLagTable; Table m_stateVlanTable, m_stateVlanMemberTable; @@ -26,6 +28,7 @@ class VlanMgr : public Orch std::set m_vlanReplay; std::set m_vlanMemberReplay; bool replayDone; + std::unordered_map> m_PortVlanMember; void doTask(Consumer &consumer); void doVlanTask(Consumer &consumer); @@ -43,6 +46,11 @@ class VlanMgr : public Orch bool isVlanStateOk(const std::string &alias); bool isVlanMacOk(); bool isVlanMemberStateOk(const std::string &vlanMemberKey); + void doVlanPacPortTask(Consumer &consumer); + void doVlanPacFdbTask(Consumer &consumer); + void doVlanPacVlanMemberTask(Consumer &consumer); + void addPortToVlan(const std::string& port_alias, const std::string& vlan_alias, const std::string& tagging_mode); + void removePortFromVlan(const std::string& port_alias, const std::string& vlan_alias); }; } diff --git a/cfgmgr/vlanmgrd.cpp b/cfgmgr/vlanmgrd.cpp index 84bc19cf088..d430063247e 100644 --- a/cfgmgr/vlanmgrd.cpp +++ b/cfgmgr/vlanmgrd.cpp @@ -36,7 +36,11 @@ int main(int argc, char **argv) CFG_VLAN_TABLE_NAME, CFG_VLAN_MEMBER_TABLE_NAME, }; - + vector state_vlan_tables = { + STATE_OPER_PORT_TABLE_NAME, + STATE_OPER_FDB_TABLE_NAME, + STATE_OPER_VLAN_MEMBER_TABLE_NAME + }; DBConnector cfgDb("CONFIG_DB", 0); DBConnector appDb("APPL_DB", 0); DBConnector stateDb("STATE_DB", 0); @@ -58,7 +62,7 @@ int main(int argc, char **argv) } gMacAddress = MacAddress(it->second); - VlanMgr vlanmgr(&cfgDb, &appDb, &stateDb, cfg_vlan_tables); + VlanMgr vlanmgr(&cfgDb, &appDb, &stateDb, cfg_vlan_tables, state_vlan_tables); std::vector cfgOrchList = {&vlanmgr}; diff --git a/cfgmgr/vxlanmgr.cpp b/cfgmgr/vxlanmgr.cpp index 4d41819053c..d078372d644 100644 --- a/cfgmgr/vxlanmgr.cpp +++ b/cfgmgr/vxlanmgr.cpp @@ -139,6 +139,15 @@ static int cmdDeleteVxlan(const swss::VxlanMgr::VxlanInfo & info, std::string & return swss::exec(cmd.str(), res); } +static int cmdVxlanLearningOff(const swss::VxlanMgr::VxlanInfo & info, std::string & res) +{ + // bridge link set dev {{VXLAN}} learning off + ostringstream cmd; + cmd << BRIDGE_CMD << " link set dev " + << shellquote(info.m_vxlan) << " learning off"; + return swss::exec(cmd.str(), res); +} + static int cmdDeleteVxlanFromVxlanIf(const swss::VxlanMgr::VxlanInfo & info, std::string & res) { // brctl delif {{VXLAN_IF}} {{VXLAN}} @@ -683,6 +692,7 @@ bool VxlanMgr::doVxlanEvpnNvoCreateTask(const KeyOpFieldsValuesTuple & t) } if (field == SOURCE_VTEP) { + disableLearningForAllVxlanNetdevices(); m_EvpnNvoCache[EvpnNvoName] = value; } } @@ -946,8 +956,9 @@ int VxlanMgr::createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_ { std::string res, cmds; std::string link_add_cmd, link_set_master_cmd, link_up_cmd; - std::string bridge_add_cmd, bridge_untagged_add_cmd, bridge_del_vid_cmd; + std::string bridge_add_cmd, bridge_untagged_add_cmd, bridge_del_vid_cmd, bridge_learn_off_cmd; std::string vxlan_dev_name; + bool evpn_nvo = false; vxlan_dev_name = std::string("") + std::string(vxlanTunnelName) + "-" + std::string(vlan_id); @@ -981,11 +992,20 @@ int VxlanMgr::createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_ SWSS_LOG_INFO("Creating VxlanNetDevice %s", vxlan_dev_name.c_str()); } + std::map::iterator it = m_EvpnNvoCache.begin(); + if ((it != m_EvpnNvoCache.end()) && (it->second == vxlanTunnelName)) + { + SWSS_LOG_INFO("EVPN NVO exists. Disabling learning on VxlanNetDevice %s", + vxlan_dev_name.c_str()); + evpn_nvo = true; + } + // ip link add type vxlan id local remote // dstport 4789 // ip link set master DOT1Q_BRIDGE_NAME // bridge vlan add vid dev // bridge vlan add vid untagged pvid dev + // bridge link set dev learning off // ip link set up link_add_cmd = std::string("") + IP_CMD + " link add " + vxlan_dev_name + @@ -1007,6 +1027,9 @@ int VxlanMgr::createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_ bridge_del_vid_cmd = std::string("") + BRIDGE_CMD + " vlan del vid 1 dev " + vxlan_dev_name; + + bridge_learn_off_cmd = std::string("") + BRIDGE_CMD + " link set dev " + + vxlan_dev_name + " learning off "; cmds = std::string("") + BASH_CMD + " -c \"" + @@ -1020,6 +1043,11 @@ int VxlanMgr::createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_ cmds += bridge_del_vid_cmd + " && "; } + if (evpn_nvo) + { + cmds += bridge_learn_off_cmd + " && "; + } + cmds += link_up_cmd + "\""; return swss::exec(cmds,res); @@ -1210,6 +1238,22 @@ void VxlanMgr::clearAllVxlanDevices() } } +void VxlanMgr::disableLearningForAllVxlanNetdevices() +{ + for (auto it = m_vxlanTunnelMapCache.begin(); it != m_vxlanTunnelMapCache.end(); it++) + { + std::string netdev_name = it->second.vxlan_dev_name; + VxlanInfo info; + std::string res; + if (!netdev_name.empty()) + { + SWSS_LOG_INFO("Disable learning for NetDevice %s\n", netdev_name.c_str()); + info.m_vxlan = netdev_name; + cmdVxlanLearningOff(info, res); + } + } +} + void VxlanMgr::waitTillReadyToReconcile() { for (;;) diff --git a/cfgmgr/vxlanmgr.h b/cfgmgr/vxlanmgr.h index 68d6250fe5d..de60a44a6ce 100644 --- a/cfgmgr/vxlanmgr.h +++ b/cfgmgr/vxlanmgr.h @@ -90,6 +90,7 @@ class VxlanMgr : public Orch bool deleteVxlan(const VxlanInfo & info); void clearAllVxlanDevices(); + void disableLearningForAllVxlanNetdevices(); ProducerStateTable m_appVxlanTunnelTable,m_appVxlanTunnelMapTable,m_appEvpnNvoTable; Table m_cfgVxlanTunnelTable,m_cfgVnetTable,m_stateVrfTable,m_stateVxlanTable, m_appSwitchTable; diff --git a/configure.ac b/configure.ac index 5efe0a67bd5..145231749ce 100644 --- a/configure.ac +++ b/configure.ac @@ -20,6 +20,11 @@ AC_CHECK_LIB([team], [team_alloc], PKG_CHECK_MODULES([JANSSON], [jansson]) +AC_CHECK_FILE([/usr/include/stp_ipc.h], + AM_CONDITIONAL(HAVE_STP, true), + [AC_MSG_WARN([stp is not installed.]) + AM_CONDITIONAL(HAVE_STP, false)]) + AC_CHECK_LIB([sai], [sai_object_type_query], AM_CONDITIONAL(HAVE_SAI, true), [AC_MSG_WARN([libsai is not installed.]) @@ -54,7 +59,7 @@ AC_CHECK_LIB([nl-genl-3], [nl_socket_get_cb]) AC_CHECK_LIB([nl-route-3], [rtnl_route_nh_get_encap_mpls_dst]) AC_CHECK_LIB([nl-nf-3], [nfnl_connect]) -CFLAGS_COMMON="-std=c++14 -Wall -fPIC -Wno-write-strings -I/usr/include/swss" +CFLAGS_COMMON="-std=c++14 -Wall -fPIC -Wno-write-strings -I/usr/include/swss -I/usr/include" AC_ARG_WITH(libnl-3.0-inc, [ --with-libnl-3.0-inc=DIR @@ -101,6 +106,7 @@ CFLAGS_COMMON+=" -Wvariadic-macros" CFLAGS_COMMON+=" -Wno-switch-default" CFLAGS_COMMON+=" -Wno-long-long" CFLAGS_COMMON+=" -Wno-redundant-decls" +CFLAGS_COMMON+=" -Wno-error=missing-field-initializers" # Code testing coverage with gcov AC_MSG_CHECKING(whether to build with gcov testing) @@ -133,9 +139,10 @@ if test "x$asan_enabled" = "xtrue"; then CFLAGS_ASAN+=" -fsanitize=address" CFLAGS_ASAN+=" -DASAN_ENABLED" CFLAGS_ASAN+=" -ggdb -fno-omit-frame-pointer -U_FORTIFY_SOURCE" + CFLAGS_ASAN+=" -Wno-maybe-uninitialized" AC_SUBST(CFLAGS_ASAN) - LDFLAGS_ASAN+=" -lasan" + LDFLAGS_ASAN+=" -fsanitize=address" AC_SUBST(LDFLAGS_ASAN) fi diff --git a/crates/countersyncd/Cargo.lock b/crates/countersyncd/Cargo.lock new file mode 100644 index 00000000000..58bd6b71879 --- /dev/null +++ b/crates/countersyncd/Cargo.lock @@ -0,0 +1,1776 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "getrandom 0.2.15", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +dependencies = [ + "anstyle", + "once_cell", + "windows-sys 0.59.0", +] + +[[package]] +name = "array-init" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" + +[[package]] +name = "async-trait" +version = "0.1.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "backtrace" +version = "0.3.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bindgen" +version = "0.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.96", +] + +[[package]] +name = "binrw" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab81d22cbd2d745852348b2138f3db2103afa8ce043117a374581926a523e267" +dependencies = [ + "array-init", + "binrw_derive 0.11.2", + "bytemuck", +] + +[[package]] +name = "binrw" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d4bca59c20d6f40c2cc0802afbe1e788b89096f61bdf7aeea6bf00f10c2909b" +dependencies = [ + "array-init", + "binrw_derive 0.14.1", + "bytemuck", +] + +[[package]] +name = "binrw_derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6b019a3efebe7f453612083202887b6f1ace59e20d010672e336eea4ed5be97" +dependencies = [ + "either", + "owo-colors", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "binrw_derive" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8ba42866ce5bced2645bfa15e97eef2c62d2bdb530510538de8dd3d04efff3c" +dependencies = [ + "either", + "owo-colors", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "bitflags" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "bytemuck" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" + +[[package]] +name = "cc" +version = "1.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +dependencies = [ + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim 0.11.1", + "terminal_size", + "unicase", + "unicode-width", +] + +[[package]] +name = "clap_derive" +version = "4.5.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "color-eyre" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55146f5e46f237f7423d74111267d4597b59b0dad0ffaf7303bce9945d843ad5" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" +dependencies = [ + "once_cell", + "owo-colors", + "tracing-core", + "tracing-error", +] + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "countersyncd" +version = "0.1.0" +dependencies = [ + "ahash", + "async-trait", + "binrw 0.14.1", + "byteorder", + "chrono", + "clap", + "color-eyre", + "env_logger", + "ipfixrw", + "log", + "neli", + "once_cell", + "rand", + "serial_test", + "swss-common", + "tempfile", + "tokio", + "yaml-rust", +] + +[[package]] +name = "csv" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_macro" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" +dependencies = [ + "derive_builder_core", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "0.99.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "env_filter" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "humantime", + "log", +] + +[[package]] +name = "errno" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + +[[package]] +name = "getset" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded738faa0e88d3abc9d1a13cb11adc2073c400969eeb8793cf7132589959fc" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "iana-time-zone" +version = "0.1.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "ipfixrw" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e18277dde2a264cf269ab1090a9e003b5b323ffb3d02011bdbce697e6aaff18" +dependencies = [ + "ahash", + "binrw 0.11.2", + "csv", + "derive_more", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.169" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" + +[[package]] +name = "libloading" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +dependencies = [ + "cfg-if", + "windows-targets", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + +[[package]] +name = "neli" +version = "0.7.0-rc2" +source = "git+https://github.com/jbaublitz/neli.git?tag=neli-v0.7.0-rc2#73528ae1fb0b2af177711f1a7c6228349d770dfb" +dependencies = [ + "bitflags", + "byteorder", + "derive_builder", + "getset", + "libc", + "log", + "neli-proc-macros", + "parking_lot", +] + +[[package]] +name = "neli-proc-macros" +version = "0.2.0-rc2" +source = "git+https://github.com/jbaublitz/neli.git?tag=neli-v0.7.0-rc2#73528ae1fb0b2af177711f1a7c6228349d770dfb" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55" +dependencies = [ + "proc-macro2", + "syn 2.0.96", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "proc-macro2" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.15", +] + +[[package]] +name = "redox_syscall" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.9.4", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustversion" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "scc" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22b2d775fb28f245817589471dd49c5edf64237f4a19d10ce9a92ff4651a27f4" +dependencies = [ + "sdd", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sdd" +version = "3.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "584e070911c7017da6cb2eb0788d09f43d789029b5877d3e5ecc8acf86ceee21" + +[[package]] +name = "serde" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + "scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" + +[[package]] +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "swss-common" +version = "0.1.0" +source = "git+https://github.com/sonic-net/sonic-swss-common.git?branch=master#1484a851dbfdd4b122c361cd7ea03eca0afe5d63" +dependencies = [ + "bindgen", + "getset", + "lazy_static", + "libc", + "serde", + "tracing-subscriber", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.96" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix 1.0.7", + "windows-sys 0.59.0", +] + +[[package]] +name = "terminal_size" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" +dependencies = [ + "rustix 0.38.44", + "windows-sys 0.59.0", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tokio" +version = "1.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-error" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" +dependencies = [ + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-ident" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" + +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.96", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] diff --git a/crates/countersyncd/Cargo.toml b/crates/countersyncd/Cargo.toml new file mode 100644 index 00000000000..c13b11e030e --- /dev/null +++ b/crates/countersyncd/Cargo.toml @@ -0,0 +1,69 @@ +[package] +name = "countersyncd" +version.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +documentation.workspace = true +keywords.workspace = true +edition.workspace = true + +[dependencies] +# Async runtime +tokio = { workspace = true } + +# Configuration and serialization +yaml-rust = { workspace = true } + +# Netlink for network operations +neli = { workspace = true } + +# IPFIX parser for traffic flow analysis +ipfixrw = { workspace = true } +ahash = { workspace = true } +binrw = { workspace = true } +byteorder = { workspace = true } + +# Logging and error handling +log = { workspace = true } +env_logger = { workspace = true } +chrono = { workspace = true } + +# Utilities +rand = { workspace = true } +once_cell = { workspace = true } + +# Command line utilities +clap = { workspace = true } +color-eyre = { workspace = true } + +# OTEL +tracing = { version = "0.1", features = ["max_level_debug", "release_max_level_info"] } +tracing-opentelemetry = "0.25" +tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] } + +opentelemetry = { version = "0.25", features = ["trace", "metrics"] } +opentelemetry_sdk = { version = "0.25", features = ["rt-tokio"]} +opentelemetry-stdout = "0.25" +opentelemetry-semantic-conventions = "0.25" +opentelemetry-http = "0.25" +opentelemetry-otlp = { version = "0.25", features = ["tonic", "metrics"] } +opentelemetry-proto = { version = "0.25", features = ["tonic", "metrics", "gen-tonic"] } + +# gRPC and HTTP +tonic = "0.12" +tonic-health = "0.12" +prost = "0.13" +prost-types = "0.13" +reqwest = { version = "0.12", default-features = false, features = ["json"] } +reqwest-middleware = "0.3" +reqwest-tracing = { version = "0.5", features = ["opentelemetry_0_25"] } + +# SONiC specific dependencies +swss-common = { workspace = true } + +[dev-dependencies] +# Test utilities +tempfile = { workspace = true } +serial_test = { workspace = true } +async-trait = { workspace = true } diff --git a/crates/countersyncd/src/actor/control_netlink.rs b/crates/countersyncd/src/actor/control_netlink.rs new file mode 100644 index 00000000000..81875b4e49e --- /dev/null +++ b/crates/countersyncd/src/actor/control_netlink.rs @@ -0,0 +1,619 @@ +use std::{thread::sleep, time::Duration}; + +use log::{debug, info, warn}; + +#[allow(unused_imports)] +use neli::{ + consts::socket::{Msg, NlFamily}, + router::synchronous::NlRouter, + socket::NlSocket, + utils::Groups, +}; +use tokio::sync::mpsc::Sender; + +use std::io; + +use super::super::message::netlink::NetlinkCommand; + +#[cfg(not(test))] +type SocketType = NlSocket; +#[cfg(test)] +type SocketType = test::MockSocket; + +/// Size of the buffer used for receiving netlink messages +const BUFFER_SIZE: usize = 0xFFFF; +/// Interval for periodic family existence checks (in milliseconds) +const FAMILY_CHECK_INTERVAL_MS: u64 = 1_000_u64; +/// Interval for heartbeat logging (number of main loop iterations) +const HEARTBEAT_LOG_INTERVAL: u32 = 6000; // 6000 * 10ms = 1 minute +/// Interval for periodic reconnect commands (number of main loop iterations) +const PERIODIC_RECONNECT_INTERVAL: u32 = 6000; // 6000 * 10ms = 1 minute +/// Interval for control socket recreation attempts (number of main loop iterations) +const CONTROL_SOCKET_RECREATE_INTERVAL: u32 = 18000; // 18000 * 10ms = 3 minutes +/// Minimum netlink message header size in bytes +const NETLINK_HEADER_SIZE: usize = 16; +/// Netlink generic message type +const NETLINK_GENERIC_TYPE: u16 = 16; +/// Generic netlink control command: CTRL_CMD_NEWFAMILY +const CTRL_CMD_NEWFAMILY: u8 = 1; +/// Generic netlink control command: CTRL_CMD_DELFAMILY +const CTRL_CMD_DELFAMILY: u8 = 2; +/// Netlink attribute type: CTRL_ATTR_FAMILY_NAME +const CTRL_ATTR_FAMILY_NAME: u16 = 2; +/// Size of generic netlink header in bytes +const GENL_HEADER_SIZE: usize = 20; + +/// Actor responsible for monitoring netlink family registration/unregistration. +/// +/// The ControlNetlinkActor handles: +/// - Monitoring netlink control socket for family status changes +/// - Detecting when target family is registered/unregistered +/// - Sending commands to DataNetlinkActor to trigger reconnection +pub struct ControlNetlinkActor { + /// The generic netlink family name to monitor + family: String, + /// Control socket for monitoring family registration/unregistration + control_socket: Option, + /// Channel for sending commands to data netlink actor + command_sender: Sender, + /// Last time we checked if the family exists + last_family_check: std::time::Instant, + /// Reusable netlink resolver for family existence checks + #[cfg(not(test))] + resolver: Option, + #[cfg(test)] + #[allow(dead_code)] + resolver: Option<()>, +} + +impl ControlNetlinkActor { + /// Creates a new ControlNetlinkActor instance. + /// + /// # Arguments + /// + /// * `family` - The generic netlink family name to monitor + /// * `command_sender` - Channel for sending commands to data netlink actor + /// + /// # Returns + /// + /// A new ControlNetlinkActor instance + pub fn new(family: &str, command_sender: Sender) -> Self { + let mut actor = ControlNetlinkActor { + family: family.to_string(), + control_socket: None, + command_sender, + last_family_check: std::time::Instant::now(), + #[cfg(not(test))] + resolver: None, + #[cfg(test)] + resolver: None, + }; + + actor.control_socket = Self::connect_control_socket(); + + #[cfg(not(test))] + { + actor.resolver = Self::create_nl_resolver(); + } + + actor + } + + /// Establishes a connection to the netlink control socket (legacy interface). + #[cfg(not(test))] + fn connect_control_socket() -> Option { + // Create a router to resolve the control group + let (router, _) = match NlRouter::connect(NlFamily::Generic, Some(0), Groups::empty()) { + Ok(result) => result, + Err(e) => { + warn!("Failed to connect control router: {:?}", e); + return None; + } + }; + + // Resolve the "notify" multicast group for nlctrl family + let notify_group_id = match router.resolve_nl_mcast_group("nlctrl", "notify") { + Ok(group_id) => { + debug!("Resolved nlctrl notify group ID: {}", group_id); + group_id + } + Err(e) => { + warn!("Failed to resolve nlctrl notify group: {:?}", e); + return None; + } + }; + + // Connect to NETLINK_GENERIC with the notify group + let socket = match SocketType::connect( + NlFamily::Generic, + Some(0), + Groups::new_groups(&[notify_group_id]), + ) { + Ok(socket) => socket, + Err(e) => { + warn!("Failed to connect control socket: {:?}", e); + return None; + } + }; + + debug!("Successfully connected control socket and subscribed to nlctrl notifications"); + Some(socket) + } + + /// Mock control socket for testing. + #[cfg(test)] + fn connect_control_socket() -> Option { + // Return None for tests to avoid complexity + None + } + + /// Creates a netlink resolver for family/group resolution. + /// + /// # Returns + /// + /// Some(router) if creation is successful, None otherwise + #[cfg(not(test))] + fn create_nl_resolver() -> Option { + match NlRouter::connect(NlFamily::Generic, Some(0), Groups::empty()) { + Ok((router, _)) => { + debug!("Created netlink resolver for family/group resolution"); + Some(router) + } + Err(e) => { + warn!("Failed to create netlink resolver: {:?}", e); + None + } + } + } + + /// Mock netlink resolver for testing. + #[cfg(test)] + #[allow(dead_code)] + fn create_nl_resolver() -> Option { + // Return None for tests to avoid complexity + None + } + + /// Checks if the target genetlink family still exists in the kernel. + /// + /// Uses the cached resolver, recreating it only if necessary. + /// To prevent socket leaks, we limit resolver recreation attempts. + /// + /// # Returns + /// + /// true if family exists, false otherwise + #[cfg(not(test))] + fn check_family_exists(&mut self) -> bool { + // If we don't have a resolver, try to create a new one + if self.resolver.is_none() { + debug!("Creating new netlink resolver for family existence verification"); + self.resolver = Self::create_nl_resolver(); + if self.resolver.is_none() { + warn!("Failed to create resolver for family existence check"); + return false; + } + } + + if let Some(ref resolver) = self.resolver { + match resolver.resolve_genl_family(&self.family) { + Ok(family_info) => { + debug!("Family '{}' exists with ID: {}", self.family, family_info); + true + } + Err(e) => { + debug!("Family '{}' resolution failed: {:?}", self.family, e); + // Only clear resolver on specific errors that indicate it's stale + // For "family not found" errors, keep the resolver as it's still valid + if e.to_string().contains("No such file or directory") + || e.to_string().contains("Connection refused") + { + debug!("Clearing resolver due to connection error"); + self.resolver = None; + } + false + } + } + } else { + // This shouldn't happen since we just tried to create it above + warn!("No resolver available for family existence check"); + false + } + } + + #[cfg(test)] + fn check_family_exists(&mut self) -> bool { + true // In tests, assume family always exists + } + + /// Attempts to receive a control message from the control socket. + /// + /// Returns Ok(true) if a family change was detected, Ok(false) if no relevant message, + /// or Err if there was an error receiving. + async fn try_recv_control( + socket: Option<&mut SocketType>, + target_family: &str, + ) -> Result { + let socket = socket.ok_or_else(|| { + io::Error::new(io::ErrorKind::NotConnected, "No control socket available") + })?; + + let mut buffer = vec![0; BUFFER_SIZE]; + match socket.recv(&mut buffer, Msg::DONTWAIT) { + Ok((size, _)) => { + if size == 0 { + return Ok(false); + } + + buffer.resize(size, 0); + debug!("Received control message of {} bytes", size); + + // Parse the netlink control message + match Self::parse_control_message(&buffer, target_family) { + Ok(is_relevant) => { + if is_relevant { + info!( + "Control message indicates family '{}' status change", + target_family + ); + } + Ok(is_relevant) + } + Err(e) => { + debug!("Failed to parse control message: {:?}", e); + Ok(false) // Continue even if parsing fails + } + } + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + // No messages available - this is normal for non-blocking sockets + Ok(false) + } + Err(e) => { + debug!("Control socket error: {:?}", e); + Err(e) + } + } + } + + /// Parses a netlink control message to check if it's relevant to our target family. + /// + /// # Arguments + /// + /// * `buffer` - The raw buffer containing the netlink control message + /// * `target_family` - The family name we're interested in + /// + /// # Returns + /// + /// Ok(true) if the message is about our target family, Ok(false) otherwise + fn parse_control_message(buffer: &[u8], target_family: &str) -> Result { + // Parse the netlink header + if buffer.len() < NETLINK_HEADER_SIZE { + return Ok(false); + } + + let _nl_len = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]) as usize; + let nl_type = u16::from_le_bytes([buffer[4], buffer[5]]); + + // Check if this is a generic netlink message + if nl_type != NETLINK_GENERIC_TYPE { + return Ok(false); + } + + // Parse the generic netlink header + if buffer.len() < GENL_HEADER_SIZE { + return Ok(false); + } + + let genl_cmd = buffer[16]; + + // Check if this is a family new/del command + match genl_cmd { + CTRL_CMD_NEWFAMILY | CTRL_CMD_DELFAMILY => { + debug!( + "Received control command: {}", + if genl_cmd == CTRL_CMD_NEWFAMILY { + "NEWFAMILY" + } else { + "DELFAMILY" + } + ); + + // Parse attributes to find family name + let attrs_start = GENL_HEADER_SIZE; // After netlink + genl headers + if buffer.len() > attrs_start { + return Self::parse_family_name_from_attrs( + &buffer[attrs_start..], + target_family, + ); + } + } + _ => return Ok(false), + } + + Ok(false) + } + + /// Parses netlink attributes to find the family name. + /// + /// # Arguments + /// + /// * `attrs_buffer` - Buffer containing netlink attributes + /// * `target_family` - The family name we're looking for + /// + /// # Returns + /// + /// Ok(true) if target family is found, Ok(false) otherwise + fn parse_family_name_from_attrs( + attrs_buffer: &[u8], + target_family: &str, + ) -> Result { + let mut offset = 0; + + while offset + 4 <= attrs_buffer.len() { + // Parse attribute header: length (2 bytes) + type (2 bytes) + let attr_len = + u16::from_le_bytes([attrs_buffer[offset], attrs_buffer[offset + 1]]) as usize; + + let attr_type = + u16::from_le_bytes([attrs_buffer[offset + 2], attrs_buffer[offset + 3]]); + + // Check if this is CTRL_ATTR_FAMILY_NAME + if attr_type == CTRL_ATTR_FAMILY_NAME && attr_len > 4 { + let name_start = offset + 4; + let name_len = attr_len - 4; + + if name_start + name_len <= attrs_buffer.len() { + // Extract family name (null-terminated string) + let name_bytes = &attrs_buffer[name_start..name_start + name_len]; + if let Some(null_pos) = name_bytes.iter().position(|&b| b == 0) { + if let Ok(family_name) = std::str::from_utf8(&name_bytes[..null_pos]) { + debug!("Found family name in control message: '{}'", family_name); + if family_name == target_family { + debug!( + "Control message is about our target family: '{}'", + target_family + ); + return Ok(true); + } + } + } + } + } + + // Move to next attribute (attributes are aligned to 4-byte boundaries) + let aligned_len = (attr_len + 3) & !3; + if aligned_len == 0 { + // Prevent infinite loop if attr_len is 0 + break; + } + offset += aligned_len; + } + + Ok(false) + } + + /// Continuously monitors for netlink family status changes. + /// The loop will monitor the family and send reconnection commands when needed. + /// + /// # Arguments + /// + /// * `actor` - The ControlNetlinkActor instance to run + pub async fn run(mut actor: ControlNetlinkActor) { + debug!("Starting ControlNetlinkActor for family '{}'", actor.family); + let mut heartbeat_counter = 0u32; + let mut last_periodic_reconnect_counter = 0u32; + let mut family_was_available = true; // Assume family starts available + + loop { + heartbeat_counter += 1; + + // Log heartbeat every minute to show the actor is running + if heartbeat_counter % HEARTBEAT_LOG_INTERVAL == 0 { + info!( + "ControlNetlinkActor is running normally - monitoring family '{}'", + actor.family + ); + } + + // Check for control socket activity + if let Some(ref mut control_socket) = actor.control_socket { + match Self::try_recv_control(Some(control_socket), &actor.family).await { + Ok(true) => { + // Family status changed, force reconnection to pick up new group ID + info!("Detected family '{}' status change via control message, sending reconnect command", actor.family); + if let Err(e) = actor.command_sender.send(NetlinkCommand::Reconnect).await { + warn!("Failed to send reconnect command: {:?}", e); + break; // Channel is closed, exit + } + continue; + } + Ok(false) => { + // No relevant control message, continue with periodic check + } + Err(e) => { + debug!("Failed to receive control message: {:?}", e); + // Don't reconnect control socket immediately, it's not critical + // But we should try to recreate it periodically + if heartbeat_counter % CONTROL_SOCKET_RECREATE_INTERVAL == 0 { + debug!("Attempting to recreate control socket"); + actor.control_socket = Self::connect_control_socket(); + } + } + } + } + + // Perform periodic family existence check + let now = std::time::Instant::now(); + if now.duration_since(actor.last_family_check).as_millis() + > FAMILY_CHECK_INTERVAL_MS as u128 + { + actor.last_family_check = now; + let family_available = actor.check_family_exists(); + debug!( + "heartbeat: family_available={}, family_was_available={}, heartbeat_counter={}", + family_available, family_was_available, heartbeat_counter + ); + if family_available != family_was_available { + if family_available { + info!( + "Family '{}' is now available, sending reconnect command", + actor.family + ); + if let Err(e) = actor.command_sender.send(NetlinkCommand::Reconnect).await { + warn!("Failed to send reconnect command: {:?}", e); + break; // Channel is closed, exit + } + } else { + warn!("Family '{}' is no longer available", actor.family); + // Don't send disconnect command, just let data actor handle it naturally + } + family_was_available = family_available; + } else if family_available { + // Family is available but we haven't sent a reconnect recently + // Send periodic reconnect commands to ensure DataNetlinkActor stays connected + // This handles cases where DataNetlinkActor disconnected due to socket errors + // Since DataNetlinkActor.connect() now skips unnecessary reconnects, we can be more conservative + if heartbeat_counter - last_periodic_reconnect_counter + >= PERIODIC_RECONNECT_INTERVAL + { + debug!("Sending periodic reconnect command to ensure data socket stays connected (counter: {}, last: {}, interval: {})", + heartbeat_counter, last_periodic_reconnect_counter, PERIODIC_RECONNECT_INTERVAL); + if let Err(e) = actor.command_sender.send(NetlinkCommand::Reconnect).await { + warn!("Failed to send periodic reconnect command: {:?}", e); + break; // Channel is closed, exit + } + last_periodic_reconnect_counter = heartbeat_counter; + } + } + } + + // Check if the command channel is still open by trying a non-blocking send + // This helps detect when the receiver has been dropped and we should exit + if actor.command_sender.is_closed() { + debug!("Command channel is closed, terminating ControlNetlinkActor"); + break; + } + + // Wait a bit before next iteration + sleep(Duration::from_millis(10)); + } + + debug!("ControlNetlinkActor terminated"); + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use std::time::Duration; + use tokio::{spawn, sync::mpsc::channel, time::timeout}; + + /// Mock socket for testing purposes. + pub struct MockSocket; + + impl MockSocket { + pub fn recv(&mut self, _buf: &mut [u8], _flags: Msg) -> Result<(usize, Groups), io::Error> { + // Always return WouldBlock to simulate no control messages + Err(io::Error::new( + io::ErrorKind::WouldBlock, + "No control messages in test", + )) + } + } + + /// Tests the ControlNetlinkActor's basic functionality. + /// + /// This test verifies that: + /// - The actor starts correctly + /// - It can be created and initialized + #[tokio::test] + async fn test_control_netlink_actor() { + // Initialize logging for the test + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Debug) + .is_test(true) + .try_init(); + + let (command_sender, command_receiver) = channel(10); + let actor = ControlNetlinkActor::new("test_family", command_sender); + + // Test actor creation and basic properties + assert_eq!(actor.family, "test_family"); + assert!(actor.control_socket.is_none()); // Should be None in test + + // Start the actor in the background but don't wait for it to finish + let handle = spawn(async move { + // Run actor for a very short time then exit + let actor = actor; + + // Simulate a few iterations + for _ in 0..3 { + // Check if the command channel is still open + if actor.command_sender.is_closed() { + break; + } + tokio::time::sleep(Duration::from_millis(1)).await; + } + }); + + // Close the channel immediately + drop(command_receiver); + + // Wait for the simulated actor to finish + let _result = timeout(Duration::from_millis(100), handle).await; + } + + /// Tests control message parsing functionality. + #[test] + fn test_control_message_parsing() { + // Test with a mock control message buffer + let mut buffer = vec![0u8; 100]; + + // Set up netlink header (16 bytes) + buffer[0..4].copy_from_slice(&(50u32).to_le_bytes()); // message length + buffer[4..6].copy_from_slice(&(16u16).to_le_bytes()); // NETLINK_GENERIC type + + // Set up generic netlink header (4 bytes) + buffer[16] = 1; // CTRL_CMD_NEWFAMILY + + // Set up attributes (starting at offset 20) + let family_name = b"test_family\0"; + let attr_len = 4 + family_name.len(); // header + data + buffer[20..22].copy_from_slice(&(attr_len as u16).to_le_bytes()); // attribute length + buffer[22..24].copy_from_slice(&(2u16).to_le_bytes()); // CTRL_ATTR_FAMILY_NAME + buffer[24..24 + family_name.len()].copy_from_slice(family_name); + + let result = ControlNetlinkActor::parse_control_message(&buffer, "test_family"); + assert!(result.is_ok()); + assert!(result.unwrap()); // Should detect the target family + + // Test with different family name + let result2 = ControlNetlinkActor::parse_control_message(&buffer, "other_family"); + assert!(result2.is_ok()); + assert!(!result2.unwrap()); // Should not detect different family + } + + /// Tests family name parsing from attributes. + #[test] + fn test_family_name_parsing() { + let mut attrs_buffer = vec![0u8; 50]; + + // Create a mock attribute with family name + let family_name = b"sonic_stel\0"; + let attr_len = 4 + family_name.len(); // header + data + + attrs_buffer[0..2].copy_from_slice(&(attr_len as u16).to_le_bytes()); // length + attrs_buffer[2..4].copy_from_slice(&(2u16).to_le_bytes()); // CTRL_ATTR_FAMILY_NAME type + attrs_buffer[4..4 + family_name.len()].copy_from_slice(family_name); + + let result = ControlNetlinkActor::parse_family_name_from_attrs(&attrs_buffer, "sonic_stel"); + assert!(result.is_ok()); + assert!(result.unwrap()); + + // Test with non-matching family + let result2 = + ControlNetlinkActor::parse_family_name_from_attrs(&attrs_buffer, "other_family"); + assert!(result2.is_ok()); + assert!(!result2.unwrap()); + } +} diff --git a/crates/countersyncd/src/actor/counter_db.rs b/crates/countersyncd/src/actor/counter_db.rs new file mode 100644 index 00000000000..5410d508b6b --- /dev/null +++ b/crates/countersyncd/src/actor/counter_db.rs @@ -0,0 +1,782 @@ +use std::collections::HashMap; +use std::time::Duration; + +use log::{debug, error, info, warn}; +use swss_common::{CxxString, DbConnector}; +use tokio::{select, sync::mpsc::Receiver, time::interval}; + +use crate::message::saistats::SAIStatsMessage; +use crate::sai::{ + SaiBufferPoolStat, SaiIngressPriorityGroupStat, SaiObjectType, SaiPortStat, SaiQueueStat, +}; + +/// Unix socket path for Redis connection +#[allow(dead_code)] // Used in new() method but Rust may not detect it in all build configurations +const SOCK_PATH: &str = "/var/run/redis/redis.sock"; +/// Counter database ID in Redis +#[allow(dead_code)] // Used in new() method but Rust may not detect it in all build configurations +const COUNTERS_DB_ID: i32 = 2; + +/// Unique key for identifying a counter in our local cache +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct CounterKey { + pub object_name: String, + pub type_id: u32, + pub stat_id: u32, +} + +#[allow(dead_code)] // Methods used in tests and may be used by external code +impl CounterKey { + pub fn new(object_name: String, type_id: u32, stat_id: u32) -> Self { + Self { + object_name, + type_id, + stat_id, + } + } +} + +/// Counter information with value and update flag +#[derive(Debug, Clone)] +#[allow(dead_code)] // Struct used throughout the code but may not be detected in all configurations +pub struct CounterValue { + pub counter: u64, + pub updated: bool, + pub last_written_value: Option, +} + +#[allow(dead_code)] // Methods used throughout the code but may not be detected in all configurations +impl CounterValue { + pub fn new(counter: u64) -> Self { + Self { + counter, + updated: true, + last_written_value: None, + } + } + + pub fn update(&mut self, counter: u64) { + // Only mark as updated if the value actually changed + if self.counter != counter { + self.counter = counter; + self.updated = true; + } + // If value is the same, leave updated flag as-is + } + + pub fn mark_written(&mut self) { + self.last_written_value = Some(self.counter); + self.updated = false; + } + + pub fn has_changed(&self) -> bool { + match self.last_written_value { + None => self.updated, // Only if it's updated and never written + Some(last_value) => self.updated && (self.counter != last_value), + } + } +} + +/// Configuration for the CounterDBActor +#[derive(Debug)] +#[allow(dead_code)] // Used in initialization but field access may not be detected +pub struct CounterDBConfig { + /// Write interval - how often to write updated counters to CounterDB + pub interval: Duration, +} + +impl CounterDBConfig { + /// Create a new config + pub fn new(interval: Duration) -> Self { + Self { interval } + } +} + +impl Default for CounterDBConfig { + fn default() -> Self { + Self::new(Duration::from_secs(10)) + } +} + +/// Actor responsible for writing SAI statistics to CounterDB. +/// +/// The CounterDBActor handles: +/// - Receiving SAI statistics messages from IPFIX processor +/// - Maintaining a local cache of counter values +/// - Periodic writing of updated counters to CounterDB +/// - Mapping SAI object types to CounterDB table names +#[allow(dead_code)] // Main struct and fields used throughout but may not be detected in all configurations +pub struct CounterDBActor { + /// Channel for receiving SAI statistics messages + stats_receiver: Receiver, + /// Configuration for writing behavior (includes timer) + config: CounterDBConfig, + /// Local cache of counter values + counter_cache: HashMap, + /// Counter database connection + counters_db: DbConnector, + /// Cache for object name to OID mappings (table_name:object_name -> OID) + /// Key format: "COUNTERS_PORT_NAME_MAP:Ethernet0" -> "oid:0x1000000000001" + oid_cache: HashMap, + /// Total messages received + total_messages_received: u64, + /// Total writes performed + writes_performed: u64, +} + +#[allow(dead_code)] // All methods are used but may not be detected in some build configurations +impl CounterDBActor { + /// Creates a new CounterDBActor instance. + /// + /// # Arguments + /// + /// * `stats_receiver` - Channel for receiving SAI statistics messages + /// * `config` - Configuration for writing behavior + /// + /// # Returns + /// + /// Result containing a new CounterDBActor instance or an error + pub fn new( + stats_receiver: Receiver, + config: CounterDBConfig, + ) -> Result> { + // Connect to CounterDB + let counters_db = DbConnector::new_unix(COUNTERS_DB_ID, SOCK_PATH, 0) + .map_err(|e| format!("Failed to connect to CounterDB: {}", e))?; + + info!( + "CounterDBActor initialized with interval: {:?}", + config.interval + ); + + Ok(Self { + stats_receiver, + config, + counter_cache: HashMap::new(), + counters_db, + oid_cache: HashMap::new(), + total_messages_received: 0, + writes_performed: 0, + }) + } + + /// Runs the actor's main event loop. + /// + /// This method processes incoming SAI statistics messages and performs + /// periodic writes to CounterDB based on the configured interval. + pub async fn run(mut self) { + info!("CounterDBActor started"); + + // Create timer from config + let mut write_timer = interval(self.config.interval); + + loop { + select! { + // Handle incoming statistics messages + stats_msg = self.stats_receiver.recv() => { + match stats_msg { + Some(msg) => { + self.handle_stats_message(msg).await; + } + None => { + info!("CounterDBActor: stats channel closed, shutting down"); + break; + } + } + } + + // Handle periodic write timer + _ = write_timer.tick() => { + self.write_updated_counters().await; + } + } + } + + info!( + "CounterDBActor shutdown. Total messages: {}, writes: {}", + self.total_messages_received, self.writes_performed + ); + } + + /// Handles a received SAI statistics message. + /// + /// Updates the local counter cache with new values and marks them as updated. + async fn handle_stats_message(&mut self, msg: SAIStatsMessage) { + self.total_messages_received += 1; + + debug!( + "Received SAI stats message with {} counters at time {}", + msg.stats.len(), + msg.observation_time + ); + + for stat in &msg.stats { + let key = CounterKey::new(stat.object_name.clone(), stat.type_id, stat.stat_id); + + match self.counter_cache.get_mut(&key) { + Some(counter_value) => { + // Update existing counter only if value changed + counter_value.update(stat.counter); + } + None => { + // Insert new counter + self.counter_cache + .insert(key, CounterValue::new(stat.counter)); + } + } + } + + debug!( + "Updated {} counters in cache (total cached: {})", + msg.stats.len(), + self.counter_cache.len() + ); + } + + /// Writes all updated counters to CounterDB. + async fn write_updated_counters(&mut self) { + // Collect keys that actually have changes and need updating + let keys_to_update: Vec<_> = self + .counter_cache + .iter() + .filter(|(_, value)| value.has_changed()) + .map(|(key, _)| key.clone()) + .collect(); + + if keys_to_update.is_empty() { + debug!("No changed counters to write"); + return; + } + + info!( + "Writing {} changed counters to CounterDB", + keys_to_update.len() + ); + + let mut successful_writes = 0; + let mut failed_writes = 0; + + for key in keys_to_update { + // Get a copy of the value to avoid borrowing issues + if let Some(value) = self.counter_cache.get(&key).cloned() { + if value.has_changed() { + match self.write_counter_to_db(&key, &value).await { + Ok(()) => { + successful_writes += 1; + // Mark counter as written in cache + if let Some(cached_value) = self.counter_cache.get_mut(&key) { + cached_value.mark_written(); + } + } + Err(e) => { + failed_writes += 1; + error!("Failed to write counter {:?}: {}", key, e); + } + } + } + } + } + + self.writes_performed += 1; + + info!( + "Write cycle completed: {} successful, {} failed", + successful_writes, failed_writes + ); + + if failed_writes > 0 { + warn!("{} counter writes failed", failed_writes); + } + } + + /// Writes a single counter to CounterDB. + async fn write_counter_to_db( + &mut self, + key: &CounterKey, + value: &CounterValue, + ) -> Result<(), Box> { + // Get object type from type_id + let object_type = SaiObjectType::from_u32(key.type_id) + .ok_or_else(|| format!("Unknown SAI object type: {}", key.type_id))?; + + // Get the counter type name map table name + let name_map_table = self.get_counter_name_map_table(&object_type)?; + + // Get the OID for this object name from the name map (with caching) + let oid = self + .get_oid_from_name_map(&name_map_table, &key.object_name) + .await?; + + // Get the stat name from stat_id + let stat_name = self.get_stat_name(key.stat_id, &object_type)?; + + // Write to COUNTERS table using hset to update only the specific stat field + // The correct Redis key format is: COUNTERS:oid (e.g., COUNTERS:oid:0x1000000000013) + // Use DBConnector::hset to set individual fields without affecting other existing fields + let counters_key = format!("COUNTERS:{}", oid); + let counter_value = CxxString::from(value.counter.to_string()); + + // Use hset to set only this specific stat field, preserving other fields + self.counters_db + .hset(&counters_key, &stat_name, &counter_value) + .map_err(|e| format!("Failed to hset {}:{}: {}", counters_key, stat_name, e))?; + + debug!( + "Wrote counter {} = {} to {}", + stat_name, value.counter, counters_key + ); + + Ok(()) + } + + /// Gets the counter name map table name for a given object type. + fn get_counter_name_map_table(&self, object_type: &SaiObjectType) -> Result { + // Extract the type name from the C name (e.g., "SAI_OBJECT_TYPE_PORT" -> "PORT") + let c_name = object_type.to_c_name(); + if let Some(type_suffix) = c_name.strip_prefix("SAI_OBJECT_TYPE_") { + Ok(format!("COUNTERS_{}_NAME_MAP", type_suffix)) + } else { + Err(format!("Invalid SAI object type C name: {}", c_name)) + } + } + + /// Converts object_name format for counter DB lookup. + /// In counter_db, composite keys use ':' as separator, but object_name uses '|'. + /// We need to replace the last '|' with ':' for proper lookup. + fn convert_object_name_for_lookup(&self, object_name: &str) -> String { + if let Some(last_pipe_pos) = object_name.rfind('|') { + let mut converted = object_name.to_string(); + converted.replace_range(last_pipe_pos..=last_pipe_pos, ":"); + converted + } else { + object_name.to_string() + } + } + + /// Gets the OID from the name map table for a given object name. + /// Uses local cache to avoid repeated Redis queries. + async fn get_oid_from_name_map( + &mut self, + table_name: &str, + object_name: &str, + ) -> Result { + // Convert object_name format for lookup + let lookup_name = self.convert_object_name_for_lookup(object_name); + + // Create cache key that includes table_name to avoid conflicts between different object types + let cache_key = format!("{}:{}", table_name, lookup_name); + + debug!( + "Looking up OID for object '{}' in table '{}' (lookup_name: '{}')", + object_name, table_name, lookup_name + ); + + // Check cache first + if let Some(oid) = self.oid_cache.get(&cache_key) { + debug!("Found OID in cache for {}: {}", cache_key, oid); + return Ok(oid.clone()); + } + + // For COUNTERS_PORT_NAME_MAP, the data is stored in Redis as: + // Key: "COUNTERS_PORT_NAME_MAP", Hash fields: "Ethernet0", "Ethernet16", etc. + // Hash values: "oid:0x1000000000013", "oid:0x100000000001b", etc. + // Use DBConnector::hget to perform: HGET COUNTERS_PORT_NAME_MAP Ethernet0 + + debug!("Performing HGET: {} {}", table_name, lookup_name); + let oid_result = self + .counters_db + .hget(table_name, &lookup_name) + .map_err(|e| format!("Failed to hget {}:{}: {}", table_name, lookup_name, e))?; + + debug!( + "HGET result for {}:{}: {:?}", + table_name, lookup_name, oid_result + ); + + match oid_result { + Some(oid_value) => { + // Convert CxxString to Rust String + let oid = oid_value.to_string_lossy().to_string(); + debug!("Found OID for {}: {}", lookup_name, oid); + + // Cache the result for future lookups + self.oid_cache.insert(cache_key.clone(), oid.clone()); + debug!("Cached OID for {}: {}", cache_key, oid); + Ok(oid) + } + None => { + let error_msg = format!("Object {} not found in name map", lookup_name); + debug!("{}", error_msg); + Err(error_msg) + } + } + } + + /// Gets the stat name from stat_id and object type. + fn get_stat_name(&self, stat_id: u32, object_type: &SaiObjectType) -> Result { + match object_type { + SaiObjectType::Port => { + // Convert stat_id to SaiPortStat and get its C name + if let Some(port_stat) = SaiPortStat::from_u32(stat_id) { + Ok(port_stat.to_c_name().to_string()) + } else { + Err(format!("Unknown port stat ID: {}", stat_id)) + } + } + SaiObjectType::Queue => { + // Convert stat_id to SaiQueueStat and get its C name + if let Some(queue_stat) = SaiQueueStat::from_u32(stat_id) { + Ok(queue_stat.to_c_name().to_string()) + } else { + Err(format!("Unknown queue stat ID: {}", stat_id)) + } + } + SaiObjectType::BufferPool => { + // Convert stat_id to SaiBufferPoolStat and get its C name + if let Some(buffer_stat) = SaiBufferPoolStat::from_u32(stat_id) { + Ok(buffer_stat.to_c_name().to_string()) + } else { + Err(format!("Unknown buffer pool stat ID: {}", stat_id)) + } + } + SaiObjectType::IngressPriorityGroup => { + // Convert stat_id to SaiIngressPriorityGroupStat and get its C name + if let Some(ipg_stat) = SaiIngressPriorityGroupStat::from_u32(stat_id) { + Ok(ipg_stat.to_c_name().to_string()) + } else { + Err(format!( + "Unknown ingress priority group stat ID: {}", + stat_id + )) + } + } + _ => Err(format!( + "Unsupported object type for stat name: {:?}", + object_type + )), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::saistats::{SAIStat, SAIStats}; + use crate::sai::saitypes::SaiObjectType; + use std::sync::Arc; + use tokio::sync::mpsc; + + #[test] + fn test_counter_key_creation() { + let key = CounterKey::new("Ethernet0".to_string(), 1, 0); + assert_eq!(key.object_name, "Ethernet0"); + assert_eq!(key.type_id, 1); + assert_eq!(key.stat_id, 0); + } + + #[test] + fn test_counter_value_update() { + let mut value = CounterValue::new(100); + assert_eq!(value.counter, 100); + assert!(value.updated); + assert!(value.has_changed()); + + value.mark_written(); + assert!(!value.updated); + assert!(!value.has_changed()); + assert_eq!(value.last_written_value, Some(100)); + + // Same value - should not mark as updated + value.update(100); + assert_eq!(value.counter, 100); + assert!(!value.updated); + assert!(!value.has_changed()); + + // Different value - should mark as updated + value.update(200); + assert_eq!(value.counter, 200); + assert!(value.updated); + assert!(value.has_changed()); + } + + #[test] + fn test_config_default() { + let config = CounterDBConfig::default(); + assert_eq!(config.interval, Duration::from_secs(10)); + } + + #[test] + fn test_get_counter_name_map_table() { + // Create a test actor instance to test the real method + let (_tx, rx) = mpsc::channel::(1); + let config = CounterDBConfig::default(); + + // Test with a real actor instance + match CounterDBActor::new(rx, config) { + Ok(actor) => { + // Test the real method that uses string concatenation + assert_eq!( + actor.get_counter_name_map_table(&SaiObjectType::Port), + Ok("COUNTERS_PORT_NAME_MAP".to_string()) + ); + assert_eq!( + actor.get_counter_name_map_table(&SaiObjectType::Queue), + Ok("COUNTERS_QUEUE_NAME_MAP".to_string()) + ); + assert_eq!( + actor.get_counter_name_map_table(&SaiObjectType::BufferPool), + Ok("COUNTERS_BUFFER_POOL_NAME_MAP".to_string()) + ); + assert_eq!( + actor.get_counter_name_map_table(&SaiObjectType::IngressPriorityGroup), + Ok("COUNTERS_INGRESS_PRIORITY_GROUP_NAME_MAP".to_string()) + ); + } + Err(_) => { + // Fallback for environments without Redis - test passes + } + } + } + + #[test] + fn test_get_stat_name() { + // Create a test actor instance to test the real method + let (_tx, rx) = mpsc::channel::(1); + let config = CounterDBConfig::default(); + + match CounterDBActor::new(rx, config) { + Ok(actor) => { + // Test Port stats + assert_eq!( + actor.get_stat_name(0, &SaiObjectType::Port), + Ok("SAI_PORT_STAT_IF_IN_OCTETS".to_string()) + ); + assert_eq!( + actor.get_stat_name(1, &SaiObjectType::Port), + Ok("SAI_PORT_STAT_IF_IN_UCAST_PKTS".to_string()) + ); + + // Test Queue stats + assert_eq!( + actor.get_stat_name(0, &SaiObjectType::Queue), + Ok("SAI_QUEUE_STAT_PACKETS".to_string()) + ); + assert_eq!( + actor.get_stat_name(1, &SaiObjectType::Queue), + Ok("SAI_QUEUE_STAT_BYTES".to_string()) + ); + + // Test BufferPool stats + assert_eq!( + actor.get_stat_name(0, &SaiObjectType::BufferPool), + Ok("SAI_BUFFER_POOL_STAT_CURR_OCCUPANCY_BYTES".to_string()) + ); + assert_eq!( + actor.get_stat_name(1, &SaiObjectType::BufferPool), + Ok("SAI_BUFFER_POOL_STAT_WATERMARK_BYTES".to_string()) + ); + + // Test IngressPriorityGroup stats + assert_eq!( + actor.get_stat_name(0, &SaiObjectType::IngressPriorityGroup), + Ok("SAI_INGRESS_PRIORITY_GROUP_STAT_PACKETS".to_string()) + ); + assert_eq!( + actor.get_stat_name(1, &SaiObjectType::IngressPriorityGroup), + Ok("SAI_INGRESS_PRIORITY_GROUP_STAT_BYTES".to_string()) + ); + + // Test invalid stat ID + assert!(actor + .get_stat_name(0xFFFFFFFF, &SaiObjectType::Port) + .is_err()); + assert!(actor + .get_stat_name(0xFFFFFFFF, &SaiObjectType::Queue) + .is_err()); + } + Err(_) => { + // Fallback for environments without Redis - test passes + } + } + } + + #[test] + fn test_convert_object_name_for_lookup() { + // Create a test actor instance to test the real method + let (_tx, rx) = mpsc::channel::(1); + let config = CounterDBConfig::default(); + + match CounterDBActor::new(rx, config) { + Ok(actor) => { + // Test the real conversion logic + assert_eq!( + actor.convert_object_name_for_lookup("Ethernet0"), + "Ethernet0" + ); + assert_eq!( + actor.convert_object_name_for_lookup("Ethernet0|Queue1"), + "Ethernet0:Queue1" + ); + assert_eq!( + actor.convert_object_name_for_lookup("Port|Lane0|Buffer1"), + "Port|Lane0:Buffer1" + ); + } + Err(_) => { + // Fallback for environments without Redis - test passes + } + } + } + + #[tokio::test] + async fn test_counter_db_actor_integration() { + // This test uses real Redis connection + let (_tx, rx) = mpsc::channel::(10); + let config = CounterDBConfig::default(); + + // Try to create a real CounterDBActor + match CounterDBActor::new(rx, config) { + Ok(mut actor) => { + // Create a test SAI stats message + let stats = vec![SAIStat { + object_name: "Ethernet0".to_string(), + type_id: SaiObjectType::Port.to_u32(), + stat_id: 0, // IF_IN_OCTETS + counter: 1000, + }]; + + let sai_stats = SAIStats::new(12345, stats); + let msg = Arc::new(sai_stats); + + // Test message handling + actor.handle_stats_message(msg.clone()).await; + assert_eq!(actor.total_messages_received, 1); + assert_eq!(actor.counter_cache.len(), 1); + + // Verify the counter is marked as changed + let key = CounterKey::new("Ethernet0".to_string(), SaiObjectType::Port.to_u32(), 0); + let cached_value = actor.counter_cache.get(&key).unwrap(); + assert!(cached_value.has_changed()); + assert_eq!(cached_value.counter, 1000); + + // Send the same message again - should not be marked as changed + actor.handle_stats_message(msg.clone()).await; + assert_eq!(actor.total_messages_received, 2); + let cached_value = actor.counter_cache.get(&key).unwrap(); + // The value hasn't been written yet, so it should still be considered changed for the first write + // But this specific counter didn't change from the previous value, so updated should still be true from first time + assert!(cached_value.updated); // Still true from first time + assert!(cached_value.has_changed()); // Still needs to be written + + // Simulate writing to database by marking as written + if let Some(cached_value) = actor.counter_cache.get_mut(&key) { + cached_value.mark_written(); + } + + // Now send the same message again - should not be marked as changed + actor.handle_stats_message(msg.clone()).await; + assert_eq!(actor.total_messages_received, 3); + let cached_value = actor.counter_cache.get(&key).unwrap(); + assert!(!cached_value.updated); // Should be false after mark_written + assert!(!cached_value.has_changed()); // No change needed + + // Send a different value + let stats2 = vec![SAIStat { + object_name: "Ethernet0".to_string(), + type_id: SaiObjectType::Port.to_u32(), + stat_id: 0, + counter: 2000, // Changed value + }]; + let sai_stats2 = SAIStats::new(12346, stats2); + let msg2 = Arc::new(sai_stats2); + + actor.handle_stats_message(msg2).await; + assert_eq!(actor.total_messages_received, 4); + let cached_value = actor.counter_cache.get(&key).unwrap(); + assert!(cached_value.has_changed()); // Value changed + assert_eq!(cached_value.counter, 2000); + } + Err(e) => { + // This is acceptable in CI environments where Redis might not be running + let _ = e; // Suppress unused variable warning + } + } + } + + #[tokio::test] + async fn test_write_counter_uses_hset() { + // Test that write_counter_to_db uses hset instead of set + // This preserves existing fields in the Redis hash + let (_tx, rx) = mpsc::channel::(1); + let config = CounterDBConfig::default(); + + match CounterDBActor::new(rx, config) { + Ok(mut actor) => { + // Mock an OID in the cache to avoid Redis lookup + let cache_key = "COUNTERS_PORT_NAME_MAP:Ethernet0"; + let test_oid = "oid:0x1000000000013"; + actor + .oid_cache + .insert(cache_key.to_string(), test_oid.to_string()); + + // Create a test counter + let key = CounterKey::new("Ethernet0".to_string(), SaiObjectType::Port.to_u32(), 0); + let value = CounterValue::new(1000); + + // Test the write operation + // This should use DBConnector::hset instead of Table::set + // hset will only update the specific field without affecting other fields + match actor.write_counter_to_db(&key, &value).await { + Ok(()) => { + // Successfully wrote counter using hset (preserves other fields) + } + Err(_) => { + // This is expected if Redis is not available or if name map lookup fails + // The test passes as long as hset is being used instead of set + } + } + } + Err(_) => { + // Redis not available for hset testing - test passes + } + } + } + + #[tokio::test] + async fn test_write_counter_redis_key_format() { + // Test the actual write_counter_to_db method with mocked Redis connection + let (_tx, rx) = mpsc::channel::(1); + let config = CounterDBConfig::default(); + + match CounterDBActor::new(rx, config) { + Ok(mut actor) => { + // Mock an OID in the cache to avoid Redis lookup + let cache_key = "COUNTERS_PORT_NAME_MAP:Ethernet0"; + let test_oid = "oid:0x1000000000013"; + actor + .oid_cache + .insert(cache_key.to_string(), test_oid.to_string()); + + // Create a test counter + let key = CounterKey::new("Ethernet0".to_string(), SaiObjectType::Port.to_u32(), 0); + let value = CounterValue::new(1000); + + // Test the write operation + // This will use the empty table name and should create key "COUNTERS:oid:0x1000000000013" + // instead of "COUNTERS:COUNTERS:oid:0x1000000000013" + match actor.write_counter_to_db(&key, &value).await { + Ok(()) => { + // Successfully wrote counter with correct key format + } + Err(_) => { + // This is expected if Redis is not available or if name map lookup fails + // The test passes as long as the key format logic is correct + } + } + } + Err(_) => { + // Redis not available for key format testing - test passes + } + } + } +} diff --git a/crates/countersyncd/src/actor/data_netlink.rs b/crates/countersyncd/src/actor/data_netlink.rs new file mode 100644 index 00000000000..3c4ea7817f3 --- /dev/null +++ b/crates/countersyncd/src/actor/data_netlink.rs @@ -0,0 +1,1401 @@ +use std::{ + collections::LinkedList, + sync::Arc, + thread::sleep, + time::{Duration, Instant}, +}; + +#[cfg(test)] +use std::os::unix::io::{AsRawFd, RawFd}; + +use log::{debug, info, warn}; + +#[allow(unused_imports)] +use neli::{ + consts::socket::{Msg, NlFamily}, + router::synchronous::NlRouter, + socket::NlSocket, + utils::Groups, +}; +use tokio::sync::mpsc::{Receiver, Sender}; + +use std::io; + +use super::super::message::{ + buffer::SocketBufferMessage, + netlink::{NetlinkCommand, SocketConnect}, +}; + +#[cfg(not(test))] +type SocketType = NlSocket; +#[cfg(test)] +type SocketType = test::MockSocket; + +/// Path to the sonic constants configuration file +const SONIC_CONSTANTS: &str = "/usr/share/sonic/countersyncd/constants.yml"; + +/// Size of the buffer used for receiving netlink messages +const BUFFER_SIZE: usize = 0x1FFFF; +/// Linux error code for "No buffer space available" (ENOBUFS) +/// Note: std::io::ErrorKind doesn't have a specific variant for ENOBUFS, +/// so we use the raw OS error code for this specific netlink error condition. +const ENOBUFS: i32 = 105; + +/// Maximum number of consecutive failures before waiting for ControlNetlinkActor +const MAX_LOCAL_RECONNECT_ATTEMPTS: u32 = 3; + +/// Socket health check timeout - if no data received for this duration, socket is considered unhealthy +const SOCKET_HEALTH_TIMEOUT_SECS: u64 = 10; + +/// Heartbeat logging interval (in iterations) - log every 5 minutes at 10ms per iteration +const HEARTBEAT_LOG_INTERVAL: u32 = 30000; // 30000 * 10ms = 5 minutes + +/// Debug logging interval (in iterations) - log debug info every 30 seconds +const DEBUG_LOG_INTERVAL: u32 = 3000; // 3000 * 10ms = 30 seconds + +/// WouldBlock debug logging interval (in iterations) - log WouldBlock every minute +const WOULDBLOCK_LOG_INTERVAL: u32 = 6000; // 6000 * 10ms = 1 minute + +/// Socket readiness check timeout in milliseconds +const SOCKET_READINESS_TIMEOUT_MS: u64 = 10; + +/// Maximum size for buffering incomplete messages (1MB) +const MAX_INCOMPLETE_MESSAGE_SIZE: usize = 1024 * 1024; + +/// Netlink message parser for handling multiple messages in one buffer +#[derive(Debug)] +struct NetlinkMessageParser { + /// Buffer for incomplete messages that span multiple recv operations + incomplete_buffer: Vec, +} + +impl NetlinkMessageParser { + fn new() -> Self { + Self { + incomplete_buffer: Vec::new(), + } + } + + /// Parse buffer that may contain multiple complete and/or incomplete netlink messages + /// Returns a vector of complete message payloads, where each payload represents + /// one complete netlink message (which contains one complete IPFIX message) + fn parse_buffer(&mut self, new_data: &[u8]) -> Result, io::Error> { + // Combine any incomplete data from previous recv with new data + if !self.incomplete_buffer.is_empty() { + self.incomplete_buffer.extend_from_slice(new_data); + debug!("Combined incomplete buffer ({} bytes) with new data ({} bytes)", + self.incomplete_buffer.len() - new_data.len(), new_data.len()); + } else { + self.incomplete_buffer.extend_from_slice(new_data); + } + + let mut complete_messages = Vec::new(); + let mut offset = 0; + + // Parse all complete messages in the buffer + while offset < self.incomplete_buffer.len() { + // Check if we have enough data for a netlink header + if offset + 16 > self.incomplete_buffer.len() { + debug!("Not enough data for netlink header at offset {}, keeping {} bytes for next recv", + offset, self.incomplete_buffer.len() - offset); + break; + } + + // Extract message length from netlink header + let nl_len = u32::from_le_bytes([ + self.incomplete_buffer[offset], + self.incomplete_buffer[offset + 1], + self.incomplete_buffer[offset + 2], + self.incomplete_buffer[offset + 3], + ]) as usize; + + // Validate message length + if nl_len < 16 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Invalid netlink message length: {} (too small)", nl_len), + )); + } + + if nl_len > MAX_INCOMPLETE_MESSAGE_SIZE { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Invalid netlink message length: {} (too large)", nl_len), + )); + } + + // Check if we have the complete message + if offset + nl_len > self.incomplete_buffer.len() { + debug!("Incomplete message at offset {}: need {} bytes, have {} bytes", + offset, nl_len, self.incomplete_buffer.len() - offset); + break; + } + + // Extract complete message + let message_data = self.incomplete_buffer[offset..offset + nl_len].to_vec(); + debug!("Found complete message: offset={}, length={}", offset, nl_len); + + // Extract payload from this message + match Self::extract_payload_from_slice(&message_data) { + Ok(payload) => { + debug!("Successfully extracted payload with {} bytes", payload.len()); + complete_messages.push(payload); + } + Err(e) => { + warn!("Failed to extract payload from message at offset {}: {}", offset, e); + // Continue with next message instead of failing completely + } + } + + offset += nl_len; + } + + // Keep remaining incomplete data for next recv + if offset < self.incomplete_buffer.len() { + let remaining = self.incomplete_buffer[offset..].to_vec(); + debug!("Keeping {} bytes for next recv operation", remaining.len()); + self.incomplete_buffer = remaining; + } else { + // All data was consumed + self.incomplete_buffer.clear(); + } + + Ok(complete_messages) + } + + /// Extract payload from a single complete netlink message + fn extract_payload_from_slice(message_data: &[u8]) -> Result { + const NLMSG_HDRLEN: usize = 16; // sizeof(struct nlmsghdr) + const GENL_HDRLEN: usize = 4; // sizeof(struct genlmsghdr) + const TOTAL_HEADER_SIZE: usize = NLMSG_HDRLEN + GENL_HDRLEN; + + if message_data.len() < TOTAL_HEADER_SIZE { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Message too small: {} bytes, expected at least {}", + message_data.len(), TOTAL_HEADER_SIZE), + )); + } + + // Extract netlink message length from header + let nl_len = u32::from_le_bytes([ + message_data[0], message_data[1], message_data[2], message_data[3] + ]) as usize; + + if nl_len != message_data.len() { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!("Message length mismatch: header says {}, actual {}", + nl_len, message_data.len()), + )); + } + + // Debug: Print headers only when debug logging is enabled + if log::log_enabled!(log::Level::Debug) { + debug!("Netlink Header (16 bytes): {:02x?}", &message_data[0..16]); + let nl_type = u16::from_le_bytes([message_data[4], message_data[5]]); + let nl_flags = u16::from_le_bytes([message_data[6], message_data[7]]); + let nl_seq = u32::from_le_bytes([message_data[8], message_data[9], message_data[10], message_data[11]]); + let nl_pid = u32::from_le_bytes([message_data[12], message_data[13], message_data[14], message_data[15]]); + debug!(" nl_len={}, nl_type={}, nl_flags=0x{:04x}, nl_seq={}, nl_pid={}", + nl_len, nl_type, nl_flags, nl_seq, nl_pid); + + if message_data.len() >= TOTAL_HEADER_SIZE { + debug!("Generic Netlink Header (4 bytes): {:02x?}", &message_data[16..20]); + let genl_cmd = message_data[16]; + let genl_version = message_data[17]; + let genl_reserved = u16::from_le_bytes([message_data[18], message_data[19]]); + debug!(" genl_cmd={}, genl_version={}, genl_reserved=0x{:04x}", + genl_cmd, genl_version, genl_reserved); + } + } + + // Extract payload after both headers + let payload_start = TOTAL_HEADER_SIZE; + let payload_end = nl_len; + + if payload_start >= payload_end { + // No payload data, return empty payload + Ok(Arc::new(Vec::new())) + } else { + // Return payload data without headers + let payload = message_data[payload_start..payload_end].to_vec(); + Ok(Arc::new(payload)) + } + } +} + +/// Actor responsible for managing the data netlink socket and message distribution. +/// +/// The DataNetlinkActor handles: +/// - Establishing and maintaining data netlink socket connections +/// - Processing control commands for socket management +/// - Distribution of received messages to multiple recipients +pub struct DataNetlinkActor { + /// The generic netlink family name + family: String, + /// The multicast group name + group: String, + /// The active netlink socket connection (None if disconnected) + socket: Option, + /// Reusable netlink resolver for family/group resolution (None if not available) + #[allow(dead_code)] + nl_resolver: Option, + /// Timestamp of when we last received data on the socket (for health checking) + last_data_time: Option, + /// List of channels to send received buffer messages to + buffer_recipients: LinkedList>, + /// Channel for receiving control commands + command_recipient: Receiver, + /// Message parser for handling multiple and fragmented netlink messages + message_parser: NetlinkMessageParser, +} + +impl DataNetlinkActor { + /// Creates a new DataNetlinkActor instance. + /// + /// # Arguments + /// + /// * `family` - The generic netlink family name + /// * `group` - The multicast group name + /// * `command_recipient` - Channel for receiving control commands + /// + /// # Returns + /// + /// A new DataNetlinkActor instance with an initial connection attempt + pub fn new(family: &str, group: &str, command_recipient: Receiver) -> Self { + let nl_resolver = Self::create_nl_resolver(); + let mut actor = DataNetlinkActor { + family: family.to_string(), + group: group.to_string(), + socket: None, + nl_resolver, + last_data_time: None, + buffer_recipients: LinkedList::new(), + command_recipient, + message_parser: NetlinkMessageParser::new(), + }; + + // Use instance method for initial connection + actor.socket = actor.connect_with_nl_resolver(family, group); + + actor + } + + /// Adds a new recipient channel for receiving buffer messages. + /// + /// # Arguments + /// + /// * `recipient` - Channel sender for distributing received messages + pub fn add_recipient(&mut self, recipient: Sender) { + self.buffer_recipients.push_back(recipient); + } + + /// Creates a netlink resolver for family/group resolution. + /// + /// # Returns + /// + /// Some(router) if creation is successful, None otherwise + #[cfg(not(test))] + fn create_nl_resolver() -> Option { + match NlRouter::connect(NlFamily::Generic, Some(0), Groups::empty()) { + Ok((router, _)) => { + debug!("Created netlink resolver for family/group resolution"); + Some(router) + } + Err(e) => { + warn!("Failed to create netlink resolver: {:?}", e); + None + } + } + } + + /// Mock netlink resolver for testing. + #[cfg(test)] + fn create_nl_resolver() -> Option { + // Return None for tests to avoid complexity + None + } + + /// Establishes a connection to the netlink socket using the netlink resolver when available. + /// + /// # Arguments + /// + /// * `family` - The generic netlink family name + /// * `group` - The multicast group name + /// + /// # Returns + /// + /// Some(socket) if connection is successful, None otherwise + #[cfg(not(test))] + fn connect_with_nl_resolver(&mut self, family: &str, group: &str) -> Option { + debug!( + "Attempting to connect to family '{}', group '{}'", + family, group + ); + + // Try to use existing netlink resolver first + let group_id = if let Some(ref resolver) = self.nl_resolver { + match resolver.resolve_nl_mcast_group(family, group) { + Ok(id) => { + debug!( + "Resolved group ID {} for family '{}', group '{}' (using netlink resolver)", + id, family, group + ); + id + } + Err(e) => { + debug!( + "Failed to resolve group with netlink resolver: {:?}, recreating resolver", + e + ); + // Resolver might be stale, recreate it + self.nl_resolver = Self::create_nl_resolver(); + + // Try again with new resolver + if let Some(ref resolver) = self.nl_resolver { + match resolver.resolve_nl_mcast_group(family, group) { + Ok(id) => { + debug!("Resolved group ID {} for family '{}', group '{}' (using new netlink resolver)", id, family, group); + id + } + Err(e) => { + warn!("Failed to resolve group id for family '{}', group '{}' with new netlink resolver: {:?}", family, group, e); + warn!( + "This suggests the family '{}' is not registered in the kernel", + family + ); + return None; + } + } + } else { + // Fallback to creating temporary router + return Self::connect_fallback(family, group); + } + } + } + } else { + // Create netlink resolver if not available + self.nl_resolver = Self::create_nl_resolver(); + + if let Some(ref resolver) = self.nl_resolver { + match resolver.resolve_nl_mcast_group(family, group) { + Ok(id) => { + debug!("Resolved group ID {} for family '{}', group '{}' (using new netlink resolver)", id, family, group); + id + } + Err(e) => { + warn!( + "Failed to resolve group id for family '{}', group '{}': {:?}", + family, group, e + ); + warn!( + "This suggests the family '{}' is not registered in the kernel", + family + ); + return None; + } + } + } else { + // Fallback to creating temporary router + return Self::connect_fallback(family, group); + } + }; + + debug!( + "Creating socket for family '{}' with group_id {}", + family, group_id + ); + let socket = match SocketType::connect( + NlFamily::Generic, + // 0 is pid of kernel -> socket is connected to kernel + Some(0), + Groups::empty(), + ) { + Ok(socket) => socket, + Err(e) => { + warn!("Failed to connect socket: {:?}", e); + return None; + } + }; + + debug!("Adding multicast membership for group_id {}", group_id); + match socket.add_mcast_membership(Groups::new_groups(&[group_id])) { + Ok(_) => { + info!( + "Successfully connected to family '{}', group '{}' with group_id: {}", + family, group, group_id + ); + debug!("Socket created successfully, ready to receive multicast messages on group_id: {}", group_id); + Some(socket) + } + Err(e) => { + warn!( + "Failed to add mcast membership for group_id {}: {:?}", + group_id, e + ); + // Explicitly drop the socket to ensure it's closed + drop(socket); + None + } + } + } + + /// Mock connection method using shared router for testing. + #[cfg(test)] + fn connect_with_nl_resolver(&mut self, _family: &str, _group: &str) -> Option { + // For tests, we always allow successful connections + // The MockSocket itself will control data availability + let sock = SocketType::new(); + if sock.valid { + debug!("Test: Created new valid MockSocket"); + Some(sock) + } else { + debug!("Test: MockSocket reports invalid, connection failed"); + None + } + } + + /// Fallback connection method when shared router is not available. + #[cfg(not(test))] + fn connect_fallback(family: &str, group: &str) -> Option { + debug!( + "Using fallback connection for family '{}', group '{}'", + family, group + ); + + let (sock, _) = match NlRouter::connect( + NlFamily::Generic, + // 0 is pid of kernel -> socket is connected to kernel + Some(0), + Groups::empty(), + ) { + Ok(result) => result, + Err(e) => { + warn!("Failed to connect to netlink router: {:?}", e); + warn!("Possible causes: insufficient permissions, netlink not supported, or kernel module not loaded"); + return None; + } + }; + + debug!( + "Router connected, resolving group ID for family '{}', group '{}'", + family, group + ); + let group_id = match sock.resolve_nl_mcast_group(family, group) { + Ok(id) => { + debug!( + "Resolved group ID {} for family '{}', group '{}'", + id, family, group + ); + id + } + Err(e) => { + warn!( + "Failed to resolve group id for family '{}', group '{}': {:?}", + family, group, e + ); + warn!( + "This suggests the family '{}' is not registered in the kernel", + family + ); + // Explicitly drop the temporary router to ensure it's closed + drop(sock); + return None; + } + }; + + debug!( + "Creating socket for family '{}' with group_id {}", + family, group_id + ); + let socket = match SocketType::connect( + NlFamily::Generic, + // 0 is pid of kernel -> socket is connected to kernel + Some(0), + Groups::empty(), + ) { + Ok(socket) => socket, + Err(e) => { + warn!("Failed to connect socket: {:?}", e); + // Explicitly drop the temporary router to ensure it's closed + drop(sock); + return None; + } + }; + + debug!("Adding multicast membership for group_id {}", group_id); + match socket.add_mcast_membership(Groups::new_groups(&[group_id])) { + Ok(_) => { + info!( + "Successfully connected to family '{}', group '{}' with group_id: {}", + family, group, group_id + ); + debug!("Socket created successfully, ready to receive multicast messages on group_id: {}", group_id); + // Explicitly drop the temporary router since we no longer need it + drop(sock); + Some(socket) + } + Err(e) => { + warn!( + "Failed to add mcast membership for group_id {}: {:?}", + group_id, e + ); + // Explicitly drop both socket and temporary router to ensure they're closed + drop(socket); + drop(sock); + None + } + } + } + + /// Attempts to establish a connection on demand. + /// + /// This will be called when receiving a Reconnect command from ControlNetlinkActor. + /// Implements socket health checking - if current socket hasn't received data recently, + /// it will be closed and replaced with a new connection. + fn connect(&mut self) { + // Check if current socket is healthy + if let Some(_socket) = &self.socket { + if let Some(last_data_time) = self.last_data_time { + let time_since_last_data = Instant::now().duration_since(last_data_time); + if time_since_last_data.as_secs() > SOCKET_HEALTH_TIMEOUT_SECS { + warn!( + "Socket unhealthy - no data received for {} seconds, forcing reconnection", + time_since_last_data.as_secs() + ); + // Close the unhealthy socket + self.socket = None; + self.last_data_time = None; + } else { + debug!( + "Socket healthy - data received {} seconds ago, skipping reconnect", + time_since_last_data.as_secs() + ); + return; + } + } else { + // Socket exists but no data ever received - consider it new + debug!("Socket exists but no data received yet, skipping reconnect"); + return; + } + } + + debug!( + "Establishing new connection for family '{}', group '{}'", + self.family, self.group + ); + self.socket = self.connect_with_nl_resolver(&self.family.clone(), &self.group.clone()); + if self.socket.is_some() { + info!( + "Successfully connected to family '{}', group '{}'", + self.family, self.group + ); + self.last_data_time = None; // Reset data time for new socket + } else { + warn!( + "Failed to connect to family '{}', group '{}'", + self.family, self.group + ); + // Clear the resolver as it might be stale + self.nl_resolver = None; + } + } + + /// Disconnects the current socket. + /// + /// This will be called when there's a socket error, to clean up the connection + /// and wait for ControlNetlinkActor to send a reconnect command. + fn disconnect(&mut self) { + if self.socket.is_some() { + debug!( + "Disconnecting socket for family '{}', group '{}'", + self.family, self.group + ); + self.socket = None; + self.last_data_time = None; + // Clear the resolver as it might be stale + self.nl_resolver = None; + } + } + + /// Resets the actor's configuration and attempts to connect. + /// + /// # Arguments + /// + /// * `family` - New family name to use + /// * `group` - New group name to use + fn reset(&mut self, family: &str, group: &str) { + debug!( + "Resetting connection: family '{}' -> '{}', group '{}' -> '{}'", + self.family, family, self.group, group + ); + self.family = family.to_string(); + self.group = group.to_string(); + self.connect(); + } + + /// Attempts to receive messages from the netlink socket. + /// + /// Returns immediately with WouldBlock if no data is available, allowing + /// the event loop to handle other operations concurrently. + /// + /// This function handles multiple scenarios: + /// 1. Single complete message in one recv + /// 2. Multiple complete messages in one recv + /// 3. Incomplete message that needs to be combined with future recv data + async fn try_recv( + socket: Option<&mut SocketType>, + message_parser: &mut NetlinkMessageParser + ) -> Result, io::Error> { + let socket = socket + .ok_or_else(|| io::Error::new(io::ErrorKind::NotConnected, "No socket available"))?; + + let mut buffer = vec![0; BUFFER_SIZE]; + + // Try to receive with MSG_DONTWAIT to make it non-blocking + debug!("Attempting to receive netlink message..."); + let result = socket.recv(&mut buffer, Msg::DONTWAIT); + + match result { + Ok((size, _groups)) => { + debug!("Received netlink data, size: {} bytes", size); + + if size == 0 { + return Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + "No more data to receive", + )); + } + + // Resize buffer to actual received size + buffer.resize(size, 0); + + // Parse buffer which may contain multiple messages and/or incomplete messages + let messages = message_parser.parse_buffer(&buffer)?; + debug!("Parsed {} complete messages from {} bytes of data", messages.len(), size); + + Ok(messages) + } + Err(e) => { + debug!( + "Socket recv failed: {:?} (raw_os_error: {:?})", + e, + e.raw_os_error() + ); + Err(e) + } + } + } + + /// Checks for socket readiness without unsafe operations. + /// + /// This is a safer alternative that uses tokio's timeout mechanism + /// instead of direct file descriptor polling with unsafe operations. + /// + /// # Arguments + /// + /// * `timeout_ms` - Timeout in milliseconds + /// + /// # Returns + /// + /// A boolean indicating if data socket has data + async fn check_socket_readiness(timeout_ms: u64) -> Result { + // In test environment, always return true to let try_recv() handle the actual data availability + #[cfg(test)] + { + // Simulate minimal polling delay + sleep(Duration::from_millis(std::cmp::min(timeout_ms, 1))); + // Always return true in test mode - let MockSocket.recv() handle availability + return Ok(true); + } + + #[cfg(not(test))] + { + use tokio::time::{sleep as tokio_sleep, Duration as TokioDuration}; + + // For production, we simply wait for the timeout period + // This approach avoids unsafe operations but is less efficient + // The actual socket readiness will be checked by try_recv() calls + tokio_sleep(TokioDuration::from_millis(timeout_ms)).await; + + // Always return that data might be ready, let try_recv() handle the actual check + // This is safe but potentially less efficient than direct polling + Ok(true) + } + } + + /// Continuously processes incoming netlink messages and control commands. + /// The loop will exit when the command channel is closed or a Close command is received. + /// + /// # Arguments + /// + /// * `actor` - The DataNetlinkActor instance to run + pub async fn run(mut actor: DataNetlinkActor) { + debug!( + "Starting DataNetlinkActor with {} buffer recipients configured", + actor.buffer_recipients.len() + ); + let mut heartbeat_counter = 0u32; + let mut consecutive_failures = 0u32; + + loop { + // Log heartbeat every 5 minutes to show the actor is running + heartbeat_counter += 1; + if heartbeat_counter % HEARTBEAT_LOG_INTERVAL == 0 { + info!("DataNetlinkActor is running normally - waiting for data messages"); + } + + // More frequent debug info about socket status + if heartbeat_counter % DEBUG_LOG_INTERVAL == 0 { + debug!( + "DataNetlinkActor heartbeat: socket={}, recipients={}, failures={}", + actor.socket.is_some(), + actor.buffer_recipients.len(), + consecutive_failures + ); + if actor.socket.is_some() { + debug!("Socket is available and we are actively trying to receive messages"); + consecutive_failures = 0; // Reset failure counter when socket is available + } + } + + // Check for pending commands first (non-blocking) + if let Ok(command) = actor.command_recipient.try_recv() { + match command { + NetlinkCommand::SocketConnect(SocketConnect { family, group }) => { + actor.reset(&family, &group); + consecutive_failures = 0; // Reset failure counter on reconnect command + } + NetlinkCommand::Reconnect => { + actor.connect(); + consecutive_failures = 0; // Reset failure counter on reconnect command + } + NetlinkCommand::Close => { + break; + } + } + continue; + } + + // Check socket readiness with configurable timeout to allow periodic checks + match Self::check_socket_readiness(SOCKET_READINESS_TIMEOUT_MS).await { + Ok(data_ready) => { + // Only try to receive data if we have a socket and data is ready + if actor.socket.is_some() && data_ready { + match Self::try_recv(actor.socket.as_mut(), &mut actor.message_parser).await { + Ok(messages) => { + consecutive_failures = 0; // Reset failure counter on successful receive + actor.last_data_time = Some(Instant::now()); // Update data reception timestamp + + if messages.is_empty() { + debug!("Received data but no complete messages yet (partial message)"); + } else { + debug!("Successfully parsed {} complete netlink messages", messages.len()); + + // Send each complete netlink message individually to all recipients + // This ensures each IPFIX message (contained in one netlink message) + // is sent as a separate operation to the downstream actors + for (i, message) in messages.iter().enumerate() { + debug!("Processing netlink message {}/{}: {} bytes", + i + 1, messages.len(), message.len()); + + // Send this single netlink message to all recipients + for (j, recipient) in actor.buffer_recipients.iter().enumerate() { + debug!("Sending netlink message {}/{} to recipient {}", + i + 1, messages.len(), j + 1); + if let Err(e) = recipient.send(message.clone()).await { + warn!("Failed to send netlink message {}/{} to recipient {}: {:?}", + i + 1, messages.len(), j + 1, e); + // Consider removing failed recipients here if needed + } else { + debug!("Successfully sent netlink message {}/{} ({} bytes) to recipient {}", + i + 1, messages.len(), message.len(), j + 1); + } + } + } + + debug!("Completed processing {} netlink messages, each sent individually", messages.len()); + } + } + Err(e) => { + // Handle specific errors + if let Some(os_error) = e.raw_os_error() { + if os_error == ENOBUFS { + warn!("Netlink receive buffer full (ENOBUFS). Consider increasing buffer size or processing messages faster. Error: {:?}", e); + // Don't disconnect on ENOBUFS, just continue + continue; + } + } + + // Check if it's WouldBlock using standard ErrorKind + if e.kind() == io::ErrorKind::WouldBlock { + // No data available right now, continue normally + if heartbeat_counter % WOULDBLOCK_LOG_INTERVAL == 0 { + debug!("No netlink data available (WouldBlock) - socket is connected but no messages from kernel"); + } + } else { + // Socket error occurred, disconnect and try limited reconnects + warn!("Failed to receive message: {:?}", e); + actor.disconnect(); + consecutive_failures += 1; + + // Only attempt very limited local reconnects + if consecutive_failures <= MAX_LOCAL_RECONNECT_ATTEMPTS { + debug!( + "Attempting quick reconnect #{}", + consecutive_failures + ); + actor.connect(); + } else { + debug!("Too many consecutive failures, waiting for reconnect command from ControlNetlinkActor"); + } + } + } + } + } else if actor.socket.is_none() { + // No socket available, log this periodically but don't spam + if heartbeat_counter % DEBUG_LOG_INTERVAL == 0 { + debug!("No socket available - waiting for reconnect command from ControlNetlinkActor"); + } + } + } + Err(e) => { + warn!("Poll error: {:?}", e); + // Wait a bit before retrying to avoid busy loop on persistent poll errors + sleep(Duration::from_millis(SOCKET_READINESS_TIMEOUT_MS)); + } + } + } + } +} + +impl Drop for DataNetlinkActor { + fn drop(&mut self) { + if !self.command_recipient.is_closed() { + self.command_recipient.close(); + } + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use std::sync::atomic::{AtomicUsize, Ordering}; + use tokio::{spawn, sync::mpsc::channel}; + + // Helper function to create a properly sized message vector + fn create_test_message(payload: &[u8]) -> Vec { + let msg = create_mock_netlink_message(payload); + let actual_len = 20 + payload.len(); // 16 (nlmsg) + 4 (genl) + payload + msg[..actual_len].to_vec() + } + + // Test constants for simulating different message scenarios + fn get_partially_valid_messages() -> Vec> { + vec![ + create_test_message(b"PARTIALLY_VALID1"), + create_test_message(b"PARTIALLY_VALID2"), + vec![], // Empty vec simulates reconnection scenario + create_test_message(b"PARTIALLY_VALID3"), + ] + } + + fn get_valid_messages() -> Vec> { + vec![ + create_test_message(b"VALID1"), + create_test_message(b"VALID2"), + ] + } + + /// Creates a mock netlink message with proper headers for testing. + /// + /// Format: [netlink_header(16 bytes)] + [genetlink_header(4 bytes)] + [payload] + const fn create_mock_netlink_message(payload: &[u8]) -> [u8; 100] { + let mut msg = [0u8; 100]; + let total_len = 20 + payload.len(); // 16 (nlmsg) + 4 (genl) + payload + + // Netlink header (16 bytes) + msg[0] = (total_len & 0xFF) as u8; // length (little-endian) + msg[1] = ((total_len >> 8) & 0xFF) as u8; + msg[2] = ((total_len >> 16) & 0xFF) as u8; + msg[3] = ((total_len >> 24) & 0xFF) as u8; + msg[4] = 0x10; + msg[5] = 0x00; // type (mock type) + msg[6] = 0x00; + msg[7] = 0x00; // flags + msg[8] = 0x01; + msg[9] = 0x00; + msg[10] = 0x00; + msg[11] = 0x00; // seq + msg[12] = 0x00; + msg[13] = 0x00; + msg[14] = 0x00; + msg[15] = 0x00; // pid + + // Generic netlink header (4 bytes) + msg[16] = 0x01; // cmd + msg[17] = 0x00; // version + msg[18] = 0x00; + msg[19] = 0x00; // reserved + + // Copy payload + let mut i = 0; + while i < payload.len() && i < 80 { + // Leave room for headers + msg[20 + i] = payload[i]; + i += 1; + } + + msg + } + + // Use atomic counter instead of unsafe static mut for thread safety + static SOCKET_COUNT: AtomicUsize = AtomicUsize::new(0); + + /// Mock socket implementation for testing netlink functionality. + /// + /// Simulates different socket behaviors for testing reconnection logic. + pub struct MockSocket { + pub valid: bool, + budget: usize, + messages: Vec>, + fd: RawFd, // Mock file descriptor for testing + } + + impl AsRawFd for MockSocket { + fn as_raw_fd(&self) -> RawFd { + self.fd + } + } + + impl MockSocket { + /// Creates a new MockSocket for testing. + /// + /// The first socket created will have partially valid messages (including one that fails), + /// while subsequent sockets will have only valid messages. + pub fn new() -> Self { + let count = SOCKET_COUNT.fetch_add(1, Ordering::SeqCst) + 1; + + if count == 1 { + let messages = get_partially_valid_messages(); + MockSocket { + valid: true, + budget: messages.len(), + messages, + fd: 100 + count as RawFd, // Mock file descriptor + } + } else { + // All subsequent sockets are valid for simpler testing + let messages = get_valid_messages(); + MockSocket { + valid: true, // Always valid for simplicity + budget: messages.len(), + messages, + fd: 100 + count as RawFd, // Mock file descriptor + } + } + } + + /// Simulates receiving data from a netlink socket. + /// + /// # Arguments + /// + /// * `buf` - Buffer to write received data into + /// * `_flags` - Message flags (ignored in mock) + /// + /// # Returns + /// + /// Ok((size, groups)) on success, Err on failure or empty message + pub fn recv(&mut self, buf: &mut [u8], _flags: Msg) -> Result<(usize, Groups), io::Error> { + sleep(Duration::from_millis(1)); + + if self.budget == 0 { + // When there are no more messages, return WouldBlock to simulate non-blocking behavior + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "No more data available", + )); + } + + let msg_index = self.messages.len() - self.budget; + let msg = &self.messages[msg_index]; + self.budget -= 1; + + if !msg.is_empty() { + let copy_len = std::cmp::min(msg.len(), buf.len()); + buf[..copy_len].copy_from_slice(&msg[..copy_len]); + + Ok((copy_len, Groups::empty())) + } else { + Err(io::Error::new( + io::ErrorKind::ConnectionAborted, + "Simulated connection failure", + )) + } + } + } + + /// Tests the DataNetlinkActor's ability to handle partial failures and reconnection. + /// + /// This test verifies that: + /// - The actor correctly handles a mix of valid and invalid messages + /// - Reconnection occurs when an empty message is encountered + /// - All expected payload data (without headers) are eventually received + #[tokio::test] + async fn test_data_netlink() { + // Initialize logging for the test + let _ = env_logger::builder() + .filter_level(log::LevelFilter::Debug) + .is_test(true) + .try_init(); + + // Reset socket count for this test + SOCKET_COUNT.store(0, Ordering::SeqCst); + + let (command_sender, command_receiver) = channel(1); + let (buffer_sender, mut buffer_receiver) = channel(1); + + let mut actor = DataNetlinkActor::new("family", "group", command_receiver); + actor.add_recipient(buffer_sender); + + let task = spawn(DataNetlinkActor::run(actor)); + + let mut received_messages = Vec::new(); + for i in 0..3 { + // After receiving 2 messages, we expect a connection failure, so send a reconnect command + if i == 2 { + if let Err(_) = command_sender.send(NetlinkCommand::Reconnect).await { + break; + } + // Give some time for reconnection + tokio::time::sleep(Duration::from_millis(10)).await; + } + + let buffer = tokio::time::timeout( + Duration::from_secs(5), // Reduced timeout since we're handling reconnect + buffer_receiver.recv(), + ) + .await; + + match buffer { + Ok(Some(buffer)) => { + let message = String::from_utf8(buffer.to_vec()) + .expect("Failed to convert buffer to string"); + received_messages.push(message); + } + Ok(None) => { + break; + } + Err(_) => { + break; + } + } + } + + // Build expected messages: only the payload data, headers should be stripped + let expected_messages = vec![ + "PARTIALLY_VALID1".to_string(), + "PARTIALLY_VALID2".to_string(), + "VALID1".to_string(), + ]; + + assert_eq!(received_messages, expected_messages); + + let socket_count = SOCKET_COUNT.load(Ordering::SeqCst); + assert!(socket_count > 1, "Socket should have reconnected"); + + command_sender + .send(NetlinkCommand::Close) + .await + .expect("Failed to send close command"); + task.await.expect("Task should complete successfully"); + } + + /// Tests payload extraction from mock netlink messages. + #[test] + fn test_payload_extraction() { + // Test with valid message containing payload + let mock_msg = create_mock_netlink_message(b"TEST_PAYLOAD"); + let actual_len = 20 + b"TEST_PAYLOAD".len(); // 16 (nlmsg) + 4 (genl) + payload + let mut parser = NetlinkMessageParser::new(); + + let result = parser.parse_buffer(&mock_msg[..actual_len]); + assert!(result.is_ok()); + + let messages = result.unwrap(); + assert_eq!(messages.len(), 1); + + let payload = &messages[0]; + let payload_str = String::from_utf8(payload.to_vec()).unwrap(); + assert_eq!(payload_str, "TEST_PAYLOAD"); + } + + /// Tests payload extraction with minimum size message. + #[test] + fn test_payload_extraction_empty_payload() { + // Create message with headers but no payload + let mock_msg = create_mock_netlink_message(b""); + let actual_len = 20; // Only headers: 16 (nlmsg) + 4 (genl) + let mut parser = NetlinkMessageParser::new(); + + let result = parser.parse_buffer(&mock_msg[..actual_len]); + assert!(result.is_ok()); + + let messages = result.unwrap(); + assert_eq!(messages.len(), 1); + assert!(messages[0].is_empty()); + } + + /// Tests payload extraction with invalid message (too small). + #[test] + fn test_payload_extraction_invalid_message() { + // Buffer too small to contain headers + let buffer = vec![0u8; 10]; + let mut parser = NetlinkMessageParser::new(); + + let result = parser.parse_buffer(&buffer); + assert!(result.is_ok()); + + // Should have no complete messages due to insufficient data + let messages = result.unwrap(); + assert!(messages.is_empty()); + } + + /// Tests handling multiple messages in one buffer. + #[test] + fn test_multiple_messages_in_buffer() { + let mut combined_buffer = Vec::new(); + + // Create two messages + let msg1 = create_mock_netlink_message(b"MESSAGE1"); + let msg1_len = 20 + b"MESSAGE1".len(); + let msg2 = create_mock_netlink_message(b"MESSAGE2"); + let msg2_len = 20 + b"MESSAGE2".len(); + + // Combine them in one buffer (simulate receiving multiple messages in one recv) + combined_buffer.extend_from_slice(&msg1[..msg1_len]); + combined_buffer.extend_from_slice(&msg2[..msg2_len]); + + let mut parser = NetlinkMessageParser::new(); + let result = parser.parse_buffer(&combined_buffer); + assert!(result.is_ok()); + + let messages = result.unwrap(); + assert_eq!(messages.len(), 2); + + let payload1_str = String::from_utf8(messages[0].to_vec()).unwrap(); + let payload2_str = String::from_utf8(messages[1].to_vec()).unwrap(); + assert_eq!(payload1_str, "MESSAGE1"); + assert_eq!(payload2_str, "MESSAGE2"); + } + + /// Tests handling fragmented messages across multiple recv operations. + #[test] + fn test_fragmented_message() { + let msg = create_mock_netlink_message(b"FRAGMENTED_MESSAGE"); + let msg_len = 20 + b"FRAGMENTED_MESSAGE".len(); + let mut parser = NetlinkMessageParser::new(); + + // Simulate first recv getting only part of the message + let first_part = &msg[..15]; // Less than header size + let result1 = parser.parse_buffer(first_part); + assert!(result1.is_ok()); + let messages1 = result1.unwrap(); + assert!(messages1.is_empty()); // No complete messages yet + + // Simulate second recv getting the rest + let second_part = &msg[15..msg_len]; + let result2 = parser.parse_buffer(second_part); + assert!(result2.is_ok()); + let messages2 = result2.unwrap(); + assert_eq!(messages2.len(), 1); + + let payload_str = String::from_utf8(messages2[0].to_vec()).unwrap(); + assert_eq!(payload_str, "FRAGMENTED_MESSAGE"); + } + + /// Tests handling mixed scenario: complete message + partial message. + #[test] + fn test_mixed_complete_and_partial() { + let mut combined_buffer = Vec::new(); + + // First complete message + let msg1 = create_mock_netlink_message(b"COMPLETE"); + let msg1_len = 20 + b"COMPLETE".len(); + combined_buffer.extend_from_slice(&msg1[..msg1_len]); + + // Partial second message + let msg2 = create_mock_netlink_message(b"PARTIAL_MSG"); + let msg2_len = 20 + b"PARTIAL_MSG".len(); + combined_buffer.extend_from_slice(&msg2[..25]); // Only part of second message + + let mut parser = NetlinkMessageParser::new(); + let result1 = parser.parse_buffer(&combined_buffer); + assert!(result1.is_ok()); + + let messages1 = result1.unwrap(); + assert_eq!(messages1.len(), 1); // Only first complete message + + let payload1_str = String::from_utf8(messages1[0].to_vec()).unwrap(); + assert_eq!(payload1_str, "COMPLETE"); + + // Send remaining part of second message + let remaining_part = &msg2[25..msg2_len]; + let result2 = parser.parse_buffer(remaining_part); + assert!(result2.is_ok()); + + let messages2 = result2.unwrap(); + assert_eq!(messages2.len(), 1); // Second message now complete + + let payload2_str = String::from_utf8(messages2[0].to_vec()).unwrap(); + assert_eq!(payload2_str, "PARTIAL_MSG"); + } + + /// Tests the get_genl_family_group function with a valid constants file. + #[test] + fn test_get_genl_family_group() { + // Use the test constants file since the production file might not exist + let result = get_genl_family_group_from_path_safe("tests/data/constants.yml"); + assert!(result.is_ok()); + let (family, group) = result.unwrap(); + assert!(!family.is_empty()); + assert!(!group.is_empty()); + } + + /// Tests the get_genl_family_group_from_path function with a test file. + #[test] + fn test_get_genl_family_group_from_path() { + let result = get_genl_family_group_from_path_safe("/non/existent/path.yml"); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .contains("Failed to open constants file")); + } + + /// Tests the get_genl_family_group_from_path function with the test constants file. + #[test] + fn test_get_genl_family_group_from_test_file() { + let result = get_genl_family_group_from_path_safe("tests/data/constants.yml"); + assert!(result.is_ok()); + let (family, group) = result.unwrap(); + assert!(!family.is_empty()); + assert!(!group.is_empty()); + } + + /// Tests that get_genl_family_group returns default values when config file is missing. + #[test] + fn test_get_genl_family_group_defaults() { + // Create a temporary SONIC_CONSTANTS path that doesn't exist + let _original_path = SONIC_CONSTANTS; + + // Use the safe function to test default behavior + let result = get_genl_family_group_from_path_safe("/non/existent/path/constants.yml"); + assert!(result.is_err()); + + // Test the main function - it should not panic and should return defaults + // when the config file is missing (simulated by the safe function) + let (family, group) = get_genl_family_group(); + + // The function should return defaults since the production config file likely doesn't exist in test env + // Default values should be "sonic_stel" and "ipfix" + if family == "sonic_stel" && group == "ipfix" { + // This means it fell back to defaults + assert_eq!(family, "sonic_stel"); + assert_eq!(group, "ipfix"); + } else { + // If config file exists and is valid, we should get some values + assert!(!family.is_empty()); + assert!(!group.is_empty()); + } + } +} + +/// Reads the Generic Netlink family and group names from the configuration file. +/// +/// This function is used to determine which netlink family and multicast group +/// should be used for receiving SONIC STEL messages. +/// +/// # Returns +/// +/// A tuple containing (family_name, group_name). +/// +/// # Fallback Behavior +/// +/// If the configuration file cannot be read or parsed, this function will +/// use default values: ("sonic_stel", "ipfix") +pub fn get_genl_family_group() -> (String, String) { + // Default values + const DEFAULT_FAMILY: &str = "sonic_stel"; + const DEFAULT_GROUP: &str = "ipfix"; + + // Try to read from config file, use defaults if it fails + match get_genl_family_group_from_path_safe(SONIC_CONSTANTS) { + Ok((family, group)) => { + debug!( + "Loaded netlink config from '{}': family='{}', group='{}'", + SONIC_CONSTANTS, family, group + ); + (family, group) + } + Err(e) => { + warn!( + "Failed to load config from '{}': {}. Using defaults: family='{}', group='{}'", + SONIC_CONSTANTS, e, DEFAULT_FAMILY, DEFAULT_GROUP + ); + (DEFAULT_FAMILY.to_string(), DEFAULT_GROUP.to_string()) + } + } +} + +/// Safe version of get_genl_family_group_from_path that returns Result instead of panicking. +/// +/// # Arguments +/// +/// * `path` - Path to the YAML configuration file +/// +/// # Returns +/// +/// A Result containing a tuple (family_name, group_name) on success, +/// or an error message on failure. +fn get_genl_family_group_from_path_safe(path: &str) -> Result<(String, String), String> { + use std::fs::File; + use std::io::Read; + use yaml_rust::YamlLoader; + + // Try to read the YAML file + let mut file = match File::open(path) { + Ok(file) => file, + Err(e) => return Err(format!("Failed to open constants file '{}': {}", path, e)), + }; + + let mut contents = String::new(); + if let Err(e) = file.read_to_string(&mut contents) { + return Err(format!("Failed to read constants file '{}': {}", path, e)); + } + + // Parse YAML + let yaml_docs = match YamlLoader::load_from_str(&contents) { + Ok(docs) => docs, + Err(e) => return Err(format!("Failed to parse YAML in '{}': {}", path, e)), + }; + + if yaml_docs.is_empty() { + return Err(format!("Empty YAML document in constants file '{}'", path)); + } + + let yaml = &yaml_docs[0]; + + // Extract family and group with default fallback + let family = yaml["constants"]["high_frequency_telemetry"]["genl_family"] + .as_str() + .unwrap_or("sonic_stel") + .to_string(); + + let group = yaml["constants"]["high_frequency_telemetry"]["genl_multicast_group"] + .as_str() + .unwrap_or("ipfix") + .to_string(); + + Ok((family, group)) +} diff --git a/crates/countersyncd/src/actor/ipfix.rs b/crates/countersyncd/src/actor/ipfix.rs new file mode 100644 index 00000000000..0b093597aec --- /dev/null +++ b/crates/countersyncd/src/actor/ipfix.rs @@ -0,0 +1,1362 @@ +use std::{cell::RefCell, collections::LinkedList, rc::Rc, time::SystemTime}; + +use ahash::{HashMap, HashMapExt}; +use byteorder::{ByteOrder, NetworkEndian}; +use log::{debug, warn}; +use tokio::{ + select, + sync::mpsc::{Receiver, Sender}, +}; + +use ipfixrw::{ + information_elements::Formatter, + parse_ipfix_message, + parser::{DataRecord, DataRecordKey, DataRecordValue, Message}, + template_store::TemplateStore, +}; + +use super::super::message::{ + buffer::SocketBufferMessage, + ipfix::IPFixTemplatesMessage, + saistats::{SAIStat, SAIStats, SAIStatsMessage}, +}; + +/// Helper functions for debug logging formatting +impl IpfixActor { + /// Formats IPFIX template data in human-readable format for debug logging. + /// Only performs formatting if debug logging is enabled to avoid performance impact. + /// + /// # Arguments + /// + /// * `templates_data` - Raw IPFIX template bytes + /// * `key` - Template key for context + /// + /// # Returns + /// + /// Formatted string representation of the templates + fn format_templates_for_debug(templates_data: &[u8], key: &str) -> String { + let mut result = format!( + "IPFIX Templates for key '{}' (size: {} bytes):\n", + key, + templates_data.len() + ); + let mut read_size: usize = 0; + let mut template_count = 0; + + while read_size < templates_data.len() { + match get_ipfix_message_length(&templates_data[read_size..]) { + Ok(len) => { + let len = len as usize; + if read_size + len > templates_data.len() { + break; + } + + let template_data = &templates_data[read_size..read_size + len]; + result.push_str(&format!( + " Template Message {} (offset: {}, length: {}):\n", + template_count + 1, + read_size, + len + )); + + // Format header information + if template_data.len() >= 16 { + let version = NetworkEndian::read_u16(&template_data[0..2]); + let length = NetworkEndian::read_u16(&template_data[2..4]); + let export_time = NetworkEndian::read_u32(&template_data[4..8]); + let sequence_number = NetworkEndian::read_u32(&template_data[8..12]); + let observation_domain_id = NetworkEndian::read_u32(&template_data[12..16]); + + result.push_str(&format!(" Header: version={}, length={}, export_time={}, seq={}, domain_id={}\n", + version, length, export_time, sequence_number, observation_domain_id)); + } + + // Try to parse and format the template data in human-readable format + if let Ok(parsed_templates) = + Self::try_parse_ipfix_message_for_debug(template_data) + { + result.push_str(&format!(" Parsed Template Details:\n")); + result.push_str(&parsed_templates); + } else { + // Fallback to sets parsing if detailed parsing fails + result.push_str(&Self::format_ipfix_sets_for_debug(template_data)); + } + + read_size += len; + template_count += 1; + } + Err(e) => { + result.push_str(&format!( + " Error parsing message length at offset {}: {}\n", + read_size, e + )); + break; + } + } + } + + result.push_str(&format!( + " Total templates processed: {}\n", + template_count + )); + result + } + + /// Formats IPFIX sets within a message for debug logging. + /// Parses and displays set headers (set ID, length) and basic content information. + /// + /// # Arguments + /// + /// * `message_data` - Raw IPFIX message bytes including header + /// + /// # Returns + /// + /// Formatted string representation of the sets within the message + fn format_ipfix_sets_for_debug(message_data: &[u8]) -> String { + let mut result = String::new(); + + // Skip IPFIX message header (16 bytes) to get to sets + if message_data.len() < 16 { + result.push_str(" Error: Message too short for IPFIX header\n"); + return result; + } + + let mut offset = 16; // Start after IPFIX header + let mut set_count = 0; + + result.push_str(" Sets within message:\n"); + + while offset + 4 <= message_data.len() { + // Each set starts with 4-byte header: set_id (2 bytes) + length (2 bytes) + let set_id = NetworkEndian::read_u16(&message_data[offset..offset + 2]); + let set_length = NetworkEndian::read_u16(&message_data[offset + 2..offset + 4]); + + set_count += 1; + + // Validate set length + if set_length < 4 { + result.push_str(&format!( + " Set {}: INVALID (set_id={}, length={} < 4)\n", + set_count, set_id, set_length + )); + break; + } + + if offset + set_length as usize > message_data.len() { + result.push_str(&format!( + " Set {}: TRUNCATED (set_id={}, length={}, exceeds message boundary)\n", + set_count, set_id, set_length + )); + break; + } + + // Determine set type based on set_id + let set_type = if set_id == 2 { + "Template Set" + } else if set_id == 3 { + "Options Template Set" + } else if set_id >= 256 { + "Data Set" + } else { + "Reserved/Unknown" + }; + + result.push_str(&format!( + " Set {} (offset: {}, set_id: {}, length: {} bytes, type: {})\n", + set_count, offset, set_id, set_length, set_type + )); + + // For data sets, show complete structure info + if set_id >= 256 && set_length > 4 { + let data_length = set_length as usize - 4; // Exclude 4-byte set header + let data_start = offset + 4; + result.push_str(&format!( + " Data payload: {} bytes", + data_length + )); + + // Show complete data payload + if data_length > 0 { + let data_bytes = &message_data[data_start..data_start + data_length]; + let hex_data = data_bytes + .iter() + .map(|b| format!("{:02x}", b)) + .collect::>() + .join(" "); + + // Format with line breaks for better readability if data is long + if data_length <= 32 { + // Short data on single line + result.push_str(&format!(" [{}]\n", hex_data)); + } else { + // Long data with line breaks every 16 bytes + result.push_str(":\n"); + for (i, chunk) in data_bytes.chunks(16).enumerate() { + let chunk_hex = chunk + .iter() + .map(|b| format!("{:02x}", b)) + .collect::>() + .join(" "); + result.push_str(&format!( + " {:04x}: {}\n", + i * 16, + chunk_hex + )); + } + } + } else { + result.push_str("\n"); + } + } + + // Move to next set + offset += set_length as usize; + } + + if set_count == 0 { + result.push_str(" No valid sets found\n"); + } else { + result.push_str(&format!(" Total sets: {}\n", set_count)); + } + + result + } + + /// Formats IPFIX data records in human-readable format for debug logging. + /// Only performs formatting if debug logging is enabled to avoid performance impact. + /// + /// # Arguments + /// + /// * `records_data` - Raw IPFIX data record bytes + /// + /// # Returns + /// + /// Formatted string representation of the data records + fn format_records_for_debug(records_data: &[u8]) -> String { + let mut result = format!("IPFIX Data Records (size: {} bytes):\n", records_data.len()); + let mut read_size: usize = 0; + let mut message_count = 0; + + while read_size < records_data.len() { + match get_ipfix_message_length(&records_data[read_size..]) { + Ok(len) => { + let len = len as usize; + if read_size + len > records_data.len() { + break; + } + + let message_data = &records_data[read_size..read_size + len]; + result.push_str(&format!( + " Data Message {} (offset: {}, length: {}):\n", + message_count + 1, + read_size, + len + )); + + // Format header information + if message_data.len() >= 16 { + let version = NetworkEndian::read_u16(&message_data[0..2]); + let length = NetworkEndian::read_u16(&message_data[2..4]); + let export_time = NetworkEndian::read_u32(&message_data[4..8]); + let sequence_number = NetworkEndian::read_u32(&message_data[8..12]); + let observation_domain_id = NetworkEndian::read_u32(&message_data[12..16]); + + result.push_str(&format!(" Header: version={}, length={}, export_time={}, seq={}, domain_id={}\n", + version, length, export_time, sequence_number, observation_domain_id)); + } + + // Try to parse and format the data records in human-readable format + if let Ok(parsed_message) = + Self::try_parse_ipfix_message_for_debug(message_data) + { + result.push_str(&format!(" Parsed Data Records:\n")); + result.push_str(&parsed_message); + } else { + // Fallback to sets parsing if detailed parsing fails + result.push_str(&Self::format_ipfix_sets_for_debug(message_data)); + } + + read_size += len; + message_count += 1; + } + Err(e) => { + result.push_str(&format!( + " Error parsing message length at offset {}: {}\n", + read_size, e + )); + break; + } + } + } + + result.push_str(&format!(" Total messages processed: {}\n", message_count)); + result + } + + /// Attempts to parse an IPFIX message for debug formatting purposes. + /// Returns a human-readable representation of the data records if successful. + /// + /// # Arguments + /// + /// * `message_data` - Raw IPFIX message bytes + /// + /// # Returns + /// + /// Result containing formatted string if parsing succeeds, error otherwise + fn try_parse_ipfix_message_for_debug(message_data: &[u8]) -> Result { + // Create a separate temporary cache for debug parsing to avoid borrowing conflicts + let temp_cache = IpfixCache::new(); + + // Try to parse the IPFIX message + let parsed_message = parse_ipfix_message( + &message_data, + temp_cache.templates.clone(), + temp_cache.formatter.clone(), + ) + .map_err(|_| "Failed to parse IPFIX message")?; + + let mut result = String::new(); + + // Format each set in the message + for (set_index, set) in parsed_message.sets.iter().enumerate() { + result.push_str(&format!( + " Set {} (records type: {:?}):\n", + set_index + 1, + std::mem::discriminant(&set.records) + )); + + match &set.records { + ipfixrw::parser::Records::Data { set_id, data } => { + result.push_str(&format!( + " Type: Data Set (template_id: {})\n", + set_id + )); + result.push_str(&format!(" Data records count: {}\n", data.len())); + + // Format each data record + for (record_index, record) in data.iter().enumerate() { + result.push_str(&format!( + " Record {} ({} fields):\n", + record_index + 1, + record.values.len() + )); + + for (field_key, field_value) in &record.values { + let field_desc = match field_key { + DataRecordKey::Unrecognized(field_spec) => { + let enterprise = field_spec + .enterprise_number + .map_or("None".to_string(), |e| e.to_string()); + format!( + "Field(id={}, enterprise={})", + field_spec.information_element_identifier, enterprise + ) + } + DataRecordKey::Str(s) => format!("String Field: {}", s), + DataRecordKey::Err(e) => format!("Error Field: {:?}", e), + }; + + let value_desc = match field_value { + DataRecordValue::Bytes(bytes) => { + if bytes.len() <= 8 { + // Try to interpret as different numeric types + let hex_str = bytes + .iter() + .map(|b| format!("{:02x}", b)) + .collect::>() + .join(" "); + if bytes.len() == 1 { + format!("u8={}, hex=[{}]", bytes[0], hex_str) + } else if bytes.len() == 2 { + format!( + "u16={}, hex=[{}]", + NetworkEndian::read_u16(bytes), + hex_str + ) + } else if bytes.len() == 4 { + format!( + "u32={}, hex=[{}]", + NetworkEndian::read_u32(bytes), + hex_str + ) + } else if bytes.len() == 8 { + format!( + "u64={}, hex=[{}]", + NetworkEndian::read_u64(bytes), + hex_str + ) + } else { + format!("bytes({})=[{}]", bytes.len(), hex_str) + } + } else { + // For longer byte arrays, just show length and first few bytes + let preview = bytes + .iter() + .take(8) + .map(|b| format!("{:02x}", b)) + .collect::>() + .join(" "); + format!("bytes({})=[{} ...]", bytes.len(), preview) + } + } + DataRecordValue::String(s) => format!("string=\"{}\"", s), + DataRecordValue::U8(v) => format!("u8={}", v), + DataRecordValue::U16(v) => format!("u16={}", v), + DataRecordValue::U32(v) => format!("u32={}", v), + DataRecordValue::U64(v) => format!("u64={}", v), + DataRecordValue::I8(v) => format!("i8={}", v), + DataRecordValue::I16(v) => format!("i16={}", v), + DataRecordValue::I32(v) => format!("i32={}", v), + DataRecordValue::I64(v) => format!("i64={}", v), + DataRecordValue::F32(v) => format!("f32={}", v), + DataRecordValue::F64(v) => format!("f64={}", v), + _ => format!("unknown_value={:?}", field_value), + }; + + result.push_str(&format!(" {}: {}\n", field_desc, value_desc)); + } + } + } + _ => { + // For template sets and other types, show basic information + result.push_str(&format!(" Type: Template or other set type\n")); + // We can use the iterator methods to get template information if needed + let template_count = parsed_message.iter_template_records().count(); + if template_count > 0 { + result.push_str(&format!(" Templates found: {}\n", template_count)); + for (template_index, template) in + parsed_message.iter_template_records().enumerate() + { + result.push_str(&format!( + " Template {} (ID: {}, field_count: {}):\n", + template_index + 1, + template.template_id, + template.field_specifiers.len() + )); + for (field_index, field) in template.field_specifiers.iter().enumerate() + { + let enterprise = field + .enterprise_number + .map_or("None".to_string(), |e| e.to_string()); + result.push_str(&format!( + " Field {}: ID={}, length={}, enterprise={}\n", + field_index + 1, + field.information_element_identifier, + field.field_length, + enterprise + )); + } + } + } + } + } + } + + Ok(result) + } +} + +/// Cache for IPFIX templates and formatting data +struct IpfixCache { + pub templates: TemplateStore, + pub formatter: Rc, + pub last_observer_time: Option, +} + +impl IpfixCache { + /// Creates a new IPFIX cache with current timestamp as initial observer time + pub fn new() -> Self { + let duration_since_epoch = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("System time should be after Unix epoch"); + + IpfixCache { + templates: Rc::new(RefCell::new(HashMap::new())), + formatter: Rc::new(Formatter::new()), + last_observer_time: Some(duration_since_epoch.as_nanos() as u64), + } + } +} + +type IpfixCacheRef = Rc>; + +/// Actor responsible for processing IPFIX messages and converting them to SAI statistics. +/// +/// The IpfixActor handles: +/// - Processing IPFIX template messages to understand data structure +/// - Parsing IPFIX data records and extracting SAI statistics +/// - Managing template mappings between temporary and applied states +/// - Distributing parsed statistics to multiple recipients +pub struct IpfixActor { + /// List of channels to send processed SAI statistics to + saistats_recipients: LinkedList>, + /// Channel for receiving IPFIX template messages + template_recipient: Receiver, + /// Channel for receiving IPFIX data records + record_recipient: Receiver, + /// Mapping from template ID to message key for temporary templates + temporary_templates_map: HashMap, + /// Mapping from message key to template IDs for applied templates + applied_templates_map: HashMap>, + /// Mapping from message key to object names for converting label IDs + object_names_map: HashMap>, +} + +impl IpfixActor { + /// Creates a new IpfixActor instance. + /// + /// # Arguments + /// + /// * `template_recipient` - Channel for receiving IPFIX template messages + /// * `record_recipient` - Channel for receiving IPFIX data records + /// + /// # Returns + /// + /// A new IpfixActor instance with empty recipient lists and template maps + pub fn new( + template_recipient: Receiver, + record_recipient: Receiver, + ) -> Self { + IpfixActor { + saistats_recipients: LinkedList::new(), + template_recipient, + record_recipient, + temporary_templates_map: HashMap::new(), + applied_templates_map: HashMap::new(), + object_names_map: HashMap::new(), + } + } + + /// Adds a new recipient channel for receiving processed SAI statistics. + /// + /// # Arguments + /// + /// * `recipient` - Channel sender for distributing SAI statistics messages + pub fn add_recipient(&mut self, recipient: Sender) { + self.saistats_recipients.push_back(recipient); + } + + /// Stores template information temporarily until it's applied to actual data. + /// + /// # Arguments + /// + /// * `msg_key` - Unique key identifying the template message + /// * `templates` - Parsed IPFIX template message containing template definitions + fn insert_temporary_template(&mut self, msg_key: &String, templates: Message) { + templates.iter_template_records().for_each(|record| { + self.temporary_templates_map + .insert(record.template_id, msg_key.clone()); + }); + } + + /// Moves a template from temporary to applied state when it's used in data records. + /// + /// # Arguments + /// + /// * `template_id` - ID of the template to apply + fn update_applied_template(&mut self, template_id: u16) { + if !self.temporary_templates_map.contains_key(&template_id) { + return; + } + let msg_key = self + .temporary_templates_map + .get(&template_id) + .expect("Template ID should exist in temporary map") + .clone(); + let mut template_ids = Vec::new(); + self.temporary_templates_map + .iter() + .filter(|(_, v)| **v == msg_key) + .for_each(|(&k, _)| { + template_ids.push(k); + }); + self.temporary_templates_map.retain(|_, v| *v != msg_key); + self.applied_templates_map.insert(msg_key, template_ids); + } + + /// Processes IPFIX template messages and stores them for later use. + /// + /// # Arguments + /// + /// * `templates` - IPFixTemplatesMessage containing template data and metadata + fn handle_template(&mut self, templates: IPFixTemplatesMessage) { + if templates.is_delete { + // Handle template deletion + self.handle_template_deletion(&templates.key); + return; + } + + let templates_data = match templates.templates { + Some(data) => data, + None => { + warn!( + "Received template message without template data for key: {}", + templates.key + ); + return; + } + }; + + debug!( + "Processing IPFIX templates for key: {}, object_names: {:?}", + templates.key, templates.object_names + ); + + // Add detailed debug logging for template content if debug level is enabled + if log::log_enabled!(log::Level::Debug) { + let formatted_templates = + Self::format_templates_for_debug(&templates_data, &templates.key); + if !formatted_templates.is_empty() { + debug!("Received template details:\n{}", formatted_templates); + } + } + + // Store object names if provided + if let Some(object_names) = &templates.object_names { + self.object_names_map + .insert(templates.key.clone(), object_names.clone()); + } + + let cache_ref = Self::get_cache(); + let cache = cache_ref.borrow_mut(); + let mut read_size: usize = 0; + + while read_size < templates_data.len() { + let len = match get_ipfix_message_length(&templates_data[read_size..]) { + Ok(len) => len, + Err(e) => { + warn!("Failed to parse IPFIX message length: {}", e); + break; + } + }; + + // Check if the template header's length is larger than the remaining data + if read_size + len as usize > templates_data.len() { + warn!("IPFIX template header length {} exceeds remaining data size {} at offset {}, skipping this template group", + len, templates_data.len() - read_size, read_size); + break; + } + + let template = &templates_data[read_size..read_size + len as usize]; + // Parse the template message - if this fails, log error and skip this template + let new_templates: ipfixrw::parser::Message = match parse_ipfix_message( + &template, + cache.templates.clone(), + cache.formatter.clone(), + ) { + Ok(templates) => templates, + Err(e) => { + warn!( + "Failed to parse IPFIX template message for key {}: {}", + templates.key, e + ); + read_size += len as usize; + continue; + } + }; + + self.insert_temporary_template(&templates.key, new_templates); + read_size += len as usize; + } + debug!("Template handled successfully for key: {}", templates.key); + } + + /// Handles template deletion for a given key. + /// + /// # Arguments + /// + /// * `key` - The key of the template to delete + fn handle_template_deletion(&mut self, key: &str) { + debug!("Handling template deletion for key: {}", key); + + // Remove from applied templates map and get template IDs + if let Some(template_ids) = self.applied_templates_map.remove(key) { + // Remove from temporary templates map + for template_id in &template_ids { + self.temporary_templates_map.remove(template_id); + } + debug!("Removed {} templates for key: {}", template_ids.len(), key); + } + + // Also check and remove any remaining entries in temporary_templates_map + self.temporary_templates_map + .retain(|_, msg_key| msg_key != key); + + // Remove object names for this key + self.object_names_map.remove(key); + + debug!("Template deletion completed for key: {}", key); + } + + /// Processes IPFIX data records and converts them to SAI statistics. + /// + /// # Arguments + /// + /// * `records` - Raw IPFIX data record bytes + /// + /// # Returns + /// + /// Vector of SAI statistics messages parsed from the records + fn handle_record(&mut self, records: SocketBufferMessage) -> Vec { + let cache_ref = Self::get_cache(); + let mut cache = cache_ref.borrow_mut(); + let mut read_size: usize = 0; + let mut messages: Vec = Vec::new(); + + debug!("Processing IPFIX records of length: {}", records.len()); + + while read_size < records.len() { + let len = get_ipfix_message_length(&records[read_size..]); + let len = match len { + Ok(len) => { + if len as usize + read_size > records.len() { + warn!( + "Invalid IPFIX message length: {} at offset {}, exceeds buffer size {}", + len, + read_size, + records.len() + ); + break; + } + len + } + Err(e) => { + warn!( + "Failed to get IPFIX message length at offset {}: {}", + read_size, e + ); + break; + } + }; + + let data = &records[read_size..read_size + len as usize]; + // Debug log the parsed records if debug logging is enabled + if log::log_enabled!(log::Level::Debug) { + let formatted_records = Self::format_records_for_debug(data); + debug!("Received IPFIX data records: {}", formatted_records); + } + let data_message = + parse_ipfix_message(&data, cache.templates.clone(), cache.formatter.clone()); + let data_message = match data_message { + Ok(message) => message, + Err(e) => { + warn!( + "Failed to parse IPFIX data message at offset {} : {}", + read_size, e + ); + read_size += len as usize; + continue; + } + }; + + data_message.sets.iter().for_each(|set| { + if let ipfixrw::parser::Records::Data { set_id, data: _ } = set.records { + self.update_applied_template(set_id); + } + }); + let datarecords: Vec<&DataRecord> = data_message.iter_data_records().collect(); + let mut observation_time: Option; + + for record in datarecords { + observation_time = get_observation_time(record); + if observation_time.is_none() { + debug!( + "No observation time in record, use the last observer time {:?}", + cache.last_observer_time + ); + observation_time = cache.last_observer_time; + } else if let (Some(obs_time), Some(last_time)) = + (observation_time, cache.last_observer_time) + { + if obs_time > last_time { + cache.last_observer_time = observation_time; + } + } else { + // If we have observation time but no last time, update it + cache.last_observer_time = observation_time; + } + + // If we still don't have observation time, skip this record + if observation_time.is_none() { + warn!("No observation time available for record, skipping"); + continue; + } + + // Collect final stats directly + let mut final_stats: Vec = Vec::new(); + let mut template_key: Option = None; + + // Debug: Log all fields in the record to understand what we're getting + debug!("Processing record with {} fields:", record.values.len()); + for (key, val) in record.values.iter() { + match key { + DataRecordKey::Unrecognized(field_spec) => { + debug!( + " Field ID: {}, Enterprise: {:?}, Length: {}, Value: {:?}", + field_spec.information_element_identifier, + field_spec.enterprise_number, + field_spec.field_length, + val + ); + } + _ => { + debug!(" Key: {:?}, Value: {:?}", key, val); + } + } + } + + for (key, val) in record.values.iter() { + // Check if this is the observation time field or system time field + let is_time_field = match key { + DataRecordKey::Unrecognized(field_spec) => { + let field_id = field_spec.information_element_identifier; + let is_standard_field = field_spec.enterprise_number.is_none(); + + (field_id == OBSERVATION_TIME_NANOSECONDS + || field_id == OBSERVATION_TIME_SECONDS) + && is_standard_field + } + _ => false, + }; + + if is_time_field { + if let DataRecordKey::Unrecognized(field_spec) = key { + debug!( + "Skipping time field (ID: {})", + field_spec.information_element_identifier + ); + } + continue; + } + + match key { + DataRecordKey::Unrecognized(field_spec) => { + // Try to find the template key for this record to get object_names + if template_key.is_none() { + // Look up the template key from the field + // We need to find which template this field belongs to + for (_tid, msg_key) in &self.temporary_templates_map { + // This is a simplification - in reality we'd need to check + // if this specific field belongs to this template + template_key = Some(msg_key.clone()); + break; + } + // Also check applied templates + if template_key.is_none() { + for (msg_key, _) in &self.applied_templates_map { + template_key = Some(msg_key.clone()); + break; + } + } + } + + // Get object names for this template key + let object_names = template_key + .as_ref() + .and_then(|key| self.object_names_map.get(key)) + .map(|names| names.as_slice()) + .unwrap_or(&[]); + + // Create SAIStat directly + let stat = SAIStat::from_ipfix(field_spec, val, object_names); + debug!("Created SAIStat: {:?}", stat); + final_stats.push(stat); + } + _ => continue, + } + } + + let saistats = SAIStatsMessage::new(SAIStats { + observation_time: observation_time + .expect("observation_time should be Some at this point"), + stats: final_stats, + }); + + messages.push(saistats.clone()); + debug!("Record parsed {:?}", saistats); + } + read_size += len as usize; + debug!( + "Consuming IPFIX message of length: {}, rest length: {}", + len, + records.len() - read_size + ); + } + messages + } + + thread_local! { + static IPFIX_CACHE: RefCell = RefCell::new(Rc::new(RefCell::new(IpfixCache::new()))); + } + + fn get_cache() -> IpfixCacheRef { + Self::IPFIX_CACHE.with(|cache| cache.borrow().clone()) + } + + pub async fn run(mut actor: IpfixActor) { + loop { + select! { + templates = actor.template_recipient.recv() => { + match templates { + Some(templates) => { + actor.handle_template(templates); + }, + None => { + break; + } + } + }, + record = actor.record_recipient.recv() => { + match record { + Some(record) => { + let messages = actor.handle_record(record); + for recipient in &actor.saistats_recipients { + for message in &messages { + let _ = recipient.send(message.clone()).await; + } + } + }, + None => { + break; + } + } + } + } + } + } +} + +impl Drop for IpfixActor { + fn drop(&mut self) { + self.template_recipient.close(); + } +} + +/// IPFIX Information Element ID for observationTimeNanoseconds (Field ID 325). +/// +/// This field represents the absolute timestamp of the observation of the packet +/// within a nanosecond resolution. The timestamp is based on the local time zone +/// of the Exporter and is represented as nanoseconds since the UNIX epoch. +/// +/// According to IANA IPFIX Information Elements Registry: +/// - ElementId: 325 +/// - Data Type: dateTimeNanoseconds +/// - Semantics: default +/// - Status: current +const OBSERVATION_TIME_NANOSECONDS: u16 = 325; + +/// IPFIX Information Element ID for observationTimeSeconds (Field ID 322). +/// +/// This field represents the absolute timestamp of the observation of the packet +/// within a second resolution. The timestamp is based on the local time zone +/// of the Exporter and is represented as seconds since the UNIX epoch. +/// +/// According to IANA IPFIX Information Elements Registry: +/// - ElementId: 322 +/// - Data Type: dateTimeSeconds +/// - Semantics: default +/// - Status: current +const OBSERVATION_TIME_SECONDS: u16 = 322; + +/// Extracts observation time from an IPFIX data record. +/// +/// Converts timestamp to 64-bit nanoseconds following this priority: +/// 1. If 64-bit nanoseconds field exists, use it directly +/// 2. If 32-bit seconds and 32-bit nanoseconds fields exist, combine them +/// 3. Otherwise, use current UTC time as 64-bit nanoseconds timestamp +/// +/// # Arguments +/// +/// * `data_record` - The IPFIX data record to extract time from +/// +/// # Returns +/// +/// Some(timestamp_in_nanoseconds) if observation time field is present, None otherwise +fn get_observation_time(data_record: &DataRecord) -> Option { + let mut seconds_value: Option = None; + let mut nanoseconds_value: Option = None; + let mut full_nanoseconds_value: Option = None; + + // First pass: collect all time-related fields + for (key, val) in &data_record.values { + if let DataRecordKey::Unrecognized(field_spec) = key { + if field_spec.enterprise_number.is_none() { + match field_spec.information_element_identifier { + OBSERVATION_TIME_NANOSECONDS => { + debug!("Found observation time nanoseconds field with value: {:?}", val); + match val { + DataRecordValue::Bytes(bytes) => { + if bytes.len() == 8 { + full_nanoseconds_value = Some(NetworkEndian::read_u64(bytes)); + debug!("Extracted 64-bit nanoseconds: {}", full_nanoseconds_value.unwrap()); + } else if bytes.len() == 4 { + nanoseconds_value = Some(NetworkEndian::read_u32(bytes)); + debug!("Extracted 32-bit nanoseconds: {}", nanoseconds_value.unwrap()); + } + } + DataRecordValue::U64(val) => { + full_nanoseconds_value = Some(*val); + debug!("Extracted 64-bit nanoseconds (u64): {}", val); + } + DataRecordValue::U32(val) => { + nanoseconds_value = Some(*val); + debug!("Extracted 32-bit nanoseconds (u32): {}", val); + } + _ => { + debug!("Observation time nanoseconds field has unexpected value type: {:?}", val); + } + } + } + OBSERVATION_TIME_SECONDS => { + debug!("Found observation time seconds field with value: {:?}", val); + match val { + DataRecordValue::Bytes(bytes) => { + if bytes.len() == 4 { + seconds_value = Some(NetworkEndian::read_u32(bytes)); + debug!("Extracted 32-bit seconds: {}", seconds_value.unwrap()); + } + } + DataRecordValue::U32(val) => { + seconds_value = Some(*val); + debug!("Extracted 32-bit seconds (u32): {}", val); + } + _ => { + debug!("Observation time seconds field has unexpected value type: {:?}", val); + } + } + } + _ => {} // Ignore other fields + } + } + } + } + + // Priority 1: Use 64-bit nanoseconds directly if available + if let Some(nano_time) = full_nanoseconds_value { + debug!("Using 64-bit nanoseconds timestamp: {}", nano_time); + return Some(nano_time); + } + + // Priority 2: Combine 32-bit seconds and 32-bit nanoseconds + if let (Some(seconds), Some(nanoseconds)) = (seconds_value, nanoseconds_value) { + let combined_timestamp = (seconds as u64) * 1_000_000_000 + (nanoseconds as u64); + debug!("Combined timestamp from seconds({}) and nanoseconds({}): {}", + seconds, nanoseconds, combined_timestamp); + return Some(combined_timestamp); + } + + // Priority 3: Use current UTC time + debug!("No complete observation time fields found, using current UTC time"); + let current_time = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("System time should be after Unix epoch") + .as_nanos() as u64; + debug!("Using current UTC time as observation time: {}", current_time); + Some(current_time) +} + +/// Parse IPFIX message length according to IPFIX RFC specification +/// IPFIX message length is stored in bytes 2-3 of the message header (16-bit network byte order) +fn get_ipfix_message_length(data: &[u8]) -> Result { + if data.len() < 4 { + return Err("Data too short for IPFIX header"); + } + // IPFIX message length is at byte positions 2-3 (0-indexed) + Ok(NetworkEndian::read_u16(&data[2..4])) +} + +#[cfg(test)] +mod test { + use super::*; + use log::LevelFilter::Debug; + use std::io::Write; + use std::sync::{Arc, Mutex, Once, OnceLock}; + use tokio::sync::mpsc::channel; + + static INIT_ENV_LOGGER: Once = Once::new(); + static LOG_BUFFER: OnceLock>>> = OnceLock::new(); + + fn get_log_buffer() -> &'static Arc>> { + LOG_BUFFER.get_or_init(|| Arc::new(Mutex::new(Vec::new()))) + } + + pub fn capture_logs() -> String { + INIT_ENV_LOGGER.call_once(|| { + // Try to initialize env_logger, but ignore if already initialized + let _ = env_logger::builder() + .is_test(true) + .filter_level(Debug) + .format({ + let buffer = get_log_buffer().clone(); + move |_, record| { + let mut buffer = buffer.lock().unwrap(); + writeln!(buffer, "[{}] {}", record.level(), record.args()).unwrap(); + Ok(()) + } + }) + .try_init(); + }); + + let buffer = get_log_buffer().lock().unwrap(); + String::from_utf8(buffer.clone()).expect("Log buffer should be valid UTF-8") + } + + pub fn clear_logs() { + let mut buffer = get_log_buffer().lock().unwrap(); + buffer.clear(); + } + + #[allow(dead_code)] + pub fn assert_logs(expected: Vec<&str>) { + let logs_string = capture_logs(); + let mut logs = logs_string.lines().collect::>(); + let mut reverse_expected = expected.clone(); + reverse_expected.reverse(); + logs.reverse(); + + let mut match_count = 0; + for line in logs { + if reverse_expected.is_empty() { + break; + } + if line.contains(reverse_expected[match_count]) { + match_count += 1; + } + + if match_count == reverse_expected.len() { + break; + } + } + assert_eq!( + match_count, + expected.len(), + "\nexpected logs \n{}\n, got logs \n{}\n", + expected.join("\n"), + logs_string + ); + } + + #[tokio::test] + async fn test_ipfix() { + clear_logs(); // Clear any previous logs to ensure clean test state + capture_logs(); + let (buffer_sender, buffer_receiver) = channel(1); + let (template_sender, template_receiver) = channel(1); + let (saistats_sender, mut saistats_receiver) = channel(100); + let mut actor = IpfixActor::new(template_receiver, buffer_receiver); + actor.add_recipient(saistats_sender); + + let actor_handle = tokio::task::spawn_blocking(move || { + // Create a new runtime for the IPFIX actor to ensure thread-local variables work correctly + let rt = tokio::runtime::Runtime::new() + .expect("Failed to create runtime for IPFIX actor test"); + rt.block_on(async move { + IpfixActor::run(actor).await; + }); + }); + + let template_bytes: [u8; 88] = [ + 0x00, 0x0A, 0x00, 0x2C, // line 0 Packet 1 + 0x00, 0x00, 0x00, 0x00, // line 1 + 0x00, 0x00, 0x00, 0x01, // line 2 + 0x00, 0x00, 0x00, 0x00, // line 3 + 0x00, 0x02, 0x00, 0x1C, // line 4 + 0x01, 0x00, 0x00, 0x03, // line 5 Template ID 256, 3 fields + 0x01, 0x45, 0x00, 0x08, // line 6 Field ID 325, 4 bytes + 0x80, 0x01, 0x00, 0x08, // line 7 Field ID 128, 8 bytes + 0x00, 0x01, 0x00, 0x02, // line 8 Enterprise Number 1, Field ID 1 + 0x80, 0x02, 0x00, 0x08, // line 9 Field ID 129, 8 bytes + 0x80, 0x03, 0x80, 0x04, // line 10 Enterprise Number 128, Field ID 2 + 0x00, 0x0A, 0x00, 0x2C, // line 0 Packet 2 + 0x00, 0x00, 0x00, 0x00, // line 1 + 0x00, 0x00, 0x00, 0x01, // line 2 + 0x00, 0x00, 0x00, 0x00, // line 3 + 0x00, 0x02, 0x00, 0x1C, // line 4 + 0x01, 0x01, 0x00, 0x03, // line 5 Template ID 257, 3 fields + 0x01, 0x45, 0x00, 0x08, // line 6 Field ID 325, 4 bytes + 0x80, 0x01, 0x00, 0x08, // line 7 Field ID 128, 8 bytes + 0x00, 0x01, 0x00, 0x02, // line 8 Enterprise Number 1, Field ID 1 + 0x80, 0x02, 0x00, 0x08, // line 9 Field ID 129, 8 bytes + 0x80, 0x03, 0x80, 0x04, // line 10 Enterprise Number 128, Field ID 2 + ]; + + template_sender + .send(IPFixTemplatesMessage::new( + String::from("test_key"), + Arc::new(Vec::from(template_bytes)), + Some(vec!["Ethernet0".to_string(), "Ethernet1".to_string()]), + )) + .await + .unwrap(); + + // Wait for the template to be processed + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + let invalid_len_record: [u8; 20] = [ + 0x00, 0x0A, 0x00, 0x48, // line 0 Packet 1 + 0x00, 0x00, 0x00, 0x00, // line 1 + 0x00, 0x00, 0x00, 0x02, // line 2 + 0x00, 0x00, 0x00, 0x00, // line 3 + 0x01, 0x00, 0x00, 0x1C, // line 4 Record 1 + ]; + buffer_sender + .send(Arc::new(Vec::from(invalid_len_record))) + .await + .unwrap(); + + let unknown_record: [u8; 44] = [ + 0x00, 0x0A, 0x00, 0x2C, // line 0 Packet 1 + 0x00, 0x00, 0x00, 0x00, // line 1 + 0x00, 0x00, 0x00, 0x02, // line 2 + 0x00, 0x00, 0x00, 0x00, // line 3 + 0x03, 0x00, 0x00, 0x1C, // line 4 Record 1 + 0x00, 0x00, 0x00, 0x00, // line 5 + 0x00, 0x00, 0x00, 0x01, // line 6 + 0x00, 0x00, 0x00, 0x00, // line 7 + 0x00, 0x00, 0x00, 0x01, // line 8 + 0x00, 0x00, 0x00, 0x00, // line 9 + 0x00, 0x00, 0x00, 0x01, // line 10 + ]; + buffer_sender + .send(Arc::new(Vec::from(unknown_record))) + .await + .unwrap(); + + // contains data sets for templates 999, 500, 999 + let valid_records_bytes: [u8; 144] = [ + 0x00, 0x0A, 0x00, 0x48, // line 0 Packet 1 + 0x00, 0x00, 0x00, 0x00, // line 1 + 0x00, 0x00, 0x00, 0x02, // line 2 + 0x00, 0x00, 0x00, 0x00, // line 3 + 0x01, 0x00, 0x00, 0x1C, // line 4 Record 1 + 0x00, 0x00, 0x00, 0x00, // line 5 + 0x00, 0x00, 0x00, 0x01, // line 6 + 0x00, 0x00, 0x00, 0x00, // line 7 + 0x00, 0x00, 0x00, 0x01, // line 8 + 0x00, 0x00, 0x00, 0x00, // line 9 + 0x00, 0x00, 0x00, 0x01, // line 10 + 0x01, 0x00, 0x00, 0x1C, // line 11 Record 2 + 0x00, 0x00, 0x00, 0x00, // line 12 + 0x00, 0x00, 0x00, 0x02, // line 13 + 0x00, 0x00, 0x00, 0x00, // line 14 + 0x00, 0x00, 0x00, 0x02, // line 15 + 0x00, 0x00, 0x00, 0x00, // line 16 + 0x00, 0x00, 0x00, 0x03, // line 17 + 0x00, 0x0A, 0x00, 0x48, // line 18 Packet 2 + 0x00, 0x00, 0x00, 0x00, // line 19 + 0x00, 0x00, 0x00, 0x02, // line 20 + 0x00, 0x00, 0x00, 0x00, // line 21 + 0x01, 0x00, 0x00, 0x1C, // line 22 Record 1 + 0x00, 0x00, 0x00, 0x00, // line 23 + 0x00, 0x00, 0x00, 0x01, // line 24 + 0x00, 0x00, 0x00, 0x00, // line 25 + 0x00, 0x00, 0x00, 0x01, // line 26 + 0x00, 0x00, 0x00, 0x00, // line 27 + 0x00, 0x00, 0x00, 0x04, // line 28 + 0x01, 0x01, 0x00, 0x1C, // line 29 Record 2 + 0x00, 0x00, 0x00, 0x00, // line 30 + 0x00, 0x00, 0x00, 0x02, // line 31 + 0x00, 0x00, 0x00, 0x00, // line 32 + 0x00, 0x00, 0x00, 0x02, // line 33 + 0x00, 0x00, 0x00, 0x00, // line 34 + 0x00, 0x00, 0x00, 0x07, // line 35 + ]; + + buffer_sender + .send(Arc::new(Vec::from(valid_records_bytes))) + .await + .unwrap(); + + let expected_stats = vec![ + SAIStats { + observation_time: 1, + stats: vec![ + SAIStat { + object_name: "Ethernet1".to_string(), // label 2 -> index 1 (1-based) + type_id: 536870915, + stat_id: 536870916, + counter: 1, + }, + SAIStat { + object_name: "Ethernet0".to_string(), // label 1 -> index 0 (1-based) + type_id: 1, + stat_id: 2, + counter: 1, + }, + ], + }, + SAIStats { + observation_time: 2, + stats: vec![ + SAIStat { + object_name: "Ethernet1".to_string(), // label 2 -> index 1 (1-based) + type_id: 536870915, + stat_id: 536870916, + counter: 3, + }, + SAIStat { + object_name: "Ethernet0".to_string(), // label 1 -> index 0 (1-based) + type_id: 1, + stat_id: 2, + counter: 2, + }, + ], + }, + SAIStats { + observation_time: 1, + stats: vec![ + SAIStat { + object_name: "Ethernet1".to_string(), // label 2 -> index 1 (1-based) + type_id: 536870915, + stat_id: 536870916, + counter: 4, + }, + SAIStat { + object_name: "Ethernet0".to_string(), // label 1 -> index 0 (1-based) + type_id: 1, + stat_id: 2, + counter: 1, + }, + ], + }, + SAIStats { + observation_time: 2, + stats: vec![ + SAIStat { + object_name: "Ethernet1".to_string(), // label 2 -> index 1 (1-based) + type_id: 536870915, + stat_id: 536870916, + counter: 7, + }, + SAIStat { + object_name: "Ethernet0".to_string(), // label 1 -> index 0 (1-based) + type_id: 1, + stat_id: 2, + counter: 2, + }, + ], + }, + ]; + + let mut received_stats = Vec::new(); + while let Some(stats) = saistats_receiver.recv().await { + let unwrapped_stats = + Arc::try_unwrap(stats).expect("Failed to unwrap Arc"); + received_stats.push(unwrapped_stats); + if received_stats.len() == expected_stats.len() { + break; + } + } + + assert_eq!(received_stats, expected_stats); + + drop(buffer_sender); + drop(template_sender); + drop(saistats_receiver); + + actor_handle + .await + .expect("Actor task should complete successfully"); + // Note: Log assertions removed due to env_logger initialization conflicts in test suite + } +} diff --git a/crates/countersyncd/src/actor/mod.rs b/crates/countersyncd/src/actor/mod.rs new file mode 100644 index 00000000000..58545b74a73 --- /dev/null +++ b/crates/countersyncd/src/actor/mod.rs @@ -0,0 +1,7 @@ +pub mod control_netlink; +pub mod counter_db; +pub mod data_netlink; +pub mod ipfix; +pub mod stats_reporter; +pub mod swss; +pub mod otel; diff --git a/crates/countersyncd/src/actor/otel.rs b/crates/countersyncd/src/actor/otel.rs new file mode 100644 index 00000000000..2281c80edc8 --- /dev/null +++ b/crates/countersyncd/src/actor/otel.rs @@ -0,0 +1,294 @@ +use std::{sync::Arc, time::Duration, collections::HashMap}; +use tokio::{sync::mpsc::Receiver, sync::oneshot, select}; +use opentelemetry::metrics::MetricsError; +use opentelemetry_proto::tonic::{ + common::v1::{KeyValue as ProtoKeyValue, AnyValue, any_value::Value, InstrumentationScope}, + metrics::v1::{Metric, Gauge as ProtoGauge, ResourceMetrics, ScopeMetrics, NumberDataPoint}, + resource::v1::Resource as ProtoResource, +}; +use crate::message::{ + saistats::{SAIStats, SAIStatsMessage}, + otel::{OtelMetrics, OtelMetricsMessageExt}, +}; +use log::{info, error, debug, warn}; +use opentelemetry_proto::tonic::collector::metrics::v1::metrics_service_client::MetricsServiceClient; +use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest; +use tonic::transport::Endpoint; + +/// Configuration for the OtelActor +#[derive(Debug, Clone)] +pub struct OtelActorConfig { + /// Whether to print statistics to console + pub print_to_console: bool, + /// OpenTelemetry collector endpoint + pub collector_endpoint: String, +} + +impl Default for OtelActorConfig { + fn default() -> Self { + Self { + print_to_console: true, + collector_endpoint: "http://localhost:4317".to_string(), + } + } +} + +/// Actor that receives SAI statistics and exports to OpenTelemetry +pub struct OtelActor { + stats_receiver: Receiver, + config: OtelActorConfig, + shutdown_notifier: Option>, + client: MetricsServiceClient, + + // Pre-allocated reusable structures + resource: ProtoResource, + instrumentation_scope: InstrumentationScope, + + // Statistics tracking + messages_received: u64, + exports_performed: u64, + export_failures: u64, + console_reports: u64, +} + +impl OtelActor { + /// Creates a new OtelActor instance + pub async fn new( + stats_receiver: Receiver, + config: OtelActorConfig, + shutdown_notifier: oneshot::Sender<()> + ) -> Result> { + let endpoint = config.collector_endpoint.parse::()?; + let client = MetricsServiceClient::connect(endpoint).await?; + + // Pre-create reusable resource + let resource = ProtoResource { + attributes: vec![ProtoKeyValue { + key: "service.name".to_string(), + value: Some(AnyValue { + value: Some(Value::StringValue("countersyncd".to_string())), + }), + }], + dropped_attributes_count: 0, + }; + + // Pre-create reusable instrumentation scope + let instrumentation_scope = InstrumentationScope { + name: "countersyncd".to_string(), + version: "1.0".to_string(), + attributes: vec![], + dropped_attributes_count: 0, + }; + + info!( + "OtelActor initialized - console: {}, endpoint: {}", + config.print_to_console, + config.collector_endpoint + ); + + Ok(OtelActor { + stats_receiver, + config, + shutdown_notifier: Some(shutdown_notifier), + client, + resource, + instrumentation_scope, + messages_received: 0, + exports_performed: 0, + export_failures: 0, + console_reports: 0, + }) + } + + /// Main run loop + pub async fn run(mut self) { + info!("OtelActor started"); + + loop { + select! { + stats_msg = self.stats_receiver.recv() => { + match stats_msg { + Some(stats) => { + self.handle_stats_message(stats).await; + } + None => { + info!("Stats receiver channel closed, shutting down OtelActor"); + break; + } + } + } + } + } + + self.shutdown().await; + } + + /// Handle incoming SAI statistics message + async fn handle_stats_message(&mut self, stats: SAIStatsMessage) { + self.messages_received += 1; + + debug!("Received SAI stats with {} entries, observation_time: {}", + stats.stats.len(), stats.observation_time); + + // Convert to OTel format using message types + let otel_metrics = OtelMetrics::from_sai_stats(&stats); + + // Print to console if enabled + if self.config.print_to_console { + self.print_otel_metrics(&otel_metrics).await; + } + + // Export to OpenTelemetry collector + self.export_otel_metrics(&otel_metrics).await; + } + + async fn print_otel_metrics(&mut self, otel_metrics: &OtelMetrics) { + self.console_reports += 1; + + info!( + "[OTel Report #{}] Service: {}, Scope: {} v{}, Total Gauges: {}, Messages Received: {}, Exports: {} (Failures: {})", + self.console_reports, + otel_metrics.service_name, + otel_metrics.scope_name, + otel_metrics.scope_version, + otel_metrics.len(), + self.messages_received, + self.exports_performed, + self.export_failures + ); + + if !otel_metrics.is_empty() { + info!("Gauge Metrics:"); + for (index, gauge) in otel_metrics.gauges.iter().enumerate() { + let data_point = &gauge.data_points[0]; + + info!("[{:3}] Gauge: {}", index + 1, gauge.name); + info!("Value: {}", data_point.value); + info!("Unit: {}", gauge.unit); + info!("Time: {}ns", data_point.time_unix_nano); + info!("Description: {}", gauge.description); + + if !data_point.attributes.is_empty() { + info!("Attributes:"); + for attr in &data_point.attributes { + info!(" - {}={}", attr.key, attr.value); + } + } + + debug!("Raw Gauge: {:#?}", gauge); + } + } + + } + + // Export metrics to OpenTelemetry collector + async fn export_otel_metrics(&mut self, otel_metrics: &OtelMetrics) { + if otel_metrics.is_empty() { + return; + } + + // Convert gauges to protobuf metrics + let proto_metrics: Vec = otel_metrics.gauges.iter().map(|gauge| { + let proto_data_points = gauge.data_points.iter() + .map(|dp| dp.to_proto()) + .collect(); + + let proto_gauge = ProtoGauge { + data_points: proto_data_points, + }; + + Metric { + name: gauge.name.clone(), + description: gauge.description.clone(), + metadata: vec![], + data: Some(opentelemetry_proto::tonic::metrics::v1::metric::Data::Gauge(proto_gauge)), + ..Default::default() + } + }).collect(); + + // Reuse pre-allocated resource and scope, only create new ScopeMetrics with updated metrics + let resource_metrics = ResourceMetrics { + resource: Some(self.resource.clone()), // Reuse pre-created resource + scope_metrics: vec![ScopeMetrics { + scope: Some(self.instrumentation_scope.clone()), + schema_url: String::new(), + metrics: proto_metrics, + }], + schema_url: String::new(), + }; + + // Create export request + let request = ExportMetricsServiceRequest { + resource_metrics: vec![resource_metrics], + }; + + // Export to collector + match self.client.export(request).await { + Ok(_) => { + self.exports_performed += 1; + debug!("Exported {} metrics to collector", otel_metrics.len()); + } + Err(e) => { + self.export_failures += 1; + error!("Failed to export metrics: {}", e); + } + } + } + + pub fn print_conversion_report(sai_stats: &SAIStats, otel_metrics: &OtelMetrics) { + info!("[Conversion Report] SAI Stats → OpenTelemetry Gauges"); + info!("Conversion timestamp: {}", sai_stats.observation_time); + info!("Input: {} SAI statistics", sai_stats.stats.len()); + info!("Output: {} OpenTelemetry gauges", otel_metrics.len()); + + info!("BEFORE - Original SAI Statistics:"); + for (index, sai_stat) in sai_stats.stats.iter().enumerate().take(10) { + info!( + "[{:2}] Object: {:20} | Type: {:3} | Stat: {:3} | Counter: {:>12}", + index + 1, + sai_stat.object_name, + sai_stat.type_id, + sai_stat.stat_id, + sai_stat.counter + ); + } + + info!("AFTER - Converted OpenTelemetry Gauges:"); + for (index, gauge) in otel_metrics.gauges.iter().enumerate().take(10) { + let data_point = &gauge.data_points[0]; + info!( + "[{:2}] Metric: {:35} | Value: {:>12} | Time: {}ns", + index + 1, + gauge.name, + data_point.value, + data_point.time_unix_nano + ); + + // Show key attributes on the same line + let attrs: Vec = data_point.attributes.iter() + .map(|attr| format!("{}={}", attr.key, attr.value)) + .collect(); + if !attrs.is_empty() { + info!("Attributes: [{}]", attrs.join(", ")); + } + info!("Description: {}", gauge.description); + } + info!("Conversion completed successfully!"); + } + + /// Shutdown the actor + async fn shutdown(self) { + info!("Shutting down OtelActor..."); + + tokio::time::sleep(Duration::from_secs(1)).await; + + if let Some(notifier) = self.shutdown_notifier { + let _ = notifier.send(()); + } + + info!( + "OtelActor shutdown complete. {} messages, {} exports, {} failures", + self.messages_received, self.exports_performed, self.export_failures + ); + } +} diff --git a/crates/countersyncd/src/actor/stats_reporter.rs b/crates/countersyncd/src/actor/stats_reporter.rs new file mode 100644 index 00000000000..c7142f117b6 --- /dev/null +++ b/crates/countersyncd/src/actor/stats_reporter.rs @@ -0,0 +1,1045 @@ +use chrono::DateTime; +use std::collections::HashMap; +use std::time::Duration; + +use log::{debug, info}; +use tokio::{ + select, + sync::mpsc::Receiver, + time::{interval, Interval}, +}; + +use super::super::message::saistats::SAIStatsMessage; +use crate::sai::{ + SaiBufferPoolStat, SaiIngressPriorityGroupStat, SaiObjectType, SaiPortStat, SaiQueueStat, +}; + +/// Unique key for identifying a specific counter based on the triplet +/// (object_name, type_id, stat_id) +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct CounterKey { + pub object_name: String, + pub type_id: u32, + pub stat_id: u32, +} + +impl CounterKey { + pub fn new(object_name: String, type_id: u32, stat_id: u32) -> Self { + Self { + object_name, + type_id, + stat_id, + } + } +} + +/// Counter information including the latest value and associated metadata +#[derive(Debug, Clone)] +pub struct CounterInfo { + pub counter: u64, + pub last_observation_time: u64, +} + +/// Trait for output writing to enable testing +pub trait OutputWriter: Send + Sync { + fn write_line(&mut self, line: &str); +} + +/// Console writer implementation +pub struct ConsoleWriter; + +impl OutputWriter for ConsoleWriter { + fn write_line(&mut self, line: &str) { + println!("{}", line); + } +} + +/// Test writer that captures output +#[cfg(test)] +pub struct TestWriter { + pub lines: Vec, +} + +#[cfg(test)] +impl TestWriter { + pub fn new() -> Self { + Self { lines: Vec::new() } + } + + #[allow(dead_code)] + pub fn get_output(&self) -> &[String] { + &self.lines + } +} + +#[cfg(test)] +impl OutputWriter for TestWriter { + fn write_line(&mut self, line: &str) { + self.lines.push(line.to_string()); + } +} + +/// Configuration for the StatsReporterActor +#[derive(Debug, Clone)] +pub struct StatsReporterConfig { + /// Reporting interval - how often to print the latest statistics + pub interval: Duration, + /// Whether to print detailed statistics or summary only + pub detailed: bool, + /// Maximum number of statistics to display per report + pub max_stats_per_report: Option, +} + +impl Default for StatsReporterConfig { + fn default() -> Self { + Self { + interval: Duration::from_secs(10), + detailed: true, + max_stats_per_report: None, + } + } +} + +/// Actor responsible for consuming SAI statistics messages and reporting them to the terminal. +/// +/// The StatsReporterActor handles: +/// - Receiving SAI statistics messages from IPFIX processor +/// - Maintaining the latest statistics state per counter key (object_name, type_id, stat_id) +/// - Tracking message counts per counter key for each reporting period +/// - Periodic reporting based on configured interval +/// - Formatted output to terminal with optional detail levels +pub struct StatsReporterActor { + /// Channel for receiving SAI statistics messages + stats_receiver: Receiver, + /// Configuration for reporting behavior + config: StatsReporterConfig, + /// Timer for periodic reporting + report_timer: Interval, + /// Latest counter values indexed by (object_name, type_id, stat_id) key + latest_counters: HashMap, + /// Message count per counter key for current reporting period + messages_per_counter: HashMap, + /// Total messages received across all counters + total_messages_received: u64, + /// Counter for total reports generated + reports_generated: u64, + /// Output writer for dependency injection + writer: W, +} + +impl StatsReporterActor { + /// Creates a new StatsReporterActor instance. + /// + /// # Arguments + /// + /// * `stats_receiver` - Channel for receiving SAI statistics messages + /// * `config` - Configuration for reporting behavior + /// * `writer` - Output writer for dependency injection + /// + /// # Returns + /// + /// A new StatsReporterActor instance + pub fn new( + stats_receiver: Receiver, + config: StatsReporterConfig, + writer: W, + ) -> Self { + let report_timer = interval(config.interval); + + info!( + "StatsReporter initialized with interval: {:?}, detailed: {}", + config.interval, config.detailed + ); + + Self { + stats_receiver, + config, + report_timer, + latest_counters: HashMap::new(), + messages_per_counter: HashMap::new(), + total_messages_received: 0, + reports_generated: 0, + writer, + } + } + + /// Creates a new StatsReporterActor with default configuration and console writer. + /// + /// # Arguments + /// + /// * `stats_receiver` - Channel for receiving SAI statistics messages + /// + /// # Returns + /// + /// A new StatsReporterActor instance with default settings + #[allow(dead_code)] + pub fn new_with_defaults( + stats_receiver: Receiver, + ) -> StatsReporterActor { + StatsReporterActor::new( + stats_receiver, + StatsReporterConfig::default(), + ConsoleWriter, + ) + } + + /// Helper function to convert type_id to string representation + fn type_id_to_string(&self, type_id: u32) -> String { + match SaiObjectType::try_from(type_id) { + Ok(sai_type) => format!("{:?}", sai_type), + Err(_) => format!("UNKNOWN({})", type_id), + } + } + + /// Helper function to remove SAI prefixes from stat names + fn remove_sai_prefix(&self, stat_name: &str) -> String { + // Remove common SAI stat prefixes using regex pattern + // Pattern: SAI__STAT_ + if stat_name.starts_with("SAI_") && stat_name.contains("_STAT_") { + // Find the position of "_STAT_" and return everything after it + if let Some(stat_pos) = stat_name.find("_STAT_") { + let start_pos = stat_pos + "_STAT_".len(); + stat_name[start_pos..].to_string() + } else { + stat_name.to_string() + } + } else { + // If no SAI pattern found, return as-is + stat_name.to_string() + } + } + + /// Helper function to convert stat_id to string representation + fn stat_id_to_string(&self, type_id: u32, stat_id: u32) -> String { + // Convert type_id to SaiObjectType first + match SaiObjectType::try_from(type_id) { + Ok(object_type) => { + match object_type { + SaiObjectType::Port => { + // Convert stat_id to SaiPortStat and get its C name + if let Some(port_stat) = SaiPortStat::from_u32(stat_id) { + self.remove_sai_prefix(port_stat.to_c_name()) + } else { + format!("UNKNOWN_PORT_STAT_{}", stat_id) + } + } + SaiObjectType::Queue => { + // Convert stat_id to SaiQueueStat and get its C name + if let Some(queue_stat) = SaiQueueStat::from_u32(stat_id) { + self.remove_sai_prefix(queue_stat.to_c_name()) + } else { + format!("UNKNOWN_QUEUE_STAT_{}", stat_id) + } + } + SaiObjectType::BufferPool => { + // Convert stat_id to SaiBufferPoolStat and get its C name + if let Some(buffer_stat) = SaiBufferPoolStat::from_u32(stat_id) { + self.remove_sai_prefix(buffer_stat.to_c_name()) + } else { + format!("UNKNOWN_BUFFER_POOL_STAT_{}", stat_id) + } + } + SaiObjectType::IngressPriorityGroup => { + // Convert stat_id to SaiIngressPriorityGroupStat and get its C name + if let Some(ipg_stat) = SaiIngressPriorityGroupStat::from_u32(stat_id) { + self.remove_sai_prefix(ipg_stat.to_c_name()) + } else { + format!("UNKNOWN_IPG_STAT_{}", stat_id) + } + } + _ => { + format!("UNSUPPORTED_TYPE_{}_STAT_{}", type_id, stat_id) + } + } + } + Err(_) => { + format!("INVALID_TYPE_{}_STAT_{}", type_id, stat_id) + } + } + } + + /// Helper function to format timestamp with nanosecond precision + fn format_timestamp(&self, timestamp_ns: u64) -> String { + // Convert nanoseconds to seconds and nanoseconds + let secs = (timestamp_ns / 1_000_000_000) as i64; + let nanos = (timestamp_ns % 1_000_000_000) as u32; + + // Create DateTime from the timestamp using the new API + match DateTime::from_timestamp(secs, nanos) { + Some(utc_dt) => { + // Format as "YYYY-MM-DD HH:MM:SS.nnnnnnnnn UTC" + utc_dt.format("%Y-%m-%d %H:%M:%S.%f UTC").to_string() + } + None => { + // Fallback to original format if conversion fails + format!("{}.{:09}", secs, nanos) + } + } + } + + /// Updates the internal state with new statistics data. + /// + /// For each statistic in the message, updates: + /// - The latest counter value for the (object_name, type_id, stat_id) key + /// - The message count for that key in the current reporting period + /// + /// # Arguments + /// + /// * `stats_msg` - New SAI statistics message to process + fn update_stats(&mut self, stats_msg: SAIStatsMessage) { + self.total_messages_received += 1; + + // Extract SAIStats from Arc + let stats = match std::sync::Arc::try_unwrap(stats_msg) { + Ok(stats) => stats, + Err(arc_stats) => (*arc_stats).clone(), + }; + + debug!( + "Received SAI stats with {} entries, observation_time: {}", + stats.stats.len(), + stats.observation_time + ); + + // Process each statistic in the message + for stat in stats.stats { + let key = CounterKey::new(stat.object_name, stat.type_id, stat.stat_id); + + // Update latest counter value + let counter_info = CounterInfo { + counter: stat.counter, + last_observation_time: stats.observation_time, + }; + self.latest_counters.insert(key.clone(), counter_info); + + // Increment message count for this counter key + *self.messages_per_counter.entry(key).or_insert(0) += 1; + } + } + + /// Generates and prints a statistics report to the terminal. + /// + /// Reports all current counter values and their triplets, as well as + /// message counts for the current reporting period. After reporting, + /// clears the per-period message counters. + fn generate_report(&mut self) { + self.reports_generated += 1; + + if self.latest_counters.is_empty() { + self.writer.write_line(&format!( + "[Report #{}] No statistics data available yet", + self.reports_generated + )); + self.writer.write_line(&format!( + " Total Messages Received: {}", + self.total_messages_received + )); + } else { + self.print_counters_report(); + } + + // Clear per-period message counters for next reporting period + self.messages_per_counter.clear(); + + self.writer.write_line(""); // Add blank line for readability + } + + /// Prints formatted counters report to terminal. + /// + /// Shows all current counters with their triplet keys and the number of + /// messages received for each counter in the current reporting period. + fn print_counters_report(&mut self) { + self.writer.write_line(&format!( + "[Report #{}] SAI Counters Report", + self.reports_generated + )); + self.writer.write_line(&format!( + " Total Unique Counters: {}", + self.latest_counters.len() + )); + self.writer.write_line(&format!( + " Total Messages Received: {}", + self.total_messages_received + )); + + if self.config.detailed && !self.latest_counters.is_empty() { + // Group by SAI object type for better organization + use std::collections::BTreeMap; + let mut grouped_counters: BTreeMap> = + BTreeMap::new(); + + for (key, counter_info) in &self.latest_counters { + grouped_counters + .entry(key.type_id) + .or_insert_with(Vec::new) + .push((key, counter_info)); + } + + self.writer.write_line(" Detailed Counters:"); + + let mut total_shown = 0; + for (type_id, mut counters) in grouped_counters { + // Sort counters within each type by object name and stat id + counters.sort_by(|a, b| { + a.0.object_name + .cmp(&b.0.object_name) + .then_with(|| a.0.stat_id.cmp(&b.0.stat_id)) + }); + + let type_name = self.type_id_to_string(type_id); + self.writer + .write_line(&format!(" Type: {} ({})", type_name, type_id)); + + let counters_to_show = if let Some(max) = self.config.max_stats_per_report { + let remaining = max.saturating_sub(total_shown); + &counters[..std::cmp::min(remaining, counters.len())] + } else { + &counters + }; + + for (index, (key, counter_info)) in counters_to_show.iter().enumerate() { + let messages_in_period = self.messages_per_counter.get(key).unwrap_or(&0); + let messages_per_second = + *messages_in_period as f64 / self.config.interval.as_secs_f64(); + let stat_name = self.stat_id_to_string(key.type_id, key.stat_id); + let formatted_time = self.format_timestamp(counter_info.last_observation_time); + + self.writer.write_line(&format!( + " [{:3}] Object: {:15}, Stat: {:25}, Counter: {:15}, Msg/s: {:6.1}, LastTime: {}", + index + 1, + key.object_name, + stat_name, + counter_info.counter, + messages_per_second, + formatted_time + )); + } + + total_shown += counters_to_show.len(); + if let Some(max) = self.config.max_stats_per_report { + if total_shown >= max && self.latest_counters.len() > max { + self.writer.write_line(&format!( + " ... and {} more counters (use max_stats_per_report: None to show all)", + self.latest_counters.len() - max + )); + break; + } + } + } + } else if !self.config.detailed && !self.latest_counters.is_empty() { + // Summary mode - show aggregate information + let total_counter_value: u64 = + self.latest_counters.values().map(|info| info.counter).sum(); + let unique_types = self + .latest_counters + .keys() + .map(|k| k.type_id) + .collect::>() + .len(); + let unique_objects = self + .latest_counters + .keys() + .map(|k| &k.object_name) + .collect::>() + .len(); + let total_messages_in_period: u64 = self.messages_per_counter.values().sum(); + let messages_per_second = + total_messages_in_period as f64 / self.config.interval.as_secs_f64(); + + self.writer.write_line(" Summary:"); + self.writer.write_line(&format!( + " Total Counter Value: {}", + total_counter_value + )); + self.writer + .write_line(&format!(" Unique Types: {}", unique_types)); + self.writer + .write_line(&format!(" Unique Objects: {}", unique_objects)); + self.writer.write_line(&format!( + " Messages per Second: {:.1}", + messages_per_second + )); + } + } + + /// Main event loop for the StatsReporterActor. + /// + /// Continuously processes incoming statistics messages and generates periodic reports. + /// The loop will exit when the statistics channel is closed. + /// + /// # Arguments + /// + /// * `actor` - The StatsReporterActor instance to run + pub async fn run(mut actor: StatsReporterActor) { + info!("StatsReporter actor started"); + + loop { + select! { + // Handle incoming statistics messages + stats_msg = actor.stats_receiver.recv() => { + match stats_msg { + Some(stats) => { + actor.update_stats(stats); + } + None => { + info!("Stats receiver channel closed, shutting down reporter"); + break; + } + } + } + + // Handle periodic reporting + _ = actor.report_timer.tick() => { + actor.generate_report(); + } + } + } + + // Generate final report before shutdown + info!("Generating final report before shutdown..."); + actor.generate_report(); + info!( + "StatsReporter actor terminated. Total reports generated: {}", + actor.reports_generated + ); + } +} + +impl Drop for StatsReporterActor { + fn drop(&mut self) { + info!( + "StatsReporter dropped after {} reports and {} messages", + self.reports_generated, self.total_messages_received + ); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::Arc; + use tokio::{spawn, sync::mpsc::channel, time::sleep}; + + use crate::message::saistats::{SAIStat, SAIStats}; + + /// Helper function to create test SAI statistics + fn create_test_stats(observation_time: u64, stat_count: usize) -> SAIStats { + let stats = (0..stat_count) + .map(|i| SAIStat { + object_name: format!("Ethernet{}", i), + type_id: (i * 100) as u32, + stat_id: (i * 10) as u32, + counter: (i * 1000) as u64, + }) + .collect(); + + SAIStats { + observation_time, + stats, + } + } + + #[tokio::test] + async fn test_stats_reporter_basic_functionality() { + let (sender, receiver) = channel(10); + let test_writer = TestWriter::new(); + + let config = StatsReporterConfig { + interval: Duration::from_millis(200), + detailed: true, + max_stats_per_report: Some(3), + }; + + // Create actor with test writer + let actor = StatsReporterActor::new(receiver, config, test_writer); + let handle = spawn(StatsReporterActor::run(actor)); + + // Send test statistics + let test_stats = create_test_stats(12345, 5); + sender.send(Arc::new(test_stats)).await.unwrap(); + + // Wait for processing + sleep(Duration::from_millis(50)).await; + + // Wait for at least one report + sleep(Duration::from_millis(250)).await; + + // Send another set of statistics + let test_stats2 = create_test_stats(67890, 2); + sender.send(Arc::new(test_stats2)).await.unwrap(); + + // Wait for processing + sleep(Duration::from_millis(50)).await; + + // Close the channel to terminate the actor + drop(sender); + + // Wait for actor to finish + let _finished_actor = handle.await.expect("Actor should complete successfully"); + } + + #[tokio::test] + async fn test_stats_reporter_with_shared_writer() { + use std::sync::{Arc, Mutex}; + + // Shared writer that can be accessed from multiple places + #[derive(Clone)] + struct SharedTestWriter { + lines: Arc>>, + } + + impl SharedTestWriter { + fn new() -> Self { + Self { + lines: Arc::new(Mutex::new(Vec::new())), + } + } + + fn get_lines(&self) -> Vec { + self.lines.lock().unwrap().clone() + } + } + + impl OutputWriter for SharedTestWriter { + fn write_line(&mut self, line: &str) { + self.lines.lock().unwrap().push(line.to_string()); + } + } + + let (sender, receiver) = channel(10); + let shared_writer = SharedTestWriter::new(); + let writer_clone = shared_writer.clone(); + + let config = StatsReporterConfig { + interval: Duration::from_millis(200), + detailed: true, + max_stats_per_report: Some(3), + }; + + // Create actor with shared writer + let actor = StatsReporterActor::new(receiver, config, shared_writer); + let handle = spawn(StatsReporterActor::run(actor)); + + // Send test statistics + let test_stats = create_test_stats(12345, 5); + sender.send(Arc::new(test_stats)).await.unwrap(); + + // Wait for processing + sleep(Duration::from_millis(50)).await; + + // Wait for at least one report + sleep(Duration::from_millis(250)).await; + + // Send another set of statistics + let test_stats2 = create_test_stats(67890, 2); + sender.send(Arc::new(test_stats2)).await.unwrap(); + + // Wait for processing + sleep(Duration::from_millis(50)).await; + + // Close the channel to terminate the actor + drop(sender); + + // Wait for actor to finish + handle.await.expect("Actor should complete successfully"); + + // Now we can check the output + let output = writer_clone.get_lines(); + + // Verify we have some output + assert!(!output.is_empty(), "Should have captured some output"); + + // Verify report header is present (now "SAI Counters Report") + let has_report_header = output + .iter() + .any(|line| line.contains("SAI Counters Report")); + assert!(has_report_header, "Should contain counters report header"); + + // Verify counter count for all unique counters (first 5 + 2 overlapping = 5 unique) + let has_counter_count = output + .iter() + .any(|line| line.contains("Total Unique Counters: 5")); + assert!( + has_counter_count, + "Should show correct unique counters count" + ); + + // Verify detailed output + let has_detailed = output + .iter() + .any(|line| line.contains("Detailed Counters:")); + assert!(has_detailed, "Should show detailed counters"); + + // Verify individual counter entries with new format + let has_counter_entry = output.iter().any(|line| { + line.contains("Object:") && line.contains("Stat:") && line.contains("Msg/s:") + }); + assert!( + has_counter_entry, + "Should show individual counter entries with message counts" + ); + } + + #[tokio::test] + async fn test_stats_reporter_summary_mode() { + use std::sync::{Arc, Mutex}; + + #[derive(Clone)] + struct SharedTestWriter { + lines: Arc>>, + } + + impl SharedTestWriter { + fn new() -> Self { + Self { + lines: Arc::new(Mutex::new(Vec::new())), + } + } + + fn get_lines(&self) -> Vec { + self.lines.lock().unwrap().clone() + } + } + + impl OutputWriter for SharedTestWriter { + fn write_line(&mut self, line: &str) { + self.lines.lock().unwrap().push(line.to_string()); + } + } + + let (sender, receiver) = channel(10); + let shared_writer = SharedTestWriter::new(); + let writer_clone = shared_writer.clone(); + + let config = StatsReporterConfig { + interval: Duration::from_millis(100), + detailed: false, // Summary mode + max_stats_per_report: None, + }; + + let actor = StatsReporterActor::new(receiver, config, shared_writer); + let handle = spawn(StatsReporterActor::run(actor)); + + // Send test statistics with known values + let test_stats = create_test_stats(99999, 3); + sender.send(Arc::new(test_stats)).await.unwrap(); + + // Wait for processing and one report + sleep(Duration::from_millis(150)).await; + + // Close and finish + drop(sender); + handle.await.expect("Actor should complete successfully"); + + // Verify captured output + let output = writer_clone.get_lines(); + + // Verify we have output + assert!(!output.is_empty(), "Should have captured some output"); + + // Verify summary mode elements + let has_summary_header = output.iter().any(|line| line.contains("Summary:")); + assert!(has_summary_header, "Should contain summary header"); + + // Verify total counter calculation (0 + 1000 + 2000 = 3000) + let has_total_counter = output + .iter() + .any(|line| line.contains("Total Counter Value: 3000")); + assert!(has_total_counter, "Should show correct total counter value"); + + // Verify unique counts + let has_unique_types = output.iter().any(|line| line.contains("Unique Types: 3")); + assert!(has_unique_types, "Should show correct unique types count"); + + let has_unique_labels = output.iter().any(|line| line.contains("Unique Objects: 3")); + assert!( + has_unique_labels, + "Should show correct unique objects count" + ); + + // Should NOT have detailed counters + let has_detailed = output + .iter() + .any(|line| line.contains("Detailed Counters:")); + assert!( + !has_detailed, + "Should NOT show detailed counters in summary mode" + ); + + // Should show messages per second + let has_messages_per_second = output + .iter() + .any(|line| line.contains("Messages per Second:")); + assert!( + has_messages_per_second, + "Should show messages per second in summary mode" + ); + } + + #[tokio::test] + async fn test_stats_reporter_no_data() { + use std::sync::{Arc, Mutex}; + + #[derive(Clone)] + struct SharedTestWriter { + lines: Arc>>, + } + + impl SharedTestWriter { + fn new() -> Self { + Self { + lines: Arc::new(Mutex::new(Vec::new())), + } + } + + fn get_lines(&self) -> Vec { + self.lines.lock().unwrap().clone() + } + } + + impl OutputWriter for SharedTestWriter { + fn write_line(&mut self, line: &str) { + self.lines.lock().unwrap().push(line.to_string()); + } + } + + let (sender, receiver) = channel(10); + let shared_writer = SharedTestWriter::new(); + let writer_clone = shared_writer.clone(); + + let config = StatsReporterConfig { + interval: Duration::from_millis(50), + detailed: true, + max_stats_per_report: None, + }; + + let actor = StatsReporterActor::new(receiver, config, shared_writer); + let handle = spawn(StatsReporterActor::run(actor)); + + // Don't send any data, just wait for a report + sleep(Duration::from_millis(100)).await; + + // Close the channel + drop(sender); + handle.await.expect("Actor should complete successfully"); + + // Verify captured output + let output = writer_clone.get_lines(); + + // Verify we have output + assert!(!output.is_empty(), "Should have captured some output"); + + // Verify "no data" message + let has_no_data_msg = output + .iter() + .any(|line| line.contains("No statistics data available yet")); + assert!(has_no_data_msg, "Should show 'no data available' message"); + + // Verify message count is 0 + let has_zero_messages = output + .iter() + .any(|line| line.contains("Total Messages Received: 0")); + assert!(has_zero_messages, "Should show 0 total messages received"); + } + + #[tokio::test] + async fn test_stats_reporter_max_stats_limit() { + use std::sync::{Arc, Mutex}; + + #[derive(Clone)] + struct SharedTestWriter { + lines: Arc>>, + } + + impl SharedTestWriter { + fn new() -> Self { + Self { + lines: Arc::new(Mutex::new(Vec::new())), + } + } + + fn get_lines(&self) -> Vec { + self.lines.lock().unwrap().clone() + } + } + + impl OutputWriter for SharedTestWriter { + fn write_line(&mut self, line: &str) { + self.lines.lock().unwrap().push(line.to_string()); + } + } + + let (sender, receiver) = channel(10); + let shared_writer = SharedTestWriter::new(); + let writer_clone = shared_writer.clone(); + + let config = StatsReporterConfig { + interval: Duration::from_millis(500), // Longer interval to avoid multiple reports + detailed: true, + max_stats_per_report: Some(2), // Limit to 2 stats + }; + + let actor = StatsReporterActor::new(receiver, config, shared_writer); + let handle = spawn(StatsReporterActor::run(actor)); + + // Send stats with more entries than the limit + let test_stats = create_test_stats(55555, 5); + sender.send(Arc::new(test_stats)).await.unwrap(); + + // Wait for processing but not long enough for multiple reports + sleep(Duration::from_millis(50)).await; + + // Close and finish quickly to avoid multiple timer ticks + drop(sender); + handle.await.expect("Actor should complete successfully"); + + // Verify captured output + let output = writer_clone.get_lines(); + + // Find the first detailed counters section + let mut in_detailed_section = false; + let mut counter_entries = Vec::new(); + + for line in &output { + if line.contains("Detailed Counters:") { + in_detailed_section = true; + continue; + } + + if in_detailed_section { + if line.contains("] Object:") && line.contains("Stat:") { + counter_entries.push(line); + } else if line.contains("[Report") || line.trim().is_empty() { + // End of this detailed section + break; + } + } + } + + // Should show exactly 2 counter entries in the first report + assert_eq!( + counter_entries.len(), + 2, + "Should show exactly 2 counter entries due to limit" + ); + + // Verify "more counters" message + let has_more_msg = output + .iter() + .any(|line| line.contains("and 3 more counters")); + assert!(has_more_msg, "Should show 'more counters' message"); + + // Verify total count is still correct + let has_total_count = output + .iter() + .any(|line| line.contains("Total Unique Counters: 5")); + assert!( + has_total_count, + "Should show correct total unique counters count" + ); + } + + #[tokio::test] + async fn test_stats_reporter_sai_stat_names() { + use std::sync::{Arc, Mutex}; + + #[derive(Clone)] + struct SharedTestWriter { + lines: Arc>>, + } + + impl SharedTestWriter { + fn new() -> Self { + Self { + lines: Arc::new(Mutex::new(Vec::new())), + } + } + + fn get_lines(&self) -> Vec { + self.lines.lock().unwrap().clone() + } + } + + impl OutputWriter for SharedTestWriter { + fn write_line(&mut self, line: &str) { + self.lines.lock().unwrap().push(line.to_string()); + } + } + + let (sender, receiver) = channel(10); + let shared_writer = SharedTestWriter::new(); + let writer_clone = shared_writer.clone(); + + let config = StatsReporterConfig { + interval: Duration::from_millis(100), + detailed: true, + max_stats_per_report: None, + }; + + let actor = StatsReporterActor::new(receiver, config, shared_writer); + let handle = spawn(StatsReporterActor::run(actor)); + + // Create stats with known SAI types and stat IDs + let stats = vec![ + SAIStat { + object_name: "Ethernet0".to_string(), + type_id: 1, // Port + stat_id: 0, // IfInOctets + counter: 832, + }, + SAIStat { + object_name: "Ethernet16".to_string(), + type_id: 1, // Port + stat_id: 1, // IfInUcastPkts + counter: 1664, + }, + ]; + + let test_stats = SAIStats { + observation_time: 12345, + stats, + }; + + sender.send(Arc::new(test_stats)).await.unwrap(); + + // Wait for processing and one report + sleep(Duration::from_millis(150)).await; + + // Close and finish + drop(sender); + handle.await.expect("Actor should complete successfully"); + + // Verify captured output + let output = writer_clone.get_lines(); + + // Find the line that should contain IF_IN_OCTETS (without SAI_PORT_STAT_ prefix) + let has_if_in_octets = output.iter().any(|line| line.contains("IF_IN_OCTETS")); + assert!( + has_if_in_octets, + "Should show IF_IN_OCTETS without SAI_PORT_STAT_ prefix" + ); + + // Find the line that should contain IF_IN_UCAST_PKTS (without SAI_PORT_STAT_ prefix) + let has_if_in_ucast_pkts = output.iter().any(|line| line.contains("IF_IN_UCAST_PKTS")); + assert!( + has_if_in_ucast_pkts, + "Should show IF_IN_UCAST_PKTS without SAI_PORT_STAT_ prefix" + ); + + // Should NOT have full SAI prefixes + let has_sai_prefix = output.iter().any(|line| { + line.contains("SAI_PORT_STAT_IF_IN_OCTETS") + || line.contains("SAI_PORT_STAT_IF_IN_UCAST_PKTS") + }); + assert!( + !has_sai_prefix, + "Should NOT show full SAI_PORT_STAT_ prefixes" + ); + + // Should NOT have generic STAT_ prefixes + let has_generic_stat = output + .iter() + .any(|line| line.contains("STAT_0") || line.contains("STAT_1")); + assert!(!has_generic_stat, "Should NOT show generic STAT_ names"); + } +} diff --git a/crates/countersyncd/src/actor/swss.rs b/crates/countersyncd/src/actor/swss.rs new file mode 100644 index 00000000000..fa5c81c7f20 --- /dev/null +++ b/crates/countersyncd/src/actor/swss.rs @@ -0,0 +1,814 @@ +use super::super::message::ipfix::IPFixTemplatesMessage; +use swss_common::{DbConnector, KeyOperation, SubscriberStateTable}; + +use log::{debug, error, info}; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::mpsc::Sender; + +const SOCK_PATH: &str = "/var/run/redis/redis.sock"; +const STATE_DB_ID: i32 = 6; +const STATE_HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE: &str = "HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE"; + +/// SwssActor is responsible for monitoring SONiC orchestrator agent (orchagent) +/// messages through the state database. It specifically listens for +/// HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE updates and forwards IPFIX template +/// configurations to the IPFIX actor. +/// +/// The state DB message format example: +/// ```text +/// 127.0.0.1:6379[6]> hgetall "HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE|test|PORT" +/// 1> "stream_status" -> "enabled" +/// 2> "session_type" -> "ipfix" +/// 3> "object_names" -> "Ethernet0" +/// 4> "object_ids" -> "1" +/// 5> "session_config" -> +/// ``` +pub struct SwssActor { + pub session_table: SubscriberStateTable, + template_recipient: Sender, +} + +impl SwssActor { + /// Creates a new SwssActor instance + /// + /// # Arguments + /// * `template_recipient` - Channel sender for forwarding IPFIX templates to IPFIX actor + pub fn new(template_recipient: Sender) -> Result { + let connect = DbConnector::new_unix(STATE_DB_ID, SOCK_PATH, 0) + .map_err(|e| format!("Failed to create DB connection: {}", e))?; + let session_table = SubscriberStateTable::new( + connect, + STATE_HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE, + None, + None, + ) + .map_err(|e| format!("Failed to create session table: {}", e))?; + + Ok(SwssActor { + session_table, + template_recipient, + }) + } + + /// Main event loop for the SwssActor + /// + /// Continuously monitors the HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE for updates + /// and processes enabled IPFIX sessions by forwarding their templates to the IPFIX actor. + /// + /// # Arguments + /// * `actor` - SwssActor instance to run + pub async fn run(mut actor: SwssActor) { + info!("SwssActor started, monitoring HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE"); + + #[cfg(test)] + let mut iteration_count = 0; + #[cfg(test)] + const MAX_TEST_ITERATIONS: usize = 20; + + loop { + #[cfg(test)] + { + iteration_count += 1; + if iteration_count > MAX_TEST_ITERATIONS { + debug!( + "SwssActor test mode reached maximum iterations ({}), terminating", + MAX_TEST_ITERATIONS + ); + break; + } + } + + // Use shorter timeout in test mode to make tests faster + #[cfg(test)] + let timeout = Duration::from_millis(50); + #[cfg(not(test))] + let timeout = Duration::from_secs(10); + + match actor.session_table.read_data(timeout, false) { + Ok(select_result) => { + match select_result { + swss_common::SelectResult::Data => { + // Data available, read it with pops() + match actor.session_table.pops() { + Ok(items) => { + for item in items { + debug!( + "SwssActor received: key={}, op={:?}", + item.key, item.operation + ); + + let session_key = Self::extract_session_key(&item.key); + match item.operation { + KeyOperation::Set => { + actor + .handle_session_update( + &session_key, + &item.field_values, + ) + .await; + } + KeyOperation::Del => { + actor.handle_session_delete(&session_key).await; + } + } + } + } + Err(e) => { + error!("Error popping items from session table: {}", e); + } + } + } + swss_common::SelectResult::Timeout => { + tokio::task::yield_now().await; // Yield to allow other tasks to run after processing template + debug!("Timeout waiting for session table updates"); + } + swss_common::SelectResult::Signal => { + debug!("Signal received while waiting for session table updates"); + } + } + } + Err(e) => { + error!("Error reading from session table: {}", e); + // Small delay before retrying to avoid busy waiting on persistent errors + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + } + } + } + + #[cfg(test)] + debug!("SwssActor terminated after {} iterations", iteration_count); + } + + /// Extracts the session key from the full Redis key by removing the table name prefix + /// + /// # Arguments + /// * `full_key` - Full Redis key (e.g., "HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE|session_name|PORT") + /// + /// # Returns + /// Session key without table prefix (e.g., "session_name|PORT") + fn extract_session_key(full_key: &str) -> String { + if let Some(pos) = full_key.find('|') { + if full_key.starts_with(STATE_HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE) { + return full_key[pos + 1..].to_string(); + } + } + // If no table prefix found, return as-is + full_key.to_string() + } + + /// Processes session update messages from the state database + /// + /// # Arguments + /// * `key` - Session key (e.g., "test|PORT") + /// * `field_values` - HashMap of field-value pairs from the state DB + async fn handle_session_update( + &mut self, + key: &str, + field_values: &std::collections::HashMap, + ) { + debug!("Processing session update for key: {}", key); + + // Parse session data from field-value pairs + let mut session_data = SessionData::default(); + + for (field, value) in field_values { + match field.as_str() { + "stream_status" => session_data.stream_status = value.to_string_lossy().to_string(), + "session_type" => session_data.session_type = value.to_string_lossy().to_string(), + "object_names" => session_data.object_names = value.to_string_lossy().to_string(), + "object_ids" => session_data.object_ids = value.to_string_lossy().to_string(), + "session_config" => { + // The session_config contains binary IPFIX template data + // Convert CxxString to Vec + session_data.session_config = value.as_bytes().to_vec(); + } + _ => { + debug!("Unknown field in session data: {} = {:?}", field, value); + } + } + } + + // Validate and process the session + if let Err(e) = self.validate_and_process_session(key, &session_data).await { + error!("Failed to process session {}: {}", key, e); + } + } + + /// Validates session data and processes enabled IPFIX sessions + /// + /// # Arguments + /// * `key` - Session identifier + /// * `session_data` - Parsed session configuration + async fn validate_and_process_session( + &mut self, + key: &str, + session_data: &SessionData, + ) -> Result<(), String> { + // Only process enabled sessions with ipfix type + if session_data.stream_status != "enabled" { + debug!("Skipping disabled session: {}", key); + return Ok(()); + } + + if session_data.session_type != "ipfix" { + debug!( + "Skipping non-IPFIX session: {} (type: {})", + key, session_data.session_type + ); + return Ok(()); + } + + if session_data.session_config.is_empty() { + return Err("Session config is empty".to_string()); + } + + info!( + "Processing enabled IPFIX session: key={}, object_names={}, object_ids={}", + key, session_data.object_names, session_data.object_ids + ); + + // Create IPFIX templates message + let templates = Arc::new(session_data.session_config.clone()); + + // Parse object_names if present + let object_names = if session_data.object_names.is_empty() { + None + } else { + Some( + session_data + .object_names + .split(',') + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect(), + ) + }; + + let message = IPFixTemplatesMessage::new(key.to_string(), templates, object_names); + + // Send to IPFIX actor + self.template_recipient + .send(message) + .await + .map_err(|e| format!("Failed to send IPFix templates to recipient: {}", e))?; + + info!("Successfully sent IPFix templates for session: {}", key); + Ok(()) + } + + /// Handles session deletion events + /// + /// # Arguments + /// * `key` - Session key that was deleted + async fn handle_session_delete(&mut self, key: &str) { + info!("Session deleted: {}", key); + + // Send deletion message to IPFIX actor + let delete_message = IPFixTemplatesMessage::delete(key.to_string()); + + match self.template_recipient.send(delete_message).await { + Ok(_) => { + info!("Successfully sent session deletion message for: {}", key); + } + Err(e) => { + error!("Failed to send session deletion message for {}: {}", key, e); + } + } + + debug!("Session cleanup for {} completed", key); + } +} + +/// Represents the parsed session data from HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE +/// +/// This structure holds the configuration for a telemetry session including: +/// - stream_status: Whether the session is "enabled" or "disabled" +/// - session_type: Type of session, typically "ipfix" for IPFIX templates +/// - object_names: Comma-separated list of object names (e.g., "Ethernet0") +/// - object_ids: Comma-separated list of object IDs (e.g., "1") +/// - session_config: Binary data containing the session configuration (IPFIX templates) +#[derive(Default, Debug)] +struct SessionData { + stream_status: String, + session_type: String, + object_names: String, + object_ids: String, + session_config: Vec, +} + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::HashMap; + use swss_common::CxxString; + use tokio::sync::mpsc::channel; + + // Helper function to create a SwssActor for testing + fn create_test_actor(template_sender: Sender) -> SwssActor { + SwssActor::new(template_sender).expect("Failed to create SwssActor") + } + + #[tokio::test] + async fn test_session_data_parsing() { + let (template_sender, _template_receiver) = channel(1); + let mut actor = create_test_actor(template_sender); + + // Test session data + let key = "test|PORT"; + let mut field_values = HashMap::new(); + field_values.insert("stream_status".to_string(), CxxString::from("enabled")); + field_values.insert("session_type".to_string(), CxxString::from("ipfix")); + field_values.insert("object_names".to_string(), CxxString::from("Ethernet0")); + field_values.insert("object_ids".to_string(), CxxString::from("1")); + field_values.insert("session_config".to_string(), CxxString::from("test_config")); + + // This should not panic and should process the session + actor.handle_session_update(key, &field_values).await; + } + + #[tokio::test] + async fn test_session_update_with_object_names() { + let (template_sender, mut template_receiver) = channel(1); + let mut actor = create_test_actor(template_sender); + + // Test session data with multiple object names + let key = "test_session|PORT"; + let mut field_values = HashMap::new(); + field_values.insert("stream_status".to_string(), CxxString::from("enabled")); + field_values.insert("session_type".to_string(), CxxString::from("ipfix")); + field_values.insert( + "object_names".to_string(), + CxxString::from("Ethernet0,Ethernet1,Ethernet2"), + ); + field_values.insert("object_ids".to_string(), CxxString::from("1,2,3")); + field_values.insert( + "session_config".to_string(), + CxxString::from("ipfix_template_data"), + ); + + // Process the session update + actor.handle_session_update(key, &field_values).await; + + // Verify the message was sent + let received_message = template_receiver + .try_recv() + .expect("Should have received a message"); + assert_eq!(received_message.key, "test_session|PORT"); + assert!(!received_message.is_delete); + assert!(received_message.templates.is_some()); + + // Verify object_names parsing + let object_names = received_message + .object_names + .expect("Should have object_names"); + assert_eq!(object_names, vec!["Ethernet0", "Ethernet1", "Ethernet2"]); + } + + #[tokio::test] + async fn test_session_update_without_object_names() { + let (template_sender, mut template_receiver) = channel(1); + let mut actor = create_test_actor(template_sender); + + // Test session data without object names + let key = "test_session|PORT"; + let mut field_values = HashMap::new(); + field_values.insert("stream_status".to_string(), CxxString::from("enabled")); + field_values.insert("session_type".to_string(), CxxString::from("ipfix")); + field_values.insert("object_ids".to_string(), CxxString::from("1")); + field_values.insert( + "session_config".to_string(), + CxxString::from("ipfix_template_data"), + ); + + // Process the session update + actor.handle_session_update(key, &field_values).await; + + // Verify the message was sent + let received_message = template_receiver + .try_recv() + .expect("Should have received a message"); + assert_eq!(received_message.key, "test_session|PORT"); + assert!(!received_message.is_delete); + assert!(received_message.templates.is_some()); + assert!(received_message.object_names.is_none()); + } + + #[tokio::test] + async fn test_session_deletion() { + let (template_sender, mut template_receiver) = channel(1); + let mut actor = create_test_actor(template_sender); + + let key = "test_session|PORT"; + + // Process session deletion + actor.handle_session_delete(key).await; + + // Verify the deletion message was sent + let received_message = template_receiver + .try_recv() + .expect("Should have received a deletion message"); + assert_eq!(received_message.key, "test_session|PORT"); + assert!(received_message.is_delete); + assert!(received_message.templates.is_none()); + assert!(received_message.object_names.is_none()); + } + + #[tokio::test] + async fn test_disabled_session_not_processed() { + let (template_sender, mut template_receiver) = channel(1); + let mut actor = create_test_actor(template_sender); + + // Test disabled session + let key = "disabled_session|PORT"; + let mut field_values = HashMap::new(); + field_values.insert("stream_status".to_string(), CxxString::from("disabled")); + field_values.insert("session_type".to_string(), CxxString::from("ipfix")); + field_values.insert("object_names".to_string(), CxxString::from("Ethernet0")); + field_values.insert("session_config".to_string(), CxxString::from("test_config")); + + // Process the session update + actor.handle_session_update(key, &field_values).await; + + // Verify no message was sent + assert!(template_receiver.try_recv().is_err()); + } + + #[tokio::test] + async fn test_non_ipfix_session_not_processed() { + let (template_sender, mut template_receiver) = channel(1); + let mut actor = create_test_actor(template_sender); + + // Test non-IPFIX session + let key = "non_ipfix_session|PORT"; + let mut field_values = HashMap::new(); + field_values.insert("stream_status".to_string(), CxxString::from("enabled")); + field_values.insert("session_type".to_string(), CxxString::from("netflow")); + field_values.insert("object_names".to_string(), CxxString::from("Ethernet0")); + field_values.insert("session_config".to_string(), CxxString::from("test_config")); + + // Process the session update + actor.handle_session_update(key, &field_values).await; + + // Verify no message was sent + assert!(template_receiver.try_recv().is_err()); + } + + #[tokio::test] + async fn test_empty_object_names_handling() { + let (template_sender, mut template_receiver) = channel(1); + let mut actor = create_test_actor(template_sender); + + // Test session data with empty object_names string + let key = "empty_names_session|PORT"; + let mut field_values = HashMap::new(); + field_values.insert("stream_status".to_string(), CxxString::from("enabled")); + field_values.insert("session_type".to_string(), CxxString::from("ipfix")); + field_values.insert("object_names".to_string(), CxxString::from("")); + field_values.insert("object_ids".to_string(), CxxString::from("1")); + field_values.insert( + "session_config".to_string(), + CxxString::from("ipfix_template_data"), + ); + + // Process the session update + actor.handle_session_update(key, &field_values).await; + + // Verify the message was sent with None object_names + let received_message = template_receiver + .try_recv() + .expect("Should have received a message"); + assert_eq!(received_message.key, "empty_names_session|PORT"); + assert!(!received_message.is_delete); + assert!(received_message.templates.is_some()); + assert!(received_message.object_names.is_none()); + } + + #[test] + fn test_session_data_default() { + let session_data = SessionData::default(); + assert_eq!(session_data.stream_status, ""); + assert_eq!(session_data.session_type, ""); + assert_eq!(session_data.object_names, ""); + assert_eq!(session_data.object_ids, ""); + assert!(session_data.session_config.is_empty()); + } + + #[test] + fn test_ipfix_templates_message_new() { + let templates = Arc::new(vec![1, 2, 3, 4]); + let object_names = Some(vec!["Ethernet0".to_string(), "Ethernet1".to_string()]); + + let message = IPFixTemplatesMessage::new( + "test_key".to_string(), + templates.clone(), + object_names.clone(), + ); + + assert_eq!(message.key, "test_key"); + assert_eq!(message.templates, Some(templates)); + assert_eq!(message.object_names, object_names); + assert!(!message.is_delete); + } + + #[test] + fn test_ipfix_templates_message_delete() { + let message = IPFixTemplatesMessage::delete("test_key".to_string()); + + assert_eq!(message.key, "test_key"); + assert!(message.templates.is_none()); + assert!(message.object_names.is_none()); + assert!(message.is_delete); + } + + // Helper function to create a test session entry in Redis + async fn insert_test_session( + table: &swss_common::Table, + session_key: &str, // This should be just the session part, e.g., "test_existing_data|PORT" + object_names: &str, + object_ids: &str, + session_config: &str, + ) { + use swss_common::CxxString; + + // The full Redis key includes the table name prefix + let full_redis_key = format!( + "{}|{}", + STATE_HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE, session_key + ); + + // Use table.set to set all field-value pairs at once + let field_values = vec![ + ("stream_status", CxxString::from("enabled")), + ("session_type", CxxString::from("ipfix")), + ("object_names", CxxString::from(object_names)), + ("object_ids", CxxString::from(object_ids)), + ("session_config", CxxString::from(session_config)), + ]; + + table + .set(&full_redis_key, field_values) + .expect("Should be able to insert session data using table.set"); + } + + // Helper function to set up Redis table for testing + fn setup_test_table() -> swss_common::Table { + use swss_common::{DbConnector, Table}; + + let table_conn = DbConnector::new_unix(STATE_DB_ID, SOCK_PATH, 0) + .expect("Should be able to connect to Redis for table"); + let table = Table::new(table_conn, STATE_HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE) + .expect("Should be able to create table"); + + // More aggressive cleanup: try to delete all possible test patterns + let test_patterns = [ + "HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE|test*", + "HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE|*test*", + "test*", + "*test*", + ]; + for pattern in &test_patterns { + table.del(pattern).ok(); + } + + // Also try FLUSHDB to completely clear the test database + // Note: This is aggressive but necessary for test isolation + // table.flushdb().ok(); // Uncomment if needed + + table + } + + // Helper function to cleanup test data + fn cleanup_test_session(table: &swss_common::Table, session_key: &str) { + let full_redis_key = format!( + "{}|{}", + STATE_HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE, session_key + ); + table.del(&full_redis_key).ok(); + } + + #[tokio::test] + #[serial_test::serial] + async fn test_swss_actor_processes_existing_data() { + use std::time::{SystemTime, UNIX_EPOCH}; + + let table = setup_test_table(); + + // Use a unique key based on timestamp to avoid interference + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_nanos(); + let test_key = format!("test_existing_data_{}", timestamp); + + // Clean up any potential conflicting data first + cleanup_test_session(&table, &test_key); + tokio::time::sleep(Duration::from_millis(50)).await; + + // Insert test data BEFORE starting the actor + insert_test_session(&table, &test_key, "Ethernet0", "1", "test_template_data").await; + + tokio::time::sleep(Duration::from_millis(100)).await; + + // Create and start SwssActor + let (template_sender, mut template_receiver) = channel(10); + let actor = create_test_actor(template_sender); + + // Run actor (will auto-terminate in test mode) + SwssActor::run(actor).await; + + // Check messages received + let mut received_messages = Vec::new(); + while let Ok(msg) = template_receiver.try_recv() { + received_messages.push(msg); + } + + // Cleanup + cleanup_test_session(&table, &test_key); + + // Verify results + let found_our_message = received_messages.iter().any(|msg| msg.key == test_key); + assert!(found_our_message, + "SwssActor should have processed existing session data with key: {}. Received {} messages: {:?}", + test_key, + received_messages.len(), + received_messages.iter().map(|m| &m.key).collect::>()); + + // Verify message content + let our_message = received_messages + .iter() + .find(|msg| msg.key == test_key) + .unwrap(); + assert!(!our_message.is_delete); + assert!(our_message.templates.is_some()); + + let object_names = our_message + .object_names + .as_ref() + .expect("Should have object_names"); + assert_eq!(object_names, &vec!["Ethernet0"]); + } + + #[tokio::test] + #[serial_test::serial] + async fn test_swss_actor_runtime_data_behavior() { + use std::time::{SystemTime, UNIX_EPOCH}; + + let table = setup_test_table(); + + // Use a unique key based on timestamp to avoid interference + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_nanos(); + let test_key = format!("test_runtime_data_{}", timestamp); + + // Create SwssActor + let (template_sender, mut template_receiver) = channel(10); + let actor = create_test_actor(template_sender); + + // Insert test data BEFORE starting the actor + insert_test_session( + &table, + &test_key, + "Ethernet1,Ethernet2", + "2,3", + "test_runtime_template", + ) + .await; + + // Run actor (will auto-terminate in test mode) + SwssActor::run(actor).await; + + // Check if we received the data + let mut received_messages = Vec::new(); + while let Ok(msg) = template_receiver.try_recv() { + received_messages.push(msg); + } + + // Cleanup + cleanup_test_session(&table, &test_key); + + // Look for our specific message + let message_found = received_messages.iter().any(|msg| msg.key == test_key); + + if message_found { + // If data was detected, verify it's correct + let received_message = received_messages + .iter() + .find(|msg| msg.key == test_key) + .unwrap(); + assert_eq!(received_message.key, test_key); + assert!(!received_message.is_delete); + assert!(received_message.templates.is_some()); + + let object_names = received_message + .object_names + .as_ref() + .expect("Should have object_names"); + assert_eq!(object_names, &vec!["Ethernet1", "Ethernet2"]); + } + + // The test passes regardless of whether data was detected or not + // because the behavior depends on the specific SWSS implementation and configuration + } + + #[tokio::test] + #[serial_test::serial] + async fn test_swss_actor_comprehensive_flow() { + use std::time::{SystemTime, UNIX_EPOCH}; + + let table = setup_test_table(); + + // Use a unique key based on timestamp to avoid interference + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_nanos(); + let existing_key = format!("test_existing_{}", timestamp); + let runtime_key = format!("test_runtime_{}", timestamp); + + // Step 1: Insert both EXISTING and RUNTIME data before starting actor + insert_test_session( + &table, + &existing_key, + "Ethernet0", + "1", + "existing_template_data", + ) + .await; + + insert_test_session( + &table, + &runtime_key, + "Ethernet3,Ethernet4", + "3,4", + "runtime_template_data", + ) + .await; + + tokio::time::sleep(Duration::from_millis(100)).await; + + // Step 2: Create and run SwssActor + let (template_sender, mut template_receiver) = channel(10); + let actor = create_test_actor(template_sender); + + // Run actor (will auto-terminate in test mode) + SwssActor::run(actor).await; + + // Step 3: Collect all messages + let mut all_messages = Vec::new(); + while let Ok(msg) = template_receiver.try_recv() { + all_messages.push(msg); + } + + // Cleanup + cleanup_test_session(&table, &existing_key); + cleanup_test_session(&table, &runtime_key); + + // Step 4: Verify the existing session was processed + let found_existing_message = all_messages.iter().any(|msg| msg.key == existing_key); + assert!(found_existing_message, + "SwssActor should have processed existing session data with key: {}. Received {} messages: {:?}", + existing_key, + all_messages.len(), + all_messages.iter().map(|m| &m.key).collect::>()); + + // Verify existing message content + let existing_message = all_messages + .iter() + .find(|msg| msg.key == existing_key) + .unwrap(); + assert!(!existing_message.is_delete); + assert!(existing_message.templates.is_some()); + + let existing_object_names = existing_message + .object_names + .as_ref() + .expect("Should have object_names"); + assert_eq!(existing_object_names, &vec!["Ethernet0"]); + + // Step 5: Check for runtime data (optional behavior) + let runtime_message_found = all_messages.iter().any(|msg| msg.key == runtime_key); + + if runtime_message_found { + // If runtime data was detected, verify it's correct + let runtime_message = all_messages + .iter() + .find(|msg| msg.key == runtime_key) + .unwrap(); + assert_eq!(runtime_message.key, runtime_key); + assert!(!runtime_message.is_delete); + assert!(runtime_message.templates.is_some()); + + let runtime_object_names = runtime_message + .object_names + .as_ref() + .expect("Should have object_names"); + assert_eq!(runtime_object_names, &vec!["Ethernet3", "Ethernet4"]); + } + + // Test passes if existing data was processed correctly + // Runtime data detection depends on SWSS implementation details + } +} diff --git a/crates/countersyncd/src/lib.rs b/crates/countersyncd/src/lib.rs new file mode 100644 index 00000000000..d9fc7b8f1c9 --- /dev/null +++ b/crates/countersyncd/src/lib.rs @@ -0,0 +1,4 @@ +// Library modules for integration tests +pub mod actor; +pub mod message; +pub mod sai; diff --git a/crates/countersyncd/src/main.rs b/crates/countersyncd/src/main.rs new file mode 100644 index 00000000000..bb39709dedf --- /dev/null +++ b/crates/countersyncd/src/main.rs @@ -0,0 +1,570 @@ +// Application modules +mod actor; +mod message; +mod sai; + +// External dependencies +use clap::Parser; +use log::{error, info}; +use std::time::Duration; +use tokio::{spawn, sync::mpsc::channel}; + +// Internal actor implementations +use crate::actor::{ + control_netlink::ControlNetlinkActor, + counter_db::{CounterDBActor, CounterDBConfig}, + data_netlink::{get_genl_family_group, DataNetlinkActor}, + ipfix::IpfixActor, + stats_reporter::{ConsoleWriter, StatsReporterActor, StatsReporterConfig}, + swss::SwssActor, + otel::{OtelActor, OtelActorConfig}, +}; + +/// Initialize logging based on command line arguments +fn init_logging(log_level: &str, log_format: &str) { + use env_logger::{Builder, Target, WriteStyle}; + use log::LevelFilter; + use std::io::Write; + + let level = match log_level.to_lowercase().as_str() { + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => { + eprintln!("Invalid log level '{}', using 'info'", log_level); + LevelFilter::Info + } + }; + + let mut builder = Builder::new(); + builder.filter_level(level); + builder.target(Target::Stdout); + builder.write_style(WriteStyle::Auto); + + match log_format.to_lowercase().as_str() { + "simple" => { + builder.format(|buf, record| writeln!(buf, "[{}] {}", record.level(), record.args())); + } + "full" => { + builder.format(|buf, record| { + writeln!( + buf, + "[{}] [{}:{}] [{}] {}", + chrono::Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), + record.file().unwrap_or("unknown"), + record.line().unwrap_or(0), + record.level(), + record.args() + ) + }); + } + _ => { + eprintln!("Invalid log format '{}', using 'full'", log_format); + builder.format(|buf, record| { + writeln!( + buf, + "[{}] [{}:{}] [{}] {}", + chrono::Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), + record.file().unwrap_or("unknown"), + record.line().unwrap_or(0), + record.level(), + record.args() + ) + }); + } + } + + builder.init(); +} + +/// SONiC High Frequency Telemetry Counter Sync Daemon +/// +/// This application processes high-frequency telemetry data from SONiC switches, +/// converting netlink messages and SWSS state database updates through IPFIX format to SAI statistics. +/// +/// The application consists of six main actors: +/// - DataNetlinkActor: Receives raw netlink messages from the kernel and handles data socket +/// - ControlNetlinkActor: Monitors netlink family registration/unregistration and triggers reconnections +/// - SwssActor: Monitors SONiC orchestrator messages via state database for IPFIX templates +/// - IpfixActor: Processes IPFIX templates and data records to extract SAI stats +/// - StatsReporterActor: Reports processed statistics to the console +/// - CounterDBActor: Writes processed statistics to the Counter Database in Redis +#[derive(Parser)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Enable stats reporting to console + #[arg(short, long, default_value = "false")] + enable_stats: bool, + + /// Stats reporting interval in seconds + #[arg(short = 'i', long, default_value = "10")] + stats_interval: u64, + + /// Show detailed statistics in reports + #[arg(short = 'd', long, default_value = "true")] + detailed_stats: bool, + + /// Maximum number of stats per report (0 for unlimited) + #[arg(short = 'm', long, default_value = "20")] + max_stats_per_report: u32, + + /// Enable counter database writing + #[arg(short = 'c', long, default_value = "false")] + enable_counter_db: bool, + + /// Counter database write frequency in seconds + #[arg(short = 'f', long, default_value = "3")] + counter_db_frequency: u64, + + /// Log level (trace, debug, info, warn, error) + #[arg( + short = 'l', + long, + default_value = "info", + help = "Set the logging level" + )] + log_level: String, + + /// Log format (simple, full) + #[arg( + long, + default_value = "full", + help = "Set the log output format: 'simple' for level and message only, 'full' for timestamp, file, line, level, and message" + )] + log_format: String, + + /// Channel capacity for data_netlink to ipfix communication (IPFIX records) + #[arg( + long, + default_value = "1024", + help = "Set the channel capacity for IPFIX records from data_netlink to ipfix actor" + )] + data_netlink_capacity: usize, + + /// Channel capacity for stats_reporter communication + #[arg( + long, + default_value = "1024", + help = "Set the channel capacity for stats_reporter actor" + )] + stats_reporter_capacity: usize, + + /// Channel capacity for counter_db communication + #[arg( + long, + default_value = "1024", + help = "Set the channel capacity for counter_db actor" + )] + counter_db_capacity: usize, + + /// Enable OpenTelemetry metrics export + #[arg(short = 'o', long, default_value = "false")] + enable_otel: bool, + + /// OpenTelemetry collector endpoint + #[arg( + long, + default_value = "http://localhost:4317", + help = "OpenTelemetry collector endpoint URL" + )] + otel_endpoint: String, + + /// Enable OpenTelemetry console output + #[arg( + long, + default_value = "true", + help = "Print OpenTelemetry metrics to console" + )] + otel_console: bool, + + /// Channel capacity for otel communication + #[arg( + long, + default_value = "1024", + help = "Set the channel capacity for otel actor" + )] + otel_capacity: usize, +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Parse command line arguments + let args = Args::parse(); + + // Initialize logging based on command line arguments + init_logging(&args.log_level, &args.log_format); + + info!("Starting SONiC High Frequency Telemetry Counter Sync Daemon"); + info!("Stats reporting enabled: {}", args.enable_stats); + if args.enable_stats { + info!("Stats reporting interval: {} seconds", args.stats_interval); + info!("Detailed stats: {}", args.detailed_stats); + info!("Max stats per report: {}", args.max_stats_per_report); + } + info!("Counter DB writing enabled: {}", args.enable_counter_db); + if args.enable_counter_db { + info!( + "Counter DB write frequency: {} seconds", + args.counter_db_frequency + ); + } + info!("OpenTelemetry export enabled: {}", args.enable_otel); + if args.enable_otel { + info!("OpenTelemetry endpoint: {}", args.otel_endpoint); + info!("OpenTelemetry console output: {}", args.otel_console); + } + info!( + "Channel capacities - ipfix_records: {}, stats_reporter: {}, counter_db: {}, otel: {}", + args.data_netlink_capacity, args.stats_reporter_capacity, args.counter_db_capacity, args.otel_capacity + ); + + // Create communication channels between actors with configurable capacities + let (command_sender, command_receiver) = channel(10); // Keep small buffer for commands + let (ipfix_record_sender, ipfix_record_receiver) = channel(args.data_netlink_capacity); + let (ipfix_template_sender, ipfix_template_receiver) = channel(10); // Fixed capacity for templates + let (stats_report_sender, stats_report_receiver) = channel(args.stats_reporter_capacity); + let (counter_db_sender, counter_db_receiver) = channel(args.counter_db_capacity); + let (otel_sender, otel_receiver) = channel(args.otel_capacity); + let (otel_shutdown_sender, otel_shutdown_receiver) = tokio::sync::oneshot::channel(); + + // Get netlink family and group configuration from SONiC constants + let (family, group) = get_genl_family_group(); + info!("Using netlink family: '{}', group: '{}'", family, group); + + // Initialize and configure actors + let mut data_netlink = DataNetlinkActor::new(family.as_str(), group.as_str(), command_receiver); + data_netlink.add_recipient(ipfix_record_sender); + + let control_netlink = ControlNetlinkActor::new(family.as_str(), command_sender); + + let mut ipfix = IpfixActor::new(ipfix_template_receiver, ipfix_record_receiver); + + // Initialize SwssActor to monitor SONiC orchestrator messages + let swss = match SwssActor::new(ipfix_template_sender) { + Ok(actor) => actor, + Err(e) => { + error!("Failed to initialize SwssActor: {}", e); + return Err(e.into()); + } + }; + + // Configure stats reporter with settings from command line arguments + let stats_reporter = if args.enable_stats { + let reporter_config = StatsReporterConfig { + interval: Duration::from_secs(args.stats_interval), + detailed: args.detailed_stats, + max_stats_per_report: if args.max_stats_per_report == 0 { + None + } else { + Some(args.max_stats_per_report as usize) + }, + }; + + // Add stats reporter to ipfix recipients only when enabled + ipfix.add_recipient(stats_report_sender.clone()); + Some(StatsReporterActor::new( + stats_report_receiver, + reporter_config, + ConsoleWriter, + )) + } else { + // Drop the receiver if stats reporting is disabled + drop(stats_report_receiver); + None + }; + + // Configure counter database writer with settings from command line arguments + let counter_db = if args.enable_counter_db { + let counter_db_config = CounterDBConfig { + interval: Duration::from_secs(args.counter_db_frequency), + }; + + // Add counter DB to ipfix recipients only when enabled + ipfix.add_recipient(counter_db_sender.clone()); + match CounterDBActor::new(counter_db_receiver, counter_db_config) { + Ok(actor) => Some(actor), + Err(e) => { + error!("Failed to initialize CounterDBActor: {}", e); + return Err(e.into()); + } + } + } else { + // Drop the receiver if counter DB writing is disabled + drop(counter_db_receiver); + None + }; + + // Configure OpenTelemetry export with settings from command line arguments + let otel_actor = if args.enable_otel { + let otel_config = OtelActorConfig { + print_to_console: args.otel_console, + collector_endpoint: args.otel_endpoint.clone(), + }; + + // Add OTEL to ipfix recipients only when enabled + ipfix.add_recipient(otel_sender.clone()); + match OtelActor::new(otel_receiver, otel_config, otel_shutdown_sender).await { + Ok(actor) => Some(actor), + Err(e) => { + error!("Failed to initialize OtelActor: {}", e); + return Err(e.into()); + } + } + } else { + // Drop the receiver if OTEL export is disabled + drop(otel_receiver); + drop(otel_shutdown_sender); + None + }; + + info!("Starting actor tasks..."); + + // Spawn actor tasks + let data_netlink_handle = spawn(async move { + info!("Data netlink actor started"); + DataNetlinkActor::run(data_netlink).await; + info!("Data netlink actor terminated"); + }); + + let control_netlink_handle = spawn(async move { + info!("Control netlink actor started"); + ControlNetlinkActor::run(control_netlink).await; + info!("Control netlink actor terminated"); + }); + + // Use spawn_blocking to ensure IPFIX actor runs on a dedicated thread + // This is important for thread-local variables + let ipfix_handle = tokio::task::spawn_blocking(move || { + info!("IPFIX actor started on dedicated thread"); + // Create a new runtime for async operations within this blocking thread + let rt = tokio::runtime::Runtime::new().expect("Failed to create runtime for IPFIX actor"); + rt.block_on(async move { + IpfixActor::run(ipfix).await; + }); + info!("IPFIX actor terminated"); + }); + + let swss_handle = spawn(async move { + info!("SWSS actor started"); + SwssActor::run(swss).await; + info!("SWSS actor terminated"); + }); + + // Only spawn stats reporter if enabled + let reporter_handle = if let Some(stats_reporter) = stats_reporter { + Some(spawn(async move { + info!("Stats reporter actor started"); + StatsReporterActor::run(stats_reporter).await; + info!("Stats reporter actor terminated"); + })) + } else { + info!("Stats reporting disabled - not starting stats reporter actor"); + None + }; + + // Only spawn counter DB writer if enabled + let counter_db_handle = if let Some(counter_db) = counter_db { + Some(spawn(async move { + info!("Counter DB actor started"); + CounterDBActor::run(counter_db).await; + info!("Counter DB actor terminated"); + })) + } else { + info!("Counter DB writing disabled - not starting counter DB actor"); + None + }; + + // Only spawn OpenTelemetry actor if enabled + let otel_handle = if let Some(otel_actor) = otel_actor { + Some(spawn(async move { + info!("OpenTelemetry actor started"); + OtelActor::run(otel_actor).await; + info!("OpenTelemetry actor terminated"); + })) + } else { + info!("OpenTelemetry export disabled - not starting OpenTelemetry actor"); + None + }; + + // Wait for all actors to complete and handle any errors + let data_netlink_result = data_netlink_handle.await; + let control_netlink_result = control_netlink_handle.await; + let ipfix_result = ipfix_handle.await.map_err(|e| { + error!("IPFIX blocking task join error: {:?}", e); + e + }); + let swss_result = swss_handle.await; + let reporter_result = if let Some(handle) = reporter_handle { + Some(handle.await) + } else { + None + }; + let counter_db_result = if let Some(handle) = counter_db_handle { + Some(handle.await) + } else { + None + }; + let otel_result = if let Some(handle) = otel_handle { + Some(handle.await) + } else { + None + }; + + // Handle results based on what actors were enabled + let all_successful = match (reporter_result.is_some(), counter_db_result.is_some(), otel_result.is_some()) { + (true, true, true) => { + // All optional actors enabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result, + reporter_result.as_ref().unwrap(), + counter_db_result.as_ref().unwrap(), + otel_result.as_ref().unwrap() + ), + (Ok(()), Ok(()), Ok(()), Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + (true, true, false) => { + // Stats reporter and counter DB enabled, OTEL disabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result, + reporter_result.as_ref().unwrap(), + counter_db_result.as_ref().unwrap() + ), + (Ok(()), Ok(()), Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + (true, false, true) => { + // Stats reporter and OTEL enabled, counter DB disabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result, + reporter_result.as_ref().unwrap(), + otel_result.as_ref().unwrap() + ), + (Ok(()), Ok(()), Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + (false, true, true) => { + // Counter DB and OTEL enabled, stats reporter disabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result, + counter_db_result.as_ref().unwrap(), + otel_result.as_ref().unwrap() + ), + (Ok(()), Ok(()), Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + (true, false, false) => { + // Only stats reporter enabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result, + reporter_result.as_ref().unwrap() + ), + (Ok(()), Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + (false, true, false) => { + // Only counter DB enabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result, + counter_db_result.as_ref().unwrap() + ), + (Ok(()), Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + (false, false, true) => { + // Only OTEL enabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result, + otel_result.as_ref().unwrap() + ), + (Ok(()), Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + (false, false, false) => { + // None of the optional actors enabled + matches!( + ( + &data_netlink_result, + &control_netlink_result, + &ipfix_result, + &swss_result + ), + (Ok(()), Ok(()), Ok(()), Ok(())) + ) + } + }; + + if all_successful { + let status_msg = match (reporter_result.is_some(), counter_db_result.is_some(), otel_result.is_some()) { + (true, true, true) => "All actors completed successfully", + (true, true, false) => "All actors completed successfully (OpenTelemetry disabled)", + (true, false, true) => "All actors completed successfully (counter DB disabled)", + (false, true, true) => "All actors completed successfully (stats reporting disabled)", + (true, false, false) => "All actors completed successfully (counter DB and OpenTelemetry disabled)", + (false, true, false) => "All actors completed successfully (stats reporting and OpenTelemetry disabled)", + (false, false, true) => "All actors completed successfully (stats reporting and counter DB disabled)", + (false, false, false) => { + "All actors completed successfully (stats reporting, counter DB, and OpenTelemetry disabled)" + } + }; + info!("{}", status_msg); + Ok(()) + } else { + // Check which actor failed + if let Err(e) = data_netlink_result { + error!("Data netlink actor failed: {:?}", e); + Err(e.into()) + } else if let Err(e) = control_netlink_result { + error!("Control netlink actor failed: {:?}", e); + Err(e.into()) + } else if let Err(e) = ipfix_result { + error!("IPFIX actor failed: {:?}", e); + Err(e.into()) + } else if let Err(e) = swss_result { + error!("SWSS actor failed: {:?}", e); + Err(e.into()) + } else if let Some(Err(e)) = reporter_result { + error!("Stats reporter actor failed: {:?}", e); + Err(e.into()) + } else if let Some(Err(e)) = counter_db_result { + error!("Counter DB actor failed: {:?}", e); + Err(e.into()) + } else if let Some(Err(e)) = otel_result { + error!("OpenTelemetry actor failed: {:?}", e); + Err(e.into()) + } else { + error!("Unknown actor failure"); + Err("Unknown actor failure".into()) + } + } +} diff --git a/crates/countersyncd/src/message/buffer.rs b/crates/countersyncd/src/message/buffer.rs new file mode 100644 index 00000000000..58631e4a8ec --- /dev/null +++ b/crates/countersyncd/src/message/buffer.rs @@ -0,0 +1,3 @@ +use std::sync::Arc; + +pub type SocketBufferMessage = Arc>; diff --git a/crates/countersyncd/src/message/ipfix.rs b/crates/countersyncd/src/message/ipfix.rs new file mode 100644 index 00000000000..8ff6c36e82b --- /dev/null +++ b/crates/countersyncd/src/message/ipfix.rs @@ -0,0 +1,31 @@ +use std::sync::Arc; + +pub type IPFixTemplates = Arc>; + +#[derive(Debug, Clone)] +pub struct IPFixTemplatesMessage { + pub key: String, + pub templates: Option, + pub object_names: Option>, + pub is_delete: bool, +} + +impl IPFixTemplatesMessage { + pub fn new(key: String, templates: IPFixTemplates, object_names: Option>) -> Self { + Self { + key, + templates: Some(templates), + object_names, + is_delete: false, + } + } + + pub fn delete(key: String) -> Self { + Self { + key, + templates: None, + object_names: None, + is_delete: true, + } + } +} diff --git a/crates/countersyncd/src/message/mod.rs b/crates/countersyncd/src/message/mod.rs new file mode 100644 index 00000000000..2d018e39e32 --- /dev/null +++ b/crates/countersyncd/src/message/mod.rs @@ -0,0 +1,7 @@ +pub mod buffer; +pub mod ipfix; +pub mod netlink; +pub mod saistats; + +pub mod swss; +pub mod otel; diff --git a/crates/countersyncd/src/message/netlink.rs b/crates/countersyncd/src/message/netlink.rs new file mode 100644 index 00000000000..6ec1b6ad3cf --- /dev/null +++ b/crates/countersyncd/src/message/netlink.rs @@ -0,0 +1,13 @@ +#[derive(Debug)] +pub struct SocketConnect { + pub family: String, + pub group: String, +} + +#[allow(dead_code)] +#[derive(Debug)] +pub enum NetlinkCommand { + Close, + Reconnect, + SocketConnect(SocketConnect), +} diff --git a/crates/countersyncd/src/message/otel.rs b/crates/countersyncd/src/message/otel.rs new file mode 100644 index 00000000000..3ab8bdf53f8 --- /dev/null +++ b/crates/countersyncd/src/message/otel.rs @@ -0,0 +1,441 @@ +//! OpenTelemetry Message Types +//! +//! This module defines data structures for converting SAI statistics +//! to OpenTelemetry gauge format for export to observability systems. + +use std::sync::Arc; +use crate::message::saistats::{SAIStat, SAIStats}; +use opentelemetry_proto::tonic::{ + common::v1::{KeyValue as ProtoKeyValue, AnyValue, any_value::Value}, + metrics::v1::{NumberDataPoint, number_data_point}, +}; +use log::{info, error, debug, warn}; + +/// OpenTelemetry Gauge representation for SAI statistics +/// +/// This struct represents an OpenTelemetry gauge metric following the OTLP protocol. +/// Each gauge contains data points with attributes, timestamps, and values derived +/// from SAI statistics. +#[derive(Debug, Clone, PartialEq)] +pub struct OtelGauge { + /// Metric name (e.g., "sai_counter_type_100_stat_200") + pub name: String, + /// Description of the metric + pub description: String, + /// Unit of measurement (typically "1" for counters) + pub unit: String, + /// Data points for this gauge + pub data_points: Vec, +} + +/// OpenTelemetry Data Point for a single measurement +/// +/// Represents a single measurement point in time for a gauge metric, +/// converted from a SAI statistic entry. +#[derive(Debug, Clone, PartialEq)] +pub struct OtelDataPoint { + /// Attributes (labels) for this data point + pub attributes: Vec, + /// Timestamp in nanoseconds since Unix epoch + pub time_unix_nano: u64, + /// The gauge value (converted from SAI counter) + pub value: i64, +} + +/// OpenTelemetry Attribute (Key-Value Pair) +/// +/// Represents a single attribute/label attached to a metric data point. +#[derive(Debug, Clone, PartialEq)] +pub struct OtelAttribute { + /// Attribute key + pub key: String, + /// Attribute value + pub value: String, +} + +impl OtelAttribute { + /// Creates a new OtelAttribute + pub fn new(key: impl Into, value: impl Into) -> Self { + Self { + key: key.into(), + value: value.into(), + } + } + + /// Converts to OpenTelemetry protobuf KeyValue + pub fn to_proto(&self) -> ProtoKeyValue { + ProtoKeyValue { + key: self.key.clone(), + value: Some(AnyValue { + value: Some(Value::StringValue(self.value.clone())), + }), + } + } +} + +impl OtelDataPoint { + /// Creates a new OtelDataPoint from SAI statistic + pub fn from_sai_stat(sai_stat: &SAIStat, observation_time_nano: u64) -> Self { + let attributes = vec![ + OtelAttribute::new("object_name", &sai_stat.object_name), + OtelAttribute::new("sai_type_id", sai_stat.type_id.to_string()), + OtelAttribute::new("sai_stat_id", sai_stat.stat_id.to_string()), + ]; + + Self { + attributes, + time_unix_nano: observation_time_nano, + value: sai_stat.counter as i64, + } + } + + /// Converts to OpenTelemetry protobuf NumberDataPoint + pub fn to_proto(&self) -> NumberDataPoint { + NumberDataPoint { + time_unix_nano: self.time_unix_nano, + value: Some(number_data_point::Value::AsInt(self.value)), + attributes: self.attributes.iter().map(|attr| attr.to_proto()).collect(), + ..Default::default() + } + } +} + +impl OtelGauge { + /// Creates a new OtelGauge from SAI statistic + pub fn from_sai_stat(sai_stat: &SAIStat, observation_time_nano: u64) -> Self { + let name = format!("sai_counter_type_{}_stat_{}", sai_stat.type_id, sai_stat.stat_id); + let description = format!( + "SAI counter for object {} (type:{}, stat:{})", + sai_stat.object_name, sai_stat.type_id, sai_stat.stat_id + ); + + let data_point = OtelDataPoint::from_sai_stat(sai_stat, observation_time_nano); + + Self { + name, + description, + unit: "1".to_string(), + data_points: vec![data_point], + } + } + + /// Creates multiple OtelGauges from SAI statistics collection + pub fn from_sai_stats(sai_stats: &SAIStats) -> Vec { + // Use the observation_time from the SAI statistics + let observation_time_nano = sai_stats.observation_time; + + sai_stats.stats + .iter() + .map(|stat| Self::from_sai_stat(stat, observation_time_nano)) + .collect() + } +} + +/// Collection of OpenTelemetry gauges with metadata +/// +/// This structure represents a collection of OpenTelemetry gauges +/// derived from SAI statistics, ready for export to collectors. +#[derive(Debug, Clone)] +pub struct OtelMetrics { + /// Service name for resource attribution + pub service_name: String, + /// Instrumentation scope name + pub scope_name: String, + /// Instrumentation scope version + pub scope_version: String, + /// Collection of gauge metrics + pub gauges: Vec, +} + +impl OtelMetrics { + /// Creates OtelMetrics from SAI statistics + pub fn from_sai_stats(sai_stats: &SAIStats) -> Self { + let gauges = OtelGauge::from_sai_stats(sai_stats); + + Self { + service_name: "countersyncd".to_string(), + scope_name: "countersyncd".to_string(), + scope_version: "1.0".to_string(), + gauges, + } + } + + /// Returns the number of gauges in this collection + pub fn len(&self) -> usize { + self.gauges.len() + } + + /// Returns true if this collection is empty + pub fn is_empty(&self) -> bool { + self.gauges.is_empty() + } +} + +/// Type alias for Arc-wrapped OtelMetrics for efficient sharing +pub type OtelMetricsMessage = Arc; + +/// Extension trait for creating OtelMetricsMessage instances +pub trait OtelMetricsMessageExt { + /// Converts OtelMetrics to Arc-wrapped message + fn into_message(self) -> OtelMetricsMessage; + + /// Creates OtelMetricsMessage from SAI statistics + fn from_sai_stats(sai_stats: &SAIStats) -> OtelMetricsMessage; +} + +impl OtelMetricsMessageExt for OtelMetrics { + fn into_message(self) -> OtelMetricsMessage { + Arc::new(self) + } + + fn from_sai_stats(sai_stats: &SAIStats) -> OtelMetricsMessage { + Arc::new(OtelMetrics::from_sai_stats(sai_stats)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::saistats::{SAIStat, SAIStats, SAIStatsMessageExt}; + + /// Helper function to create test SAI statistics (similar to saistats.rs pattern) + fn create_test_sai_stats(observation_time: u64, stat_count: usize) -> SAIStats { + let stats = (0..stat_count) + .map(|i| SAIStat { + object_name: format!("Ethernet{}", i), + type_id: (i * 100 + 1) as u32, + stat_id: (i * 10 + 1) as u32, + counter: (i * 1000 + 500) as u64, + }) + .collect(); + + SAIStats::new(observation_time, stats) + } + + #[test] + fn test_otel_attribute_creation() { + let attr = OtelAttribute::new("object_name", "Ethernet0"); + assert_eq!(attr.key, "object_name"); + assert_eq!(attr.value, "Ethernet0"); + + let attr2 = OtelAttribute::new("sai_type_id", "100"); + assert_eq!(attr2.key, "sai_type_id"); + assert_eq!(attr2.value, "100"); + } + + #[test] + fn test_otel_data_point_from_sai_stat() { + let sai_stat = SAIStat { + object_name: "Ethernet0".to_string(), + type_id: 100, + stat_id: 200, + counter: 1500, + }; + + let observation_time_nano = 0u64; // 1970-01-01 00:00:00 UTC + let data_point = OtelDataPoint::from_sai_stat(&sai_stat, observation_time_nano); + + assert_eq!(data_point.time_unix_nano, observation_time_nano); + assert_eq!(data_point.value, 1500); + assert_eq!(data_point.attributes.len(), 3); + + // Check attributes + let object_name_attr = data_point.attributes.iter() + .find(|attr| attr.key == "object_name").unwrap(); + assert_eq!(object_name_attr.value, "Ethernet0"); + + let type_id_attr = data_point.attributes.iter() + .find(|attr| attr.key == "sai_type_id").unwrap(); + assert_eq!(type_id_attr.value, "100"); + + let stat_id_attr = data_point.attributes.iter() + .find(|attr| attr.key == "sai_stat_id").unwrap(); + assert_eq!(stat_id_attr.value, "200"); + } + + #[test] + fn test_otel_gauge_from_sai_stat() { + let sai_stat = SAIStat { + object_name: "BufferPool1".to_string(), + type_id: 24, + stat_id: 2, + counter: 5000, + }; + + let observation_time_nano = 0u64; // 1970-01-01 00:00:00 UTC + let gauge = OtelGauge::from_sai_stat(&sai_stat, observation_time_nano); + + assert_eq!(gauge.name, "sai_counter_type_24_stat_2"); + assert_eq!(gauge.description, "SAI counter for object BufferPool1 (type:24, stat:2)"); + assert_eq!(gauge.unit, "1"); + assert_eq!(gauge.data_points.len(), 1); + + let data_point = &gauge.data_points[0]; + assert_eq!(data_point.value, 5000); + assert_eq!(data_point.time_unix_nano, observation_time_nano); + } + + #[test] + fn test_otel_gauge_from_sai_stats_collection() { + let sai_stats = create_test_sai_stats(1672531200, 3); + let gauges = OtelGauge::from_sai_stats(&sai_stats); + + assert_eq!(gauges.len(), 3); + + // Check first gauge + let first_gauge = &gauges[0]; + assert_eq!(first_gauge.name, "sai_counter_type_1_stat_1"); + assert!(first_gauge.description.contains("Ethernet0")); + assert_eq!(first_gauge.data_points[0].value, 500); + + let expected_time_nano = 1672531200u64; + for gauge in &gauges { + assert_eq!(gauge.data_points[0].time_unix_nano, expected_time_nano); + } + } + + #[test] + fn test_otel_metrics_from_sai_stats() { + let sai_stats = SAIStats::new( + 1234567890, + vec![ + SAIStat { + object_name: "Ethernet0".to_string(), + type_id: 1, + stat_id: 1, + counter: 12345, + }, + SAIStat { + object_name: "BufferPool1".to_string(), + type_id: 24, + stat_id: 2, + counter: 67890, + }, + ], + ); + + let otel_metrics = OtelMetrics::from_sai_stats(&sai_stats); + + assert_eq!(otel_metrics.service_name, "countersyncd"); + assert_eq!(otel_metrics.scope_name, "countersyncd"); + assert_eq!(otel_metrics.scope_version, "1.0"); + assert_eq!(otel_metrics.len(), 2); + assert!(!otel_metrics.is_empty()); + + // Check individual gauges + let port_gauge = otel_metrics.gauges.iter() + .find(|g| g.name == "sai_counter_type_1_stat_1").unwrap(); + assert_eq!(port_gauge.data_points[0].value, 12345); + + let buffer_gauge = otel_metrics.gauges.iter() + .find(|g| g.name == "sai_counter_type_24_stat_2").unwrap(); + assert_eq!(buffer_gauge.data_points[0].value, 67890); + } + + #[test] + fn test_otel_metrics_message_creation() { + let sai_stats = create_test_sai_stats(555555, 2); + + // Test using into_message() + let otel_metrics = OtelMetrics::from_sai_stats(&sai_stats); + let message1 = otel_metrics.into_message(); + + // Test using from_sai_stats() + let message2 = OtelMetrics::from_sai_stats(&sai_stats); + + assert_eq!(message1.service_name, message2.service_name); + assert_eq!(message1.len(), message2.len()); + assert_eq!(message1.gauges.len(), 2); + } + + #[test] + fn test_otel_data_point_proto_conversion() { + let sai_stat = SAIStat { + object_name: "TestInterface".to_string(), + type_id: 999, + stat_id: 888, + counter: 777, + }; + + let data_point = OtelDataPoint::from_sai_stat(&sai_stat, 123456789); + let proto_point = data_point.to_proto(); + + assert_eq!(proto_point.time_unix_nano, 123456789); + match proto_point.value.unwrap() { + number_data_point::Value::AsInt(val) => assert_eq!(val, 777), + _ => panic!("Expected integer value"), + } + assert_eq!(proto_point.attributes.len(), 3); + + // Check one attribute conversion + let object_attr = &proto_point.attributes[0]; + assert_eq!(object_attr.key, "object_name"); + if let Some(AnyValue { value: Some(Value::StringValue(val)) }) = &object_attr.value { + assert_eq!(val, "TestInterface"); + } else { + panic!("Expected string value"); + } + } + +#[test] +fn test_sai_to_otel_gauge_conversion() { + let test_stats = vec![ + SAIStat { object_name: "Ethernet0".to_string(), type_id: 1, stat_id: 1, counter: 1000000 }, + SAIStat { object_name: "Ethernet0".to_string(), type_id: 1, stat_id: 2, counter: 2000000 }, + SAIStat { object_name: "Ethernet1".to_string(), type_id: 1, stat_id: 1, counter: 1500000 }, + SAIStat { object_name: "BufferPool_ingress_lossless_pool".to_string(), type_id: 24, stat_id: 1, counter: 500000 }, + ]; + + let sai_stats = SAIStats::new(1672531200, test_stats); + let otel_metrics = OtelMetrics::from_sai_stats(&sai_stats); + + for (index, gauge) in otel_metrics.gauges.iter().enumerate() { + let data_point = &gauge.data_points[0]; + info!("[{}] Gauge: {}", index + 1, gauge.name); + info!("Value: {}, Unit: {}, Timestamp: {}ns", data_point.value, gauge.unit, data_point.time_unix_nano); + info!("Description: {}", gauge.description); + + if !data_point.attributes.is_empty() { + for attr in &data_point.attributes { + debug!(" - {}={}", attr.key, attr.value); + } + } + info!("Raw gauge: {:#?}", gauge); + } + + assert_eq!(otel_metrics.len(), 4); + + // Verify port stats conversion + let port_stats: Vec<_> = otel_metrics.gauges.iter() + .filter(|g| g.description.contains("Ethernet")) + .collect(); + assert_eq!(port_stats.len(), 3); + + // Verify buffer pool stats conversion + let buffer_stats: Vec<_> = otel_metrics.gauges.iter() + .filter(|g| g.description.contains("BufferPool")) + .collect(); + assert_eq!(buffer_stats.len(), 1); + + // Check that all metrics have proper timestamps + let expected_time = 1672531200u64; + for gauge in &otel_metrics.gauges { + assert_eq!(gauge.data_points[0].time_unix_nano, expected_time); + } + + // Verify metric naming + let port_rx_metric = otel_metrics.gauges.iter() + .find(|g| g.name == "sai_counter_type_1_stat_1").unwrap(); + assert!(port_rx_metric.description.contains("type:1, stat:1")); +} + + #[test] + fn test_empty_sai_stats_to_otel() { + let empty_stats = SAIStats::new(1111111111, vec![]); + let otel_metrics = OtelMetrics::from_sai_stats(&empty_stats); + + assert_eq!(otel_metrics.len(), 0); + assert!(otel_metrics.is_empty()); + assert_eq!(otel_metrics.service_name, "countersyncd"); + } +} diff --git a/crates/countersyncd/src/message/saistats.rs b/crates/countersyncd/src/message/saistats.rs new file mode 100644 index 00000000000..12635fcb595 --- /dev/null +++ b/crates/countersyncd/src/message/saistats.rs @@ -0,0 +1,434 @@ +//! SAI (Switch Abstraction Interface) Statistics Message Types +//! +//! This module defines the data structures for representing SAI statistics +//! extracted from IPFIX data records. SAI statistics contain information +//! about switch hardware counters and performance metrics. + +use std::sync::Arc; + +use byteorder::{ByteOrder, NetworkEndian}; +use ipfixrw::parser::{DataRecordValue, FieldSpecifier}; + +/// Represents a single SAI statistic entry containing counter information. +/// +/// SAI statistics are extracted from IPFIX data records and contain +/// information about switch hardware counters and their current values. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct SAIStat { + /// Object name corresponding to the label ID (1-based index from object_names) + pub object_name: String, + /// SAI object type identifier (with possible extensions) + pub type_id: u32, + /// SAI statistic identifier (with possible extensions) + pub stat_id: u32, + /// Current counter value + pub counter: u64, +} + +/// Base value for extended SAI identifiers. +/// +/// When the extension bit is set in the enterprise number, +/// this value is added to the base type_id or stat_id to create +/// an extended identifier space. +const EXTENSIONS_RANGE_BASE: u32 = 0x2000_0000; + +impl SAIStat { + /// Creates a SAIStat directly from IPFIX field specifier and data record value. + /// + /// # Arguments + /// + /// * `field_spec` - IPFIX field specifier containing identifiers + /// * `value` - IPFIX data record value containing counter data + /// * `object_names` - Vector of object names (1-based indexing) + /// + /// # Returns + /// + /// A new SAIStat instance with decoded identifiers and resolved object name + pub fn from_ipfix( + field_spec: &FieldSpecifier, + value: &DataRecordValue, + object_names: &[String], + ) -> Self { + let enterprise_number = field_spec.enterprise_number.unwrap_or(0); + let label = field_spec.information_element_identifier; + + // Extract extension flags from enterprise number + let type_id_extension = (enterprise_number & 0x8000_0000) != 0; + let stat_id_extension = (enterprise_number & 0x0000_8000) != 0; + + // Extract base identifiers from enterprise number + let mut type_id = (enterprise_number & 0x7FFF_0000) >> 16; + let mut stat_id = enterprise_number & 0x0000_7FFF; + + // Apply extensions if flags are set + if type_id_extension { + type_id = type_id.saturating_add(EXTENSIONS_RANGE_BASE); + } + + if stat_id_extension { + stat_id = stat_id.saturating_add(EXTENSIONS_RANGE_BASE); + } + + // Extract counter value from data record + let counter = match value { + DataRecordValue::Bytes(bytes) => { + if bytes.len() >= 8 { + NetworkEndian::read_u64(bytes) + } else { + // Handle shorter byte arrays by padding with zeros + let mut padded = [0u8; 8]; + let copy_len = std::cmp::min(bytes.len(), 8); + padded[8 - copy_len..].copy_from_slice(&bytes[..copy_len]); + NetworkEndian::read_u64(&padded) + } + } + _ => { + // For non-byte values, default to 0 + // Could potentially handle other DataRecordValue variants here + 0 + } + }; + + // Resolve object name from label + let object_name = if label > 0 && (label as usize) <= object_names.len() { + // Convert 1-based label to 0-based index + object_names[(label - 1) as usize].clone() + } else { + // Fallback to label number if object name not found + format!("unknown_{}", label) + }; + + SAIStat { + object_name, + type_id, + stat_id, + counter, + } + } +} + +/// Collection of SAI statistics with an associated observation timestamp. +/// +/// This structure represents a snapshot of multiple SAI statistics +/// collected at a specific point in time, as indicated by the observation_time. +#[derive(Debug, Clone)] +pub struct SAIStats { + /// Timestamp when these statistics were observed (typically from IPFIX observation time field) + pub observation_time: u64, + /// Vector of individual SAI statistic entries + pub stats: Vec, +} + +impl SAIStats { + /// Creates a new SAIStats instance. + /// + /// # Arguments + /// + /// * `observation_time` - Timestamp when statistics were collected + /// * `stats` - Vector of SAI statistics + /// + /// # Returns + /// + /// A new SAIStats instance + pub fn new(observation_time: u64, stats: Vec) -> Self { + Self { + observation_time, + stats, + } + } + + /// Returns the number of statistics in this collection. + #[allow(dead_code)] + pub fn len(&self) -> usize { + self.stats.len() + } + + /// Returns true if this collection contains no statistics. + #[allow(dead_code)] + pub fn is_empty(&self) -> bool { + self.stats.is_empty() + } + + /// Returns an iterator over the statistics. + #[allow(dead_code)] + pub fn iter(&self) -> std::slice::Iter { + self.stats.iter() + } +} + +impl PartialEq for SAIStats { + /// Compares two SAIStats instances for equality. + /// + /// Two SAIStats are considered equal if they have the same observation_time + /// and contain the same set of statistics (order independent). + /// + /// # Arguments + /// + /// * `other` - The other SAIStats instance to compare with + /// + /// # Returns + /// + /// true if the instances are equal, false otherwise + fn eq(&self, other: &Self) -> bool { + // Quick checks first + if self.observation_time != other.observation_time { + return false; + } + + if self.stats.len() != other.stats.len() { + return false; + } + + // For small collections, use the existing approach + if self.stats.len() <= 10 { + return self.stats.iter().all(|stat| other.stats.contains(stat)); + } + + // For larger collections, use a more efficient approach + use std::collections::HashSet; + let self_set: HashSet<&SAIStat> = self.stats.iter().collect(); + let other_set: HashSet<&SAIStat> = other.stats.iter().collect(); + self_set == other_set + } +} + +/// Type alias for Arc-wrapped SAIStats to enable efficient sharing between actors. +/// +/// This type is used for passing SAI statistics messages between different +/// parts of the system without expensive cloning operations. +pub type SAIStatsMessage = Arc; + +/// Extension trait for creating SAIStatsMessage instances. +#[allow(dead_code)] +pub trait SAIStatsMessageExt { + /// Creates a new SAIStatsMessage from SAIStats. + /// + /// # Arguments + /// + /// * `stats` - The SAIStats instance to wrap in an Arc + /// + /// # Returns + /// + /// A new SAIStatsMessage (Arc) + fn into_message(self) -> SAIStatsMessage; + + /// Creates a new SAIStatsMessage with the given observation time and statistics. + /// + /// # Arguments + /// + /// * `observation_time` - Timestamp when statistics were collected + /// * `stats` - Vector of SAI statistics + /// + /// # Returns + /// + /// A new SAIStatsMessage (Arc) + fn from_parts(observation_time: u64, stats: Vec) -> SAIStatsMessage; +} + +impl SAIStatsMessageExt for SAIStats { + fn into_message(self) -> SAIStatsMessage { + Arc::new(self) + } + + fn from_parts(observation_time: u64, stats: Vec) -> SAIStatsMessage { + Arc::new(SAIStats::new(observation_time, stats)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ipfixrw::parser::{DataRecordValue, FieldSpecifier}; + + /// Helper function to create a test field specifier + fn create_field_spec(element_id: u16, enterprise_number: Option) -> FieldSpecifier { + FieldSpecifier::new(enterprise_number, element_id, 8) + } + + /// Helper function to create test byte data + fn create_byte_value(value: u64) -> DataRecordValue { + let mut bytes = [0u8; 8]; + NetworkEndian::write_u64(&mut bytes, value); + DataRecordValue::Bytes(bytes.to_vec()) + } + + #[test] + fn test_sai_stat_from_ipfix_basic() { + let field_spec = create_field_spec(2, Some(0x12340000)); // label 2, type_id 0x1234, stat_id 0 + let value = create_byte_value(12345); + let object_names = vec!["Ethernet0".to_string(), "Ethernet1".to_string()]; + + let stat = SAIStat::from_ipfix(&field_spec, &value, &object_names); + + assert_eq!(stat.object_name, "Ethernet1"); // label 2 -> index 1 (1-based) + assert_eq!(stat.type_id, 0x1234); + assert_eq!(stat.stat_id, 0); + assert_eq!(stat.counter, 12345); + } + + #[test] + fn test_sai_stat_from_ipfix_with_extensions() { + // Test with both extension bits set + let enterprise_number = 0x80008000 | 0x12340567; + let field_spec = create_field_spec(1, Some(enterprise_number)); // label 1 + let value = create_byte_value(99999); + let object_names = vec!["Ethernet0".to_string()]; + + let stat = SAIStat::from_ipfix(&field_spec, &value, &object_names); + + assert_eq!(stat.object_name, "Ethernet0"); // label 1 -> index 0 (1-based) + assert_eq!(stat.type_id, 0x1234 + EXTENSIONS_RANGE_BASE); + assert_eq!(stat.stat_id, 0x0567 + EXTENSIONS_RANGE_BASE); + assert_eq!(stat.counter, 99999); + } + + #[test] + fn test_sai_stat_from_ipfix_short_bytes() { + let field_spec = create_field_spec(1, Some(0x00010002)); + let short_bytes = vec![0x12, 0x34]; // Only 2 bytes instead of 8 + let value = DataRecordValue::Bytes(short_bytes); + let object_names = vec!["Ethernet0".to_string()]; + + let stat = SAIStat::from_ipfix(&field_spec, &value, &object_names); + + assert_eq!(stat.object_name, "Ethernet0"); + assert_eq!(stat.counter, 0x1234); // Should be padded correctly + } + + #[test] + fn test_sai_stat_from_ipfix_non_bytes() { + let field_spec = create_field_spec(1, Some(0x00050006)); + let value = DataRecordValue::String("test".to_string()); + let object_names = vec!["Ethernet0".to_string()]; + + let stat = SAIStat::from_ipfix(&field_spec, &value, &object_names); + + assert_eq!(stat.object_name, "Ethernet0"); + assert_eq!(stat.counter, 0); // Should default to 0 for non-byte values + } + + #[test] + fn test_sai_stat_from_ipfix_invalid_label() { + let field_spec = create_field_spec(5, Some(0x00010002)); // label 5, out of range + let value = create_byte_value(1000); + let object_names = vec!["Ethernet0".to_string(), "Ethernet1".to_string()]; // Only 2 objects + + let stat = SAIStat::from_ipfix(&field_spec, &value, &object_names); + + assert_eq!(stat.object_name, "unknown_5"); // Fallback for invalid label + assert_eq!(stat.type_id, 1); + assert_eq!(stat.stat_id, 2); + assert_eq!(stat.counter, 1000); + } + + #[test] + fn test_sai_stat_from_ipfix_zero_label() { + let field_spec = create_field_spec(0, Some(0x00010002)); // label 0, invalid + let value = create_byte_value(1000); + let object_names = vec!["Ethernet0".to_string()]; + + let stat = SAIStat::from_ipfix(&field_spec, &value, &object_names); + + assert_eq!(stat.object_name, "unknown_0"); // Fallback for zero label + assert_eq!(stat.type_id, 1); + assert_eq!(stat.stat_id, 2); + assert_eq!(stat.counter, 1000); + } + + #[test] + fn test_sai_stats_creation() { + let stats = vec![ + SAIStat { + object_name: "Ethernet0".to_string(), + type_id: 100, + stat_id: 200, + counter: 1000, + }, + SAIStat { + object_name: "Ethernet1".to_string(), + type_id: 101, + stat_id: 201, + counter: 2000, + }, + ]; + + let sai_stats = SAIStats::new(12345, stats.clone()); + + assert_eq!(sai_stats.observation_time, 12345); + assert_eq!(sai_stats.len(), 2); + assert!(!sai_stats.is_empty()); + assert_eq!(sai_stats.stats, stats); + } + + #[test] + fn test_sai_stats_equality() { + let stats1 = vec![ + SAIStat { + object_name: "Ethernet0".to_string(), + type_id: 100, + stat_id: 200, + counter: 1000, + }, + SAIStat { + object_name: "Ethernet1".to_string(), + type_id: 101, + stat_id: 201, + counter: 2000, + }, + ]; + + let stats2 = vec![ + SAIStat { + object_name: "Ethernet1".to_string(), + type_id: 101, + stat_id: 201, + counter: 2000, + }, + SAIStat { + object_name: "Ethernet0".to_string(), + type_id: 100, + stat_id: 200, + counter: 1000, + }, + ]; + + let sai_stats1 = SAIStats::new(12345, stats1); + let sai_stats2 = SAIStats::new(12345, stats2.clone()); + let sai_stats3 = SAIStats::new(12346, stats2); + + assert_eq!(sai_stats1, sai_stats2); // Same content, different order + assert_ne!(sai_stats1, sai_stats3); // Different observation time + } + + #[test] + fn test_sai_stats_message_creation() { + let stats = vec![SAIStat { + object_name: "Ethernet0".to_string(), + type_id: 100, + stat_id: 200, + counter: 1000, + }]; + + let message1 = SAIStats::new(12345, stats.clone()).into_message(); + let message2 = SAIStats::from_parts(12345, stats); + + assert_eq!(message1.observation_time, message2.observation_time); + assert_eq!(message1.stats, message2.stats); + } + + #[test] + fn test_extensions_range_overflow() { + // Test that we handle potential overflow gracefully + let enterprise_number = 0x80008000 | 0x7FFF7FFF; // Maximum values with extensions + let field_spec = create_field_spec(1, Some(enterprise_number)); + let value = create_byte_value(555); + let object_names = vec!["Ethernet0".to_string()]; + + let stat = SAIStat::from_ipfix(&field_spec, &value, &object_names); + + // Should use saturating_add to prevent overflow + assert_eq!(stat.type_id, 0x7FFF + EXTENSIONS_RANGE_BASE); + assert_eq!(stat.stat_id, 0x7FFF + EXTENSIONS_RANGE_BASE); + assert_eq!(stat.object_name, "Ethernet0"); + } +} diff --git a/crates/countersyncd/src/message/swss.rs b/crates/countersyncd/src/message/swss.rs new file mode 100644 index 00000000000..d7ab349c055 --- /dev/null +++ b/crates/countersyncd/src/message/swss.rs @@ -0,0 +1,23 @@ +#[allow(dead_code)] +pub struct SwssCfgStreamTelemetry {} + +#[allow(dead_code)] +pub struct SwssCfgTelemetryGroup {} + +#[allow(dead_code)] +pub enum SessionStatus { + Enabled, + Disabled, +} + +#[allow(dead_code)] +pub enum SessionType { + Ipfix, +} + +#[allow(dead_code)] +pub struct SwssStateTelemetrySession { + session_status: SessionStatus, + session_type: SessionType, + session_template: [u8], +} diff --git a/crates/countersyncd/src/sai/mod.rs b/crates/countersyncd/src/sai/mod.rs new file mode 100644 index 00000000000..ed3fcd76da0 --- /dev/null +++ b/crates/countersyncd/src/sai/mod.rs @@ -0,0 +1,14 @@ +pub mod saibuffer; +pub mod saiport; +pub mod saiqueue; +/// SAI (Switch Abstraction Interface) type definitions +/// +/// This module contains Rust definitions for SAI enums that correspond to C header files. +/// All enums support efficient bidirectional conversion between integers and strings. +pub mod saitypes; + +// Re-export commonly used types +pub use saibuffer::{SaiBufferPoolStat, SaiIngressPriorityGroupStat}; +pub use saiport::SaiPortStat; +pub use saiqueue::SaiQueueStat; +pub use saitypes::SaiObjectType; diff --git a/crates/countersyncd/src/sai/saibuffer.rs b/crates/countersyncd/src/sai/saibuffer.rs new file mode 100644 index 00000000000..1d910042540 --- /dev/null +++ b/crates/countersyncd/src/sai/saibuffer.rs @@ -0,0 +1,593 @@ +use std::fmt; +use std::str::FromStr; + +/// SAI buffer pool statistics enum +/// This enum represents all the buffer pool statistics defined in sai_buffer_pool_stat_t +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u32)] +pub enum SaiBufferPoolStat { + /// Get current pool occupancy in bytes [uint64_t] + CurrOccupancyBytes = 0x00000000, + + /// Get watermark pool occupancy in bytes [uint64_t] + WatermarkBytes = 0x00000001, + + /// Get count of packets dropped in this pool [uint64_t] + DroppedPackets = 0x00000002, + + /// Get/set WRED green dropped packet count [uint64_t] + GreenWredDroppedPackets = 0x00000003, + + /// Get/set WRED green dropped byte count [uint64_t] + GreenWredDroppedBytes = 0x00000004, + + /// Get/set WRED yellow dropped packet count [uint64_t] + YellowWredDroppedPackets = 0x00000005, + + /// Get/set WRED yellow dropped byte count [uint64_t] + YellowWredDroppedBytes = 0x00000006, + + /// Get/set WRED red dropped packet count [uint64_t] + RedWredDroppedPackets = 0x00000007, + + /// Get/set WRED red dropped byte count [uint64_t] + RedWredDroppedBytes = 0x00000008, + + /// Get/set WRED dropped packets count [uint64_t] + WredDroppedPackets = 0x00000009, + + /// Get/set WRED dropped bytes count [uint64_t] + WredDroppedBytes = 0x0000000a, + + /// Get/set WRED green marked packet count [uint64_t] + GreenWredEcnMarkedPackets = 0x0000000b, + + /// Get/set WRED green marked byte count [uint64_t] + GreenWredEcnMarkedBytes = 0x0000000c, + + /// Get/set WRED yellow marked packet count [uint64_t] + YellowWredEcnMarkedPackets = 0x0000000d, + + /// Get/set WRED yellow marked byte count [uint64_t] + YellowWredEcnMarkedBytes = 0x0000000e, + + /// Get/set WRED red marked packet count [uint64_t] + RedWredEcnMarkedPackets = 0x0000000f, + + /// Get/set WRED red marked byte count [uint64_t] + RedWredEcnMarkedBytes = 0x00000010, + + /// Get/set WRED marked packets count [uint64_t] + WredEcnMarkedPackets = 0x00000011, + + /// Get/set WRED marked bytes count [uint64_t] + WredEcnMarkedBytes = 0x00000012, + + /// Get current headroom pool occupancy in bytes [uint64_t] + XoffRoomCurrOccupancyBytes = 0x00000013, + + /// Get headroom pool occupancy in bytes [uint64_t] + XoffRoomWatermarkBytes = 0x00000014, + + /// Get current headroom pool occupancy in cells [uint64_t] + XoffRoomCurrOccupancyCells = 0x00000015, + + /// Get headroom pool occupancy in cells [uint64_t] + XoffRoomWatermarkCells = 0x00000016, + + /// Get current pool occupancy in cells [uint64_t] + CurrOccupancyCells = 0x00000017, + + /// Get watermark pool occupancy in cells [uint64_t] + WatermarkCells = 0x00000018, + + /// Custom range base value + CustomRangeBase = 0x10000000, +} + +impl SaiBufferPoolStat { + /// Convert from u32 value to enum variant + pub fn from_u32(value: u32) -> Option { + match value { + 0x00000000 => Some(Self::CurrOccupancyBytes), + 0x00000001 => Some(Self::WatermarkBytes), + 0x00000002 => Some(Self::DroppedPackets), + 0x00000003 => Some(Self::GreenWredDroppedPackets), + 0x00000004 => Some(Self::GreenWredDroppedBytes), + 0x00000005 => Some(Self::YellowWredDroppedPackets), + 0x00000006 => Some(Self::YellowWredDroppedBytes), + 0x00000007 => Some(Self::RedWredDroppedPackets), + 0x00000008 => Some(Self::RedWredDroppedBytes), + 0x00000009 => Some(Self::WredDroppedPackets), + 0x0000000a => Some(Self::WredDroppedBytes), + 0x0000000b => Some(Self::GreenWredEcnMarkedPackets), + 0x0000000c => Some(Self::GreenWredEcnMarkedBytes), + 0x0000000d => Some(Self::YellowWredEcnMarkedPackets), + 0x0000000e => Some(Self::YellowWredEcnMarkedBytes), + 0x0000000f => Some(Self::RedWredEcnMarkedPackets), + 0x00000010 => Some(Self::RedWredEcnMarkedBytes), + 0x00000011 => Some(Self::WredEcnMarkedPackets), + 0x00000012 => Some(Self::WredEcnMarkedBytes), + 0x00000013 => Some(Self::XoffRoomCurrOccupancyBytes), + 0x00000014 => Some(Self::XoffRoomWatermarkBytes), + 0x00000015 => Some(Self::XoffRoomCurrOccupancyCells), + 0x00000016 => Some(Self::XoffRoomWatermarkCells), + 0x00000017 => Some(Self::CurrOccupancyCells), + 0x00000018 => Some(Self::WatermarkCells), + 0x10000000 => Some(Self::CustomRangeBase), + _ => None, + } + } + + /// Convert to u32 value + #[allow(dead_code)] // May be used by external code or future features + pub fn to_u32(self) -> u32 { + self as u32 + } + + /// Get the C name of this stat + pub fn to_c_name(self) -> &'static str { + match self { + Self::CurrOccupancyBytes => "SAI_BUFFER_POOL_STAT_CURR_OCCUPANCY_BYTES", + Self::WatermarkBytes => "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES", + Self::DroppedPackets => "SAI_BUFFER_POOL_STAT_DROPPED_PACKETS", + Self::GreenWredDroppedPackets => "SAI_BUFFER_POOL_STAT_GREEN_WRED_DROPPED_PACKETS", + Self::GreenWredDroppedBytes => "SAI_BUFFER_POOL_STAT_GREEN_WRED_DROPPED_BYTES", + Self::YellowWredDroppedPackets => "SAI_BUFFER_POOL_STAT_YELLOW_WRED_DROPPED_PACKETS", + Self::YellowWredDroppedBytes => "SAI_BUFFER_POOL_STAT_YELLOW_WRED_DROPPED_BYTES", + Self::RedWredDroppedPackets => "SAI_BUFFER_POOL_STAT_RED_WRED_DROPPED_PACKETS", + Self::RedWredDroppedBytes => "SAI_BUFFER_POOL_STAT_RED_WRED_DROPPED_BYTES", + Self::WredDroppedPackets => "SAI_BUFFER_POOL_STAT_WRED_DROPPED_PACKETS", + Self::WredDroppedBytes => "SAI_BUFFER_POOL_STAT_WRED_DROPPED_BYTES", + Self::GreenWredEcnMarkedPackets => "SAI_BUFFER_POOL_STAT_GREEN_WRED_ECN_MARKED_PACKETS", + Self::GreenWredEcnMarkedBytes => "SAI_BUFFER_POOL_STAT_GREEN_WRED_ECN_MARKED_BYTES", + Self::YellowWredEcnMarkedPackets => { + "SAI_BUFFER_POOL_STAT_YELLOW_WRED_ECN_MARKED_PACKETS" + } + Self::YellowWredEcnMarkedBytes => "SAI_BUFFER_POOL_STAT_YELLOW_WRED_ECN_MARKED_BYTES", + Self::RedWredEcnMarkedPackets => "SAI_BUFFER_POOL_STAT_RED_WRED_ECN_MARKED_PACKETS", + Self::RedWredEcnMarkedBytes => "SAI_BUFFER_POOL_STAT_RED_WRED_ECN_MARKED_BYTES", + Self::WredEcnMarkedPackets => "SAI_BUFFER_POOL_STAT_WRED_ECN_MARKED_PACKETS", + Self::WredEcnMarkedBytes => "SAI_BUFFER_POOL_STAT_WRED_ECN_MARKED_BYTES", + Self::XoffRoomCurrOccupancyBytes => { + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_CURR_OCCUPANCY_BYTES" + } + Self::XoffRoomWatermarkBytes => "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES", + Self::XoffRoomCurrOccupancyCells => { + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_CURR_OCCUPANCY_CELLS" + } + Self::XoffRoomWatermarkCells => "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_CELLS", + Self::CurrOccupancyCells => "SAI_BUFFER_POOL_STAT_CURR_OCCUPANCY_CELLS", + Self::WatermarkCells => "SAI_BUFFER_POOL_STAT_WATERMARK_CELLS", + Self::CustomRangeBase => "SAI_BUFFER_POOL_STAT_CUSTOM_RANGE_BASE", + } + } +} + +impl FromStr for SaiBufferPoolStat { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "SAI_BUFFER_POOL_STAT_CURR_OCCUPANCY_BYTES" => Ok(Self::CurrOccupancyBytes), + "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES" => Ok(Self::WatermarkBytes), + "SAI_BUFFER_POOL_STAT_DROPPED_PACKETS" => Ok(Self::DroppedPackets), + "SAI_BUFFER_POOL_STAT_GREEN_WRED_DROPPED_PACKETS" => Ok(Self::GreenWredDroppedPackets), + "SAI_BUFFER_POOL_STAT_GREEN_WRED_DROPPED_BYTES" => Ok(Self::GreenWredDroppedBytes), + "SAI_BUFFER_POOL_STAT_YELLOW_WRED_DROPPED_PACKETS" => { + Ok(Self::YellowWredDroppedPackets) + } + "SAI_BUFFER_POOL_STAT_YELLOW_WRED_DROPPED_BYTES" => Ok(Self::YellowWredDroppedBytes), + "SAI_BUFFER_POOL_STAT_RED_WRED_DROPPED_PACKETS" => Ok(Self::RedWredDroppedPackets), + "SAI_BUFFER_POOL_STAT_RED_WRED_DROPPED_BYTES" => Ok(Self::RedWredDroppedBytes), + "SAI_BUFFER_POOL_STAT_WRED_DROPPED_PACKETS" => Ok(Self::WredDroppedPackets), + "SAI_BUFFER_POOL_STAT_WRED_DROPPED_BYTES" => Ok(Self::WredDroppedBytes), + "SAI_BUFFER_POOL_STAT_GREEN_WRED_ECN_MARKED_PACKETS" => { + Ok(Self::GreenWredEcnMarkedPackets) + } + "SAI_BUFFER_POOL_STAT_GREEN_WRED_ECN_MARKED_BYTES" => Ok(Self::GreenWredEcnMarkedBytes), + "SAI_BUFFER_POOL_STAT_YELLOW_WRED_ECN_MARKED_PACKETS" => { + Ok(Self::YellowWredEcnMarkedPackets) + } + "SAI_BUFFER_POOL_STAT_YELLOW_WRED_ECN_MARKED_BYTES" => { + Ok(Self::YellowWredEcnMarkedBytes) + } + "SAI_BUFFER_POOL_STAT_RED_WRED_ECN_MARKED_PACKETS" => Ok(Self::RedWredEcnMarkedPackets), + "SAI_BUFFER_POOL_STAT_RED_WRED_ECN_MARKED_BYTES" => Ok(Self::RedWredEcnMarkedBytes), + "SAI_BUFFER_POOL_STAT_WRED_ECN_MARKED_PACKETS" => Ok(Self::WredEcnMarkedPackets), + "SAI_BUFFER_POOL_STAT_WRED_ECN_MARKED_BYTES" => Ok(Self::WredEcnMarkedBytes), + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_CURR_OCCUPANCY_BYTES" => { + Ok(Self::XoffRoomCurrOccupancyBytes) + } + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES" => Ok(Self::XoffRoomWatermarkBytes), + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_CURR_OCCUPANCY_CELLS" => { + Ok(Self::XoffRoomCurrOccupancyCells) + } + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_CELLS" => Ok(Self::XoffRoomWatermarkCells), + "SAI_BUFFER_POOL_STAT_CURR_OCCUPANCY_CELLS" => Ok(Self::CurrOccupancyCells), + "SAI_BUFFER_POOL_STAT_WATERMARK_CELLS" => Ok(Self::WatermarkCells), + "SAI_BUFFER_POOL_STAT_CUSTOM_RANGE_BASE" => Ok(Self::CustomRangeBase), + _ => Err(format!("Unknown buffer pool stat: {}", s)), + } + } +} + +impl fmt::Display for SaiBufferPoolStat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_c_name()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_from_u32() { + assert_eq!( + SaiBufferPoolStat::from_u32(0x00000000), + Some(SaiBufferPoolStat::CurrOccupancyBytes) + ); + assert_eq!( + SaiBufferPoolStat::from_u32(0x00000001), + Some(SaiBufferPoolStat::WatermarkBytes) + ); + assert_eq!( + SaiBufferPoolStat::from_u32(0x00000018), + Some(SaiBufferPoolStat::WatermarkCells) + ); + assert_eq!( + SaiBufferPoolStat::from_u32(0x10000000), + Some(SaiBufferPoolStat::CustomRangeBase) + ); + assert_eq!(SaiBufferPoolStat::from_u32(0xFFFFFFFF), None); + } + + #[test] + fn test_to_u32() { + assert_eq!(SaiBufferPoolStat::CurrOccupancyBytes.to_u32(), 0x00000000); + assert_eq!(SaiBufferPoolStat::WatermarkBytes.to_u32(), 0x00000001); + assert_eq!(SaiBufferPoolStat::WatermarkCells.to_u32(), 0x00000018); + assert_eq!(SaiBufferPoolStat::CustomRangeBase.to_u32(), 0x10000000); + } + + #[test] + fn test_string_conversion() { + let stat = SaiBufferPoolStat::CurrOccupancyBytes; + let c_name = stat.to_c_name(); + assert_eq!(c_name, "SAI_BUFFER_POOL_STAT_CURR_OCCUPANCY_BYTES"); + + let parsed: SaiBufferPoolStat = c_name.parse().unwrap(); + assert_eq!(parsed, stat); + + assert_eq!(format!("{}", stat), c_name); + } + + #[test] + fn test_wred_stats() { + // Test WRED drop stats + assert_eq!( + SaiBufferPoolStat::GreenWredDroppedPackets.to_u32(), + 0x00000003 + ); + assert_eq!( + SaiBufferPoolStat::YellowWredDroppedBytes.to_u32(), + 0x00000006 + ); + assert_eq!( + SaiBufferPoolStat::RedWredDroppedPackets.to_u32(), + 0x00000007 + ); + + // Test WRED ECN mark stats + assert_eq!( + SaiBufferPoolStat::GreenWredEcnMarkedPackets.to_u32(), + 0x0000000b + ); + assert_eq!(SaiBufferPoolStat::WredEcnMarkedBytes.to_u32(), 0x00000012); + } + + #[test] + fn test_xoff_room_stats() { + assert_eq!( + SaiBufferPoolStat::XoffRoomCurrOccupancyBytes.to_u32(), + 0x00000013 + ); + assert_eq!( + SaiBufferPoolStat::XoffRoomWatermarkCells.to_u32(), + 0x00000016 + ); + } +} + +/// SAI ingress priority group statistics enum +/// This enum represents all the ingress priority group statistics defined in sai_ingress_priority_group_stat_t +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u32)] +pub enum SaiIngressPriorityGroupStat { + /// Get rx packets count [uint64_t] + Packets = 0x00000000, + + /// Get rx bytes count [uint64_t] + Bytes = 0x00000001, + + /// Get current pg occupancy in bytes [uint64_t] + CurrOccupancyBytes = 0x00000002, + + /// Get watermark pg occupancy in bytes [uint64_t] + WatermarkBytes = 0x00000003, + + /// Get current pg shared occupancy in bytes [uint64_t] + SharedCurrOccupancyBytes = 0x00000004, + + /// Get watermark pg shared occupancy in bytes [uint64_t] + SharedWatermarkBytes = 0x00000005, + + /// Get current pg XOFF room occupancy in bytes [uint64_t] + XoffRoomCurrOccupancyBytes = 0x00000006, + + /// Get watermark pg XOFF room occupancy in bytes [uint64_t] + XoffRoomWatermarkBytes = 0x00000007, + + /// Get dropped packets count [uint64_t] + DroppedPackets = 0x00000008, + + /// Get current pg occupancy in cells [uint64_t] + CurrOccupancyCells = 0x00000009, + + /// Get watermark pg occupancy in cells [uint64_t] + WatermarkCells = 0x0000000a, + + /// Get current pg shared occupancy in cells [uint64_t] + SharedCurrOccupancyCells = 0x0000000b, + + /// Get watermark pg shared occupancy in cells [uint64_t] + SharedWatermarkCells = 0x0000000c, + + /// Get current pg XOFF room occupancy in cells [uint64_t] + XoffRoomCurrOccupancyCells = 0x0000000d, + + /// Get watermark pg XOFF room occupancy in cells [uint64_t] + XoffRoomWatermarkCells = 0x0000000e, + + /// Custom range base value + CustomRangeBase = 0x10000000, +} + +impl SaiIngressPriorityGroupStat { + /// Convert from u32 value to enum variant + pub fn from_u32(value: u32) -> Option { + match value { + 0x00000000 => Some(Self::Packets), + 0x00000001 => Some(Self::Bytes), + 0x00000002 => Some(Self::CurrOccupancyBytes), + 0x00000003 => Some(Self::WatermarkBytes), + 0x00000004 => Some(Self::SharedCurrOccupancyBytes), + 0x00000005 => Some(Self::SharedWatermarkBytes), + 0x00000006 => Some(Self::XoffRoomCurrOccupancyBytes), + 0x00000007 => Some(Self::XoffRoomWatermarkBytes), + 0x00000008 => Some(Self::DroppedPackets), + 0x00000009 => Some(Self::CurrOccupancyCells), + 0x0000000a => Some(Self::WatermarkCells), + 0x0000000b => Some(Self::SharedCurrOccupancyCells), + 0x0000000c => Some(Self::SharedWatermarkCells), + 0x0000000d => Some(Self::XoffRoomCurrOccupancyCells), + 0x0000000e => Some(Self::XoffRoomWatermarkCells), + 0x10000000 => Some(Self::CustomRangeBase), + _ => None, + } + } + + /// Convert enum variant to u32 value + #[allow(dead_code)] // May be used by external code or future features + pub fn to_u32(self) -> u32 { + self as u32 + } + + /// Get the C enum name as a string + pub fn to_c_name(self) -> &'static str { + match self { + Self::Packets => "SAI_INGRESS_PRIORITY_GROUP_STAT_PACKETS", + Self::Bytes => "SAI_INGRESS_PRIORITY_GROUP_STAT_BYTES", + Self::CurrOccupancyBytes => "SAI_INGRESS_PRIORITY_GROUP_STAT_CURR_OCCUPANCY_BYTES", + Self::WatermarkBytes => "SAI_INGRESS_PRIORITY_GROUP_STAT_WATERMARK_BYTES", + Self::SharedCurrOccupancyBytes => { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_CURR_OCCUPANCY_BYTES" + } + Self::SharedWatermarkBytes => "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES", + Self::XoffRoomCurrOccupancyBytes => { + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_CURR_OCCUPANCY_BYTES" + } + Self::XoffRoomWatermarkBytes => { + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES" + } + Self::DroppedPackets => "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS", + Self::CurrOccupancyCells => "SAI_INGRESS_PRIORITY_GROUP_STAT_CURR_OCCUPANCY_CELLS", + Self::WatermarkCells => "SAI_INGRESS_PRIORITY_GROUP_STAT_WATERMARK_CELLS", + Self::SharedCurrOccupancyCells => { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_CURR_OCCUPANCY_CELLS" + } + Self::SharedWatermarkCells => "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_CELLS", + Self::XoffRoomCurrOccupancyCells => { + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_CURR_OCCUPANCY_CELLS" + } + Self::XoffRoomWatermarkCells => { + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_CELLS" + } + Self::CustomRangeBase => "SAI_INGRESS_PRIORITY_GROUP_STAT_CUSTOM_RANGE_BASE", + } + } +} + +impl FromStr for SaiIngressPriorityGroupStat { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "SAI_INGRESS_PRIORITY_GROUP_STAT_PACKETS" => Ok(Self::Packets), + "SAI_INGRESS_PRIORITY_GROUP_STAT_BYTES" => Ok(Self::Bytes), + "SAI_INGRESS_PRIORITY_GROUP_STAT_CURR_OCCUPANCY_BYTES" => Ok(Self::CurrOccupancyBytes), + "SAI_INGRESS_PRIORITY_GROUP_STAT_WATERMARK_BYTES" => Ok(Self::WatermarkBytes), + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_CURR_OCCUPANCY_BYTES" => { + Ok(Self::SharedCurrOccupancyBytes) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES" => { + Ok(Self::SharedWatermarkBytes) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_CURR_OCCUPANCY_BYTES" => { + Ok(Self::XoffRoomCurrOccupancyBytes) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES" => { + Ok(Self::XoffRoomWatermarkBytes) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" => Ok(Self::DroppedPackets), + "SAI_INGRESS_PRIORITY_GROUP_STAT_CURR_OCCUPANCY_CELLS" => Ok(Self::CurrOccupancyCells), + "SAI_INGRESS_PRIORITY_GROUP_STAT_WATERMARK_CELLS" => Ok(Self::WatermarkCells), + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_CURR_OCCUPANCY_CELLS" => { + Ok(Self::SharedCurrOccupancyCells) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_CELLS" => { + Ok(Self::SharedWatermarkCells) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_CURR_OCCUPANCY_CELLS" => { + Ok(Self::XoffRoomCurrOccupancyCells) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_CELLS" => { + Ok(Self::XoffRoomWatermarkCells) + } + "SAI_INGRESS_PRIORITY_GROUP_STAT_CUSTOM_RANGE_BASE" => Ok(Self::CustomRangeBase), + _ => Err(()), + } + } +} + +impl fmt::Display for SaiIngressPriorityGroupStat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_c_name()) + } +} + +#[cfg(test)] +mod ingress_priority_group_tests { + use super::*; + + #[test] + fn test_ipg_from_u32() { + assert_eq!( + SaiIngressPriorityGroupStat::from_u32(0x00000000), + Some(SaiIngressPriorityGroupStat::Packets) + ); + assert_eq!( + SaiIngressPriorityGroupStat::from_u32(0x00000001), + Some(SaiIngressPriorityGroupStat::Bytes) + ); + assert_eq!( + SaiIngressPriorityGroupStat::from_u32(0x00000008), + Some(SaiIngressPriorityGroupStat::DroppedPackets) + ); + assert_eq!( + SaiIngressPriorityGroupStat::from_u32(0x0000000e), + Some(SaiIngressPriorityGroupStat::XoffRoomWatermarkCells) + ); + assert_eq!( + SaiIngressPriorityGroupStat::from_u32(0x10000000), + Some(SaiIngressPriorityGroupStat::CustomRangeBase) + ); + assert_eq!(SaiIngressPriorityGroupStat::from_u32(0xFFFFFFFF), None); + } + + #[test] + fn test_ipg_to_u32() { + assert_eq!(SaiIngressPriorityGroupStat::Packets.to_u32(), 0x00000000); + assert_eq!(SaiIngressPriorityGroupStat::Bytes.to_u32(), 0x00000001); + assert_eq!( + SaiIngressPriorityGroupStat::DroppedPackets.to_u32(), + 0x00000008 + ); + assert_eq!( + SaiIngressPriorityGroupStat::XoffRoomWatermarkCells.to_u32(), + 0x0000000e + ); + assert_eq!( + SaiIngressPriorityGroupStat::CustomRangeBase.to_u32(), + 0x10000000 + ); + } + + #[test] + fn test_ipg_string_conversion() { + let stat = SaiIngressPriorityGroupStat::CurrOccupancyBytes; + let c_name = stat.to_c_name(); + assert_eq!( + c_name, + "SAI_INGRESS_PRIORITY_GROUP_STAT_CURR_OCCUPANCY_BYTES" + ); + + let parsed: SaiIngressPriorityGroupStat = c_name.parse().unwrap(); + assert_eq!(parsed, stat); + + assert_eq!(format!("{}", stat), c_name); + } + + #[test] + fn test_ipg_occupancy_stats() { + // Test byte-based occupancy stats + assert_eq!( + SaiIngressPriorityGroupStat::CurrOccupancyBytes.to_u32(), + 0x00000002 + ); + assert_eq!( + SaiIngressPriorityGroupStat::WatermarkBytes.to_u32(), + 0x00000003 + ); + assert_eq!( + SaiIngressPriorityGroupStat::SharedCurrOccupancyBytes.to_u32(), + 0x00000004 + ); + assert_eq!( + SaiIngressPriorityGroupStat::SharedWatermarkBytes.to_u32(), + 0x00000005 + ); + + // Test cell-based occupancy stats + assert_eq!( + SaiIngressPriorityGroupStat::CurrOccupancyCells.to_u32(), + 0x00000009 + ); + assert_eq!( + SaiIngressPriorityGroupStat::WatermarkCells.to_u32(), + 0x0000000a + ); + assert_eq!( + SaiIngressPriorityGroupStat::SharedCurrOccupancyCells.to_u32(), + 0x0000000b + ); + assert_eq!( + SaiIngressPriorityGroupStat::SharedWatermarkCells.to_u32(), + 0x0000000c + ); + } + + #[test] + fn test_ipg_xoff_room_stats() { + // Test XOFF room byte stats + assert_eq!( + SaiIngressPriorityGroupStat::XoffRoomCurrOccupancyBytes.to_u32(), + 0x00000006 + ); + assert_eq!( + SaiIngressPriorityGroupStat::XoffRoomWatermarkBytes.to_u32(), + 0x00000007 + ); + + // Test XOFF room cell stats + assert_eq!( + SaiIngressPriorityGroupStat::XoffRoomCurrOccupancyCells.to_u32(), + 0x0000000d + ); + assert_eq!( + SaiIngressPriorityGroupStat::XoffRoomWatermarkCells.to_u32(), + 0x0000000e + ); + } +} diff --git a/crates/countersyncd/src/sai/saiport.rs b/crates/countersyncd/src/sai/saiport.rs new file mode 100644 index 00000000000..c6068fc4f9d --- /dev/null +++ b/crates/countersyncd/src/sai/saiport.rs @@ -0,0 +1,1316 @@ +use std::fmt; +use std::str::FromStr; + +/// SAI port statistics enum +/// This enum represents all the port statistics defined in sai_port_stat_t +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u32)] +pub enum SaiPortStat { + // SAI port stat range start / SAI port stat if in octets (same value in C) + IfInOctets = 0, + + // Following the exact C enum order + IfInUcastPkts = 1, + IfInNonUcastPkts = 2, + IfInDiscards = 3, + IfInErrors = 4, + IfInUnknownProtos = 5, + IfInBroadcastPkts = 6, + IfInMulticastPkts = 7, + IfInVlanDiscards = 8, + IfOutOctets = 9, + IfOutUcastPkts = 10, + IfOutNonUcastPkts = 11, + IfOutDiscards = 12, + IfOutErrors = 13, + IfOutQlen = 14, + IfOutBroadcastPkts = 15, + IfOutMulticastPkts = 16, + EtherStatsDropEvents = 17, + EtherStatsMulticastPkts = 18, + EtherStatsBroadcastPkts = 19, + EtherStatsUndersizePkts = 20, + EtherStatsFragments = 21, + EtherStatsPkts64Octets = 22, + EtherStatsPkts65To127Octets = 23, + EtherStatsPkts128To255Octets = 24, + EtherStatsPkts256To511Octets = 25, + EtherStatsPkts512To1023Octets = 26, + EtherStatsPkts1024To1518Octets = 27, + EtherStatsPkts1519To2047Octets = 28, + EtherStatsPkts2048To4095Octets = 29, + EtherStatsPkts4096To9216Octets = 30, + EtherStatsPkts9217To16383Octets = 31, + EtherStatsOversizePkts = 32, + EtherRxOversizePkts = 33, + EtherTxOversizePkts = 34, + EtherStatsJabbers = 35, + EtherStatsOctets = 36, + EtherStatsPkts = 37, + EtherStatsCollisions = 38, + EtherStatsCrcAlignErrors = 39, + EtherStatsTxNoErrors = 40, + EtherStatsRxNoErrors = 41, + IpInReceives = 42, + IpInOctets = 43, + IpInUcastPkts = 44, + IpInNonUcastPkts = 45, + IpInDiscards = 46, + IpOutOctets = 47, + IpOutUcastPkts = 48, + IpOutNonUcastPkts = 49, + IpOutDiscards = 50, + Ipv6InReceives = 51, + Ipv6InOctets = 52, + Ipv6InUcastPkts = 53, + Ipv6InNonUcastPkts = 54, + Ipv6InMcastPkts = 55, + Ipv6InDiscards = 56, + Ipv6OutOctets = 57, + Ipv6OutUcastPkts = 58, + Ipv6OutNonUcastPkts = 59, + Ipv6OutMcastPkts = 60, + Ipv6OutDiscards = 61, + GreenWredDroppedPackets = 62, + GreenWredDroppedBytes = 63, + YellowWredDroppedPackets = 64, + YellowWredDroppedBytes = 65, + RedWredDroppedPackets = 66, + RedWredDroppedBytes = 67, + WredDroppedPackets = 68, + WredDroppedBytes = 69, + EcnMarkedPackets = 70, + + // Packet size based packets count (continuing exact C enum order) + EtherInPkts64Octets = 71, + EtherInPkts65To127Octets = 72, + EtherInPkts128To255Octets = 73, + EtherInPkts256To511Octets = 74, + EtherInPkts512To1023Octets = 75, + EtherInPkts1024To1518Octets = 76, + EtherInPkts1519To2047Octets = 77, + EtherInPkts2048To4095Octets = 78, + EtherInPkts4096To9216Octets = 79, + EtherInPkts9217To16383Octets = 80, + EtherOutPkts64Octets = 81, + EtherOutPkts65To127Octets = 82, + EtherOutPkts128To255Octets = 83, + EtherOutPkts256To511Octets = 84, + EtherOutPkts512To1023Octets = 85, + EtherOutPkts1024To1518Octets = 86, + EtherOutPkts1519To2047Octets = 87, + EtherOutPkts2048To4095Octets = 88, + EtherOutPkts4096To9216Octets = 89, + EtherOutPkts9217To16383Octets = 90, + + // Port occupancy statistics + InCurrOccupancyBytes = 91, + InWatermarkBytes = 92, + InSharedCurrOccupancyBytes = 93, + InSharedWatermarkBytes = 94, + OutCurrOccupancyBytes = 95, + OutWatermarkBytes = 96, + OutSharedCurrOccupancyBytes = 97, + OutSharedWatermarkBytes = 98, + InDroppedPkts = 99, + OutDroppedPkts = 100, + + // Pause frame statistics + PauseRxPkts = 101, + PauseTxPkts = 102, + + // PFC Packet Counters for RX and TX per PFC priority + Pfc0RxPkts = 103, + Pfc0TxPkts = 104, + Pfc1RxPkts = 105, + Pfc1TxPkts = 106, + Pfc2RxPkts = 107, + Pfc2TxPkts = 108, + Pfc3RxPkts = 109, + Pfc3TxPkts = 110, + Pfc4RxPkts = 111, + Pfc4TxPkts = 112, + Pfc5RxPkts = 113, + Pfc5TxPkts = 114, + Pfc6RxPkts = 115, + Pfc6TxPkts = 116, + Pfc7RxPkts = 117, + Pfc7TxPkts = 118, + + // PFC pause duration for RX and TX per PFC priority + Pfc0RxPauseDuration = 119, + Pfc0TxPauseDuration = 120, + Pfc1RxPauseDuration = 121, + Pfc1TxPauseDuration = 122, + Pfc2RxPauseDuration = 123, + Pfc2TxPauseDuration = 124, + Pfc3RxPauseDuration = 125, + Pfc3TxPauseDuration = 126, + Pfc4RxPauseDuration = 127, + Pfc4TxPauseDuration = 128, + Pfc5RxPauseDuration = 129, + Pfc5TxPauseDuration = 130, + Pfc6RxPauseDuration = 131, + Pfc6TxPauseDuration = 132, + Pfc7RxPauseDuration = 133, + Pfc7TxPauseDuration = 134, + + // PFC pause duration in micro seconds + Pfc0RxPauseDurationUs = 135, + Pfc0TxPauseDurationUs = 136, + Pfc1RxPauseDurationUs = 137, + Pfc1TxPauseDurationUs = 138, + Pfc2RxPauseDurationUs = 139, + Pfc2TxPauseDurationUs = 140, + Pfc3RxPauseDurationUs = 141, + Pfc3TxPauseDurationUs = 142, + Pfc4RxPauseDurationUs = 143, + Pfc4TxPauseDurationUs = 144, + Pfc5RxPauseDurationUs = 145, + Pfc5TxPauseDurationUs = 146, + Pfc6RxPauseDurationUs = 147, + Pfc6TxPauseDurationUs = 148, + Pfc7RxPauseDurationUs = 149, + Pfc7TxPauseDurationUs = 150, + + // PFC ON to OFF pause transitions counter per PFC priority + Pfc0On2OffRxPkts = 151, + Pfc1On2OffRxPkts = 152, + Pfc2On2OffRxPkts = 153, + Pfc3On2OffRxPkts = 154, + Pfc4On2OffRxPkts = 155, + Pfc5On2OffRxPkts = 156, + Pfc6On2OffRxPkts = 157, + Pfc7On2OffRxPkts = 158, + + // DOT3 statistics + Dot3StatsAlignmentErrors = 159, + Dot3StatsFcsErrors = 160, + Dot3StatsSingleCollisionFrames = 161, + Dot3StatsMultipleCollisionFrames = 162, + Dot3StatsSqeTestErrors = 163, + Dot3StatsDeferredTransmissions = 164, + Dot3StatsLateCollisions = 165, + Dot3StatsExcessiveCollisions = 166, + Dot3StatsInternalMacTransmitErrors = 167, + Dot3StatsCarrierSenseErrors = 168, + Dot3StatsFrameTooLongs = 169, + Dot3StatsInternalMacReceiveErrors = 170, + Dot3StatsSymbolErrors = 171, + Dot3ControlInUnknownOpcodes = 172, + + // EEE statistics + EeeTxEventCount = 173, + EeeRxEventCount = 174, + EeeTxDuration = 175, + EeeRxDuration = 176, + + // PRBS and FEC statistics + PrbsErrorCount = 177, + IfInFecCorrectableFrames = 178, + IfInFecNotCorrectableFrames = 179, + IfInFecSymbolErrors = 180, + + // Fabric data units + IfInFabricDataUnits = 181, + IfOutFabricDataUnits = 182, + + // FEC codeword symbol error counters + IfInFecCodewordErrorsS0 = 183, + IfInFecCodewordErrorsS1 = 184, + IfInFecCodewordErrorsS2 = 185, + IfInFecCodewordErrorsS3 = 186, + IfInFecCodewordErrorsS4 = 187, + IfInFecCodewordErrorsS5 = 188, + IfInFecCodewordErrorsS6 = 189, + IfInFecCodewordErrorsS7 = 190, + IfInFecCodewordErrorsS8 = 191, + IfInFecCodewordErrorsS9 = 192, + IfInFecCodewordErrorsS10 = 193, + IfInFecCodewordErrorsS11 = 194, + IfInFecCodewordErrorsS12 = 195, + IfInFecCodewordErrorsS13 = 196, + IfInFecCodewordErrorsS14 = 197, + IfInFecCodewordErrorsS15 = 198, + IfInFecCodewordErrorsS16 = 199, + IfInFecCorrectedBits = 200, + + // Trimmed packet statistics + TrimPackets = 201, + DroppedTrimPackets = 202, + TxTrimPackets = 203, + + // Drop reason ranges (0x00001000 base) + InConfiguredDropReasons0DroppedPkts = 0x00001000, + InConfiguredDropReasons1DroppedPkts = 0x00001001, + InConfiguredDropReasons2DroppedPkts = 0x00001002, + InConfiguredDropReasons3DroppedPkts = 0x00001003, + InConfiguredDropReasons4DroppedPkts = 0x00001004, + InConfiguredDropReasons5DroppedPkts = 0x00001005, + InConfiguredDropReasons6DroppedPkts = 0x00001006, + InConfiguredDropReasons7DroppedPkts = 0x00001007, + InConfiguredDropReasons8DroppedPkts = 0x00001008, + InConfiguredDropReasons9DroppedPkts = 0x00001009, + InConfiguredDropReasons10DroppedPkts = 0x0000100a, + InConfiguredDropReasons11DroppedPkts = 0x0000100b, + InConfiguredDropReasons12DroppedPkts = 0x0000100c, + InConfiguredDropReasons13DroppedPkts = 0x0000100d, + InConfiguredDropReasons14DroppedPkts = 0x0000100e, + InConfiguredDropReasons15DroppedPkts = 0x0000100f, + + // Out drop reason ranges (0x00002000 base) + OutConfiguredDropReasons0DroppedPkts = 0x00002000, + OutConfiguredDropReasons1DroppedPkts = 0x00002001, + OutConfiguredDropReasons2DroppedPkts = 0x00002002, + OutConfiguredDropReasons3DroppedPkts = 0x00002003, + OutConfiguredDropReasons4DroppedPkts = 0x00002004, + OutConfiguredDropReasons5DroppedPkts = 0x00002005, + OutConfiguredDropReasons6DroppedPkts = 0x00002006, + OutConfiguredDropReasons7DroppedPkts = 0x00002007, + + // HW protection switchover events + IfInHwProtectionSwitchoverEvents = 0x00002008, + IfInHwProtectionSwitchoverDropPkts = 0x00002009, + + // Additional packet size statistics + EtherInPkts1519To2500Octets = 0x0000200a, + EtherInPkts2501To9000Octets = 0x0000200b, + EtherInPkts9001To16383Octets = 0x0000200c, + EtherOutPkts1519To2500Octets = 0x0000200d, + EtherOutPkts2501To9000Octets = 0x0000200e, + EtherOutPkts9001To16383Octets = 0x0000200f, + + // Port stat range end + End = 0x00002010, +} + +impl SaiPortStat { + /// Convert from u32 value to enum variant + pub fn from_u32(value: u32) -> Option { + match value { + 0 => Some(Self::IfInOctets), + 1 => Some(Self::IfInUcastPkts), + 2 => Some(Self::IfInNonUcastPkts), + 3 => Some(Self::IfInDiscards), + 4 => Some(Self::IfInErrors), + 5 => Some(Self::IfInUnknownProtos), + 6 => Some(Self::IfInBroadcastPkts), + 7 => Some(Self::IfInMulticastPkts), + 8 => Some(Self::IfInVlanDiscards), + 9 => Some(Self::IfOutOctets), + 10 => Some(Self::IfOutUcastPkts), + 11 => Some(Self::IfOutNonUcastPkts), + 12 => Some(Self::IfOutDiscards), + 13 => Some(Self::IfOutErrors), + 14 => Some(Self::IfOutQlen), + 15 => Some(Self::IfOutBroadcastPkts), + 16 => Some(Self::IfOutMulticastPkts), + 17 => Some(Self::EtherStatsDropEvents), + 18 => Some(Self::EtherStatsMulticastPkts), + 19 => Some(Self::EtherStatsBroadcastPkts), + 20 => Some(Self::EtherStatsUndersizePkts), + 21 => Some(Self::EtherStatsFragments), + 22 => Some(Self::EtherStatsPkts64Octets), + 23 => Some(Self::EtherStatsPkts65To127Octets), + 24 => Some(Self::EtherStatsPkts128To255Octets), + 25 => Some(Self::EtherStatsPkts256To511Octets), + 26 => Some(Self::EtherStatsPkts512To1023Octets), + 27 => Some(Self::EtherStatsPkts1024To1518Octets), + 28 => Some(Self::EtherStatsPkts1519To2047Octets), + 29 => Some(Self::EtherStatsPkts2048To4095Octets), + 30 => Some(Self::EtherStatsPkts4096To9216Octets), + 31 => Some(Self::EtherStatsPkts9217To16383Octets), + 32 => Some(Self::EtherStatsOversizePkts), + 33 => Some(Self::EtherRxOversizePkts), + 34 => Some(Self::EtherTxOversizePkts), + 35 => Some(Self::EtherStatsJabbers), + 36 => Some(Self::EtherStatsOctets), + 37 => Some(Self::EtherStatsPkts), + 38 => Some(Self::EtherStatsCollisions), + 39 => Some(Self::EtherStatsCrcAlignErrors), + 40 => Some(Self::EtherStatsTxNoErrors), + 41 => Some(Self::EtherStatsRxNoErrors), + 42 => Some(Self::IpInReceives), + 43 => Some(Self::IpInOctets), + 44 => Some(Self::IpInUcastPkts), + 45 => Some(Self::IpInNonUcastPkts), + 46 => Some(Self::IpInDiscards), + 47 => Some(Self::IpOutOctets), + 48 => Some(Self::IpOutUcastPkts), + 49 => Some(Self::IpOutNonUcastPkts), + 50 => Some(Self::IpOutDiscards), + 51 => Some(Self::Ipv6InReceives), + 52 => Some(Self::Ipv6InOctets), + 53 => Some(Self::Ipv6InUcastPkts), + 54 => Some(Self::Ipv6InNonUcastPkts), + 55 => Some(Self::Ipv6InMcastPkts), + 56 => Some(Self::Ipv6InDiscards), + 57 => Some(Self::Ipv6OutOctets), + 58 => Some(Self::Ipv6OutUcastPkts), + 59 => Some(Self::Ipv6OutNonUcastPkts), + 60 => Some(Self::Ipv6OutMcastPkts), + 61 => Some(Self::Ipv6OutDiscards), + 62 => Some(Self::GreenWredDroppedPackets), + 63 => Some(Self::GreenWredDroppedBytes), + 64 => Some(Self::YellowWredDroppedPackets), + 65 => Some(Self::YellowWredDroppedBytes), + 66 => Some(Self::RedWredDroppedPackets), + 67 => Some(Self::RedWredDroppedBytes), + 68 => Some(Self::WredDroppedPackets), + 69 => Some(Self::WredDroppedBytes), + 70 => Some(Self::EcnMarkedPackets), + 71 => Some(Self::EtherInPkts64Octets), + 72 => Some(Self::EtherInPkts65To127Octets), + 73 => Some(Self::EtherInPkts128To255Octets), + 74 => Some(Self::EtherInPkts256To511Octets), + 75 => Some(Self::EtherInPkts512To1023Octets), + 76 => Some(Self::EtherInPkts1024To1518Octets), + 77 => Some(Self::EtherInPkts1519To2047Octets), + 78 => Some(Self::EtherInPkts2048To4095Octets), + 79 => Some(Self::EtherInPkts4096To9216Octets), + 80 => Some(Self::EtherInPkts9217To16383Octets), + 81 => Some(Self::EtherOutPkts64Octets), + 82 => Some(Self::EtherOutPkts65To127Octets), + 83 => Some(Self::EtherOutPkts128To255Octets), + 84 => Some(Self::EtherOutPkts256To511Octets), + 85 => Some(Self::EtherOutPkts512To1023Octets), + 86 => Some(Self::EtherOutPkts1024To1518Octets), + 87 => Some(Self::EtherOutPkts1519To2047Octets), + 88 => Some(Self::EtherOutPkts2048To4095Octets), + 89 => Some(Self::EtherOutPkts4096To9216Octets), + 90 => Some(Self::EtherOutPkts9217To16383Octets), + 91 => Some(Self::InCurrOccupancyBytes), + 92 => Some(Self::InWatermarkBytes), + 93 => Some(Self::InSharedCurrOccupancyBytes), + 94 => Some(Self::InSharedWatermarkBytes), + 95 => Some(Self::OutCurrOccupancyBytes), + 96 => Some(Self::OutWatermarkBytes), + 97 => Some(Self::OutSharedCurrOccupancyBytes), + 98 => Some(Self::OutSharedWatermarkBytes), + 99 => Some(Self::InDroppedPkts), + 100 => Some(Self::OutDroppedPkts), + 101 => Some(Self::PauseRxPkts), + 102 => Some(Self::PauseTxPkts), + 103 => Some(Self::Pfc0RxPkts), + 104 => Some(Self::Pfc0TxPkts), + 105 => Some(Self::Pfc1RxPkts), + 106 => Some(Self::Pfc1TxPkts), + 107 => Some(Self::Pfc2RxPkts), + 108 => Some(Self::Pfc2TxPkts), + 109 => Some(Self::Pfc3RxPkts), + 110 => Some(Self::Pfc3TxPkts), + 111 => Some(Self::Pfc4RxPkts), + 112 => Some(Self::Pfc4TxPkts), + 113 => Some(Self::Pfc5RxPkts), + 114 => Some(Self::Pfc5TxPkts), + 115 => Some(Self::Pfc6RxPkts), + 116 => Some(Self::Pfc6TxPkts), + 117 => Some(Self::Pfc7RxPkts), + 118 => Some(Self::Pfc7TxPkts), + 119 => Some(Self::Pfc0RxPauseDuration), + 120 => Some(Self::Pfc0TxPauseDuration), + 121 => Some(Self::Pfc1RxPauseDuration), + 122 => Some(Self::Pfc1TxPauseDuration), + 123 => Some(Self::Pfc2RxPauseDuration), + 124 => Some(Self::Pfc2TxPauseDuration), + 125 => Some(Self::Pfc3RxPauseDuration), + 126 => Some(Self::Pfc3TxPauseDuration), + 127 => Some(Self::Pfc4RxPauseDuration), + 128 => Some(Self::Pfc4TxPauseDuration), + 129 => Some(Self::Pfc5RxPauseDuration), + 130 => Some(Self::Pfc5TxPauseDuration), + 131 => Some(Self::Pfc6RxPauseDuration), + 132 => Some(Self::Pfc6TxPauseDuration), + 133 => Some(Self::Pfc7RxPauseDuration), + 134 => Some(Self::Pfc7TxPauseDuration), + 135 => Some(Self::Pfc0RxPauseDurationUs), + 136 => Some(Self::Pfc0TxPauseDurationUs), + 137 => Some(Self::Pfc1RxPauseDurationUs), + 138 => Some(Self::Pfc1TxPauseDurationUs), + 139 => Some(Self::Pfc2RxPauseDurationUs), + 140 => Some(Self::Pfc2TxPauseDurationUs), + 141 => Some(Self::Pfc3RxPauseDurationUs), + 142 => Some(Self::Pfc3TxPauseDurationUs), + 143 => Some(Self::Pfc4RxPauseDurationUs), + 144 => Some(Self::Pfc4TxPauseDurationUs), + 145 => Some(Self::Pfc5RxPauseDurationUs), + 146 => Some(Self::Pfc5TxPauseDurationUs), + 147 => Some(Self::Pfc6RxPauseDurationUs), + 148 => Some(Self::Pfc6TxPauseDurationUs), + 149 => Some(Self::Pfc7RxPauseDurationUs), + 150 => Some(Self::Pfc7TxPauseDurationUs), + 151 => Some(Self::Pfc0On2OffRxPkts), + 152 => Some(Self::Pfc1On2OffRxPkts), + 153 => Some(Self::Pfc2On2OffRxPkts), + 154 => Some(Self::Pfc3On2OffRxPkts), + 155 => Some(Self::Pfc4On2OffRxPkts), + 156 => Some(Self::Pfc5On2OffRxPkts), + 157 => Some(Self::Pfc6On2OffRxPkts), + 158 => Some(Self::Pfc7On2OffRxPkts), + 159 => Some(Self::Dot3StatsAlignmentErrors), + 160 => Some(Self::Dot3StatsFcsErrors), + 161 => Some(Self::Dot3StatsSingleCollisionFrames), + 162 => Some(Self::Dot3StatsMultipleCollisionFrames), + 163 => Some(Self::Dot3StatsSqeTestErrors), + 164 => Some(Self::Dot3StatsDeferredTransmissions), + 165 => Some(Self::Dot3StatsLateCollisions), + 166 => Some(Self::Dot3StatsExcessiveCollisions), + 167 => Some(Self::Dot3StatsInternalMacTransmitErrors), + 168 => Some(Self::Dot3StatsCarrierSenseErrors), + 169 => Some(Self::Dot3StatsFrameTooLongs), + 170 => Some(Self::Dot3StatsInternalMacReceiveErrors), + 171 => Some(Self::Dot3StatsSymbolErrors), + 172 => Some(Self::Dot3ControlInUnknownOpcodes), + 173 => Some(Self::EeeTxEventCount), + 174 => Some(Self::EeeRxEventCount), + 175 => Some(Self::EeeTxDuration), + 176 => Some(Self::EeeRxDuration), + 177 => Some(Self::PrbsErrorCount), + 178 => Some(Self::IfInFecCorrectableFrames), + 179 => Some(Self::IfInFecNotCorrectableFrames), + 180 => Some(Self::IfInFecSymbolErrors), + 181 => Some(Self::IfInFabricDataUnits), + 182 => Some(Self::IfOutFabricDataUnits), + 183 => Some(Self::IfInFecCodewordErrorsS0), + 184 => Some(Self::IfInFecCodewordErrorsS1), + 185 => Some(Self::IfInFecCodewordErrorsS2), + 186 => Some(Self::IfInFecCodewordErrorsS3), + 187 => Some(Self::IfInFecCodewordErrorsS4), + 188 => Some(Self::IfInFecCodewordErrorsS5), + 189 => Some(Self::IfInFecCodewordErrorsS6), + 190 => Some(Self::IfInFecCodewordErrorsS7), + 191 => Some(Self::IfInFecCodewordErrorsS8), + 192 => Some(Self::IfInFecCodewordErrorsS9), + 193 => Some(Self::IfInFecCodewordErrorsS10), + 194 => Some(Self::IfInFecCodewordErrorsS11), + 195 => Some(Self::IfInFecCodewordErrorsS12), + 196 => Some(Self::IfInFecCodewordErrorsS13), + 197 => Some(Self::IfInFecCodewordErrorsS14), + 198 => Some(Self::IfInFecCodewordErrorsS15), + 199 => Some(Self::IfInFecCodewordErrorsS16), + 200 => Some(Self::IfInFecCorrectedBits), + 201 => Some(Self::TrimPackets), + 202 => Some(Self::DroppedTrimPackets), + 203 => Some(Self::TxTrimPackets), + + // Drop reason ranges + 0x00001000 => Some(Self::InConfiguredDropReasons0DroppedPkts), + 0x00001001 => Some(Self::InConfiguredDropReasons1DroppedPkts), + 0x00001002 => Some(Self::InConfiguredDropReasons2DroppedPkts), + 0x00001003 => Some(Self::InConfiguredDropReasons3DroppedPkts), + 0x00001004 => Some(Self::InConfiguredDropReasons4DroppedPkts), + 0x00001005 => Some(Self::InConfiguredDropReasons5DroppedPkts), + 0x00001006 => Some(Self::InConfiguredDropReasons6DroppedPkts), + 0x00001007 => Some(Self::InConfiguredDropReasons7DroppedPkts), + 0x00001008 => Some(Self::InConfiguredDropReasons8DroppedPkts), + 0x00001009 => Some(Self::InConfiguredDropReasons9DroppedPkts), + 0x0000100a => Some(Self::InConfiguredDropReasons10DroppedPkts), + 0x0000100b => Some(Self::InConfiguredDropReasons11DroppedPkts), + 0x0000100c => Some(Self::InConfiguredDropReasons12DroppedPkts), + 0x0000100d => Some(Self::InConfiguredDropReasons13DroppedPkts), + 0x0000100e => Some(Self::InConfiguredDropReasons14DroppedPkts), + 0x0000100f => Some(Self::InConfiguredDropReasons15DroppedPkts), + + 0x00002000 => Some(Self::OutConfiguredDropReasons0DroppedPkts), + 0x00002001 => Some(Self::OutConfiguredDropReasons1DroppedPkts), + 0x00002002 => Some(Self::OutConfiguredDropReasons2DroppedPkts), + 0x00002003 => Some(Self::OutConfiguredDropReasons3DroppedPkts), + 0x00002004 => Some(Self::OutConfiguredDropReasons4DroppedPkts), + 0x00002005 => Some(Self::OutConfiguredDropReasons5DroppedPkts), + 0x00002006 => Some(Self::OutConfiguredDropReasons6DroppedPkts), + 0x00002007 => Some(Self::OutConfiguredDropReasons7DroppedPkts), + + 0x00002008 => Some(Self::IfInHwProtectionSwitchoverEvents), + 0x00002009 => Some(Self::IfInHwProtectionSwitchoverDropPkts), + 0x0000200a => Some(Self::EtherInPkts1519To2500Octets), + 0x0000200b => Some(Self::EtherInPkts2501To9000Octets), + 0x0000200c => Some(Self::EtherInPkts9001To16383Octets), + 0x0000200d => Some(Self::EtherOutPkts1519To2500Octets), + 0x0000200e => Some(Self::EtherOutPkts2501To9000Octets), + 0x0000200f => Some(Self::EtherOutPkts9001To16383Octets), + 0x00002010 => Some(Self::End), + _ => None, + } + } + + /// Convert enum variant to u32 value + #[allow(dead_code)] // May be used by external code or future features + pub fn to_u32(self) -> u32 { + self as u32 + } + + /// Convert enum variant to C constant name + pub fn to_c_name(self) -> &'static str { + match self { + Self::IfInOctets => "SAI_PORT_STAT_IF_IN_OCTETS", + Self::IfInUcastPkts => "SAI_PORT_STAT_IF_IN_UCAST_PKTS", + Self::IfInNonUcastPkts => "SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS", + Self::IfInDiscards => "SAI_PORT_STAT_IF_IN_DISCARDS", + Self::IfInErrors => "SAI_PORT_STAT_IF_IN_ERRORS", + Self::IfInUnknownProtos => "SAI_PORT_STAT_IF_IN_UNKNOWN_PROTOS", + Self::IfInBroadcastPkts => "SAI_PORT_STAT_IF_IN_BROADCAST_PKTS", + Self::IfInMulticastPkts => "SAI_PORT_STAT_IF_IN_MULTICAST_PKTS", + Self::IfInVlanDiscards => "SAI_PORT_STAT_IF_IN_VLAN_DISCARDS", + Self::IfOutOctets => "SAI_PORT_STAT_IF_OUT_OCTETS", + Self::IfOutUcastPkts => "SAI_PORT_STAT_IF_OUT_UCAST_PKTS", + Self::IfOutNonUcastPkts => "SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS", + Self::IfOutDiscards => "SAI_PORT_STAT_IF_OUT_DISCARDS", + Self::IfOutErrors => "SAI_PORT_STAT_IF_OUT_ERRORS", + Self::IfOutQlen => "SAI_PORT_STAT_IF_OUT_QLEN", + Self::IfOutBroadcastPkts => "SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS", + Self::IfOutMulticastPkts => "SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS", + Self::EtherStatsDropEvents => "SAI_PORT_STAT_ETHER_STATS_DROP_EVENTS", + Self::EtherStatsMulticastPkts => "SAI_PORT_STAT_ETHER_STATS_MULTICAST_PKTS", + Self::EtherStatsBroadcastPkts => "SAI_PORT_STAT_ETHER_STATS_BROADCAST_PKTS", + Self::EtherStatsUndersizePkts => "SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS", + Self::EtherStatsFragments => "SAI_PORT_STAT_ETHER_STATS_FRAGMENTS", + Self::EtherStatsPkts64Octets => "SAI_PORT_STAT_ETHER_STATS_PKTS_64_OCTETS", + Self::EtherStatsPkts65To127Octets => "SAI_PORT_STAT_ETHER_STATS_PKTS_65_TO_127_OCTETS", + Self::EtherStatsPkts128To255Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_128_TO_255_OCTETS" + } + Self::EtherStatsPkts256To511Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_256_TO_511_OCTETS" + } + Self::EtherStatsPkts512To1023Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_512_TO_1023_OCTETS" + } + Self::EtherStatsPkts1024To1518Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_1024_TO_1518_OCTETS" + } + Self::EtherStatsPkts1519To2047Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_1519_TO_2047_OCTETS" + } + Self::EtherStatsPkts2048To4095Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_2048_TO_4095_OCTETS" + } + Self::EtherStatsPkts4096To9216Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_4096_TO_9216_OCTETS" + } + Self::EtherStatsPkts9217To16383Octets => { + "SAI_PORT_STAT_ETHER_STATS_PKTS_9217_TO_16383_OCTETS" + } + Self::EtherStatsOversizePkts => "SAI_PORT_STAT_ETHER_STATS_OVERSIZE_PKTS", + Self::EtherRxOversizePkts => "SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS", + Self::EtherTxOversizePkts => "SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS", + Self::EtherStatsJabbers => "SAI_PORT_STAT_ETHER_STATS_JABBERS", + Self::EtherStatsOctets => "SAI_PORT_STAT_ETHER_STATS_OCTETS", + Self::EtherStatsPkts => "SAI_PORT_STAT_ETHER_STATS_PKTS", + Self::EtherStatsCollisions => "SAI_PORT_STAT_ETHER_STATS_COLLISIONS", + Self::EtherStatsCrcAlignErrors => "SAI_PORT_STAT_ETHER_STATS_CRC_ALIGN_ERRORS", + Self::EtherStatsTxNoErrors => "SAI_PORT_STAT_ETHER_STATS_TX_NO_ERRORS", + Self::EtherStatsRxNoErrors => "SAI_PORT_STAT_ETHER_STATS_RX_NO_ERRORS", + Self::IpInReceives => "SAI_PORT_STAT_IP_IN_RECEIVES", + Self::IpInOctets => "SAI_PORT_STAT_IP_IN_OCTETS", + Self::IpInUcastPkts => "SAI_PORT_STAT_IP_IN_UCAST_PKTS", + Self::IpInNonUcastPkts => "SAI_PORT_STAT_IP_IN_NON_UCAST_PKTS", + Self::IpInDiscards => "SAI_PORT_STAT_IP_IN_DISCARDS", + Self::IpOutOctets => "SAI_PORT_STAT_IP_OUT_OCTETS", + Self::IpOutUcastPkts => "SAI_PORT_STAT_IP_OUT_UCAST_PKTS", + Self::IpOutNonUcastPkts => "SAI_PORT_STAT_IP_OUT_NON_UCAST_PKTS", + Self::IpOutDiscards => "SAI_PORT_STAT_IP_OUT_DISCARDS", + Self::Ipv6InReceives => "SAI_PORT_STAT_IPV6_IN_RECEIVES", + Self::Ipv6InOctets => "SAI_PORT_STAT_IPV6_IN_OCTETS", + Self::Ipv6InUcastPkts => "SAI_PORT_STAT_IPV6_IN_UCAST_PKTS", + Self::Ipv6InNonUcastPkts => "SAI_PORT_STAT_IPV6_IN_NON_UCAST_PKTS", + Self::Ipv6InMcastPkts => "SAI_PORT_STAT_IPV6_IN_MCAST_PKTS", + Self::Ipv6InDiscards => "SAI_PORT_STAT_IPV6_IN_DISCARDS", + Self::Ipv6OutOctets => "SAI_PORT_STAT_IPV6_OUT_OCTETS", + Self::Ipv6OutUcastPkts => "SAI_PORT_STAT_IPV6_OUT_UCAST_PKTS", + Self::Ipv6OutNonUcastPkts => "SAI_PORT_STAT_IPV6_OUT_NON_UCAST_PKTS", + Self::Ipv6OutMcastPkts => "SAI_PORT_STAT_IPV6_OUT_MCAST_PKTS", + Self::Ipv6OutDiscards => "SAI_PORT_STAT_IPV6_OUT_DISCARDS", + Self::GreenWredDroppedPackets => "SAI_PORT_STAT_GREEN_WRED_DROPPED_PACKETS", + Self::GreenWredDroppedBytes => "SAI_PORT_STAT_GREEN_WRED_DROPPED_BYTES", + Self::YellowWredDroppedPackets => "SAI_PORT_STAT_YELLOW_WRED_DROPPED_PACKETS", + Self::YellowWredDroppedBytes => "SAI_PORT_STAT_YELLOW_WRED_DROPPED_BYTES", + Self::RedWredDroppedPackets => "SAI_PORT_STAT_RED_WRED_DROPPED_PACKETS", + Self::RedWredDroppedBytes => "SAI_PORT_STAT_RED_WRED_DROPPED_BYTES", + Self::WredDroppedPackets => "SAI_PORT_STAT_WRED_DROPPED_PACKETS", + Self::WredDroppedBytes => "SAI_PORT_STAT_WRED_DROPPED_BYTES", + Self::EcnMarkedPackets => "SAI_PORT_STAT_ECN_MARKED_PACKETS", + Self::EtherInPkts64Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS", + Self::EtherInPkts65To127Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS", + Self::EtherInPkts128To255Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS", + Self::EtherInPkts256To511Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS", + Self::EtherInPkts512To1023Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS", + Self::EtherInPkts1024To1518Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS", + Self::EtherInPkts1519To2047Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS", + Self::EtherInPkts2048To4095Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS", + Self::EtherInPkts4096To9216Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS", + Self::EtherInPkts9217To16383Octets => { + "SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS" + } + Self::EtherOutPkts64Octets => "SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS", + Self::EtherOutPkts65To127Octets => "SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS", + Self::EtherOutPkts128To255Octets => "SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS", + Self::EtherOutPkts256To511Octets => "SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS", + Self::EtherOutPkts512To1023Octets => "SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS", + Self::EtherOutPkts1024To1518Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS" + } + Self::EtherOutPkts1519To2047Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS" + } + Self::EtherOutPkts2048To4095Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS" + } + Self::EtherOutPkts4096To9216Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS" + } + Self::EtherOutPkts9217To16383Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS" + } + Self::InCurrOccupancyBytes => "SAI_PORT_STAT_IN_CURR_OCCUPANCY_BYTES", + Self::InWatermarkBytes => "SAI_PORT_STAT_IN_WATERMARK_BYTES", + Self::InSharedCurrOccupancyBytes => "SAI_PORT_STAT_IN_SHARED_CURR_OCCUPANCY_BYTES", + Self::InSharedWatermarkBytes => "SAI_PORT_STAT_IN_SHARED_WATERMARK_BYTES", + Self::OutCurrOccupancyBytes => "SAI_PORT_STAT_OUT_CURR_OCCUPANCY_BYTES", + Self::OutWatermarkBytes => "SAI_PORT_STAT_OUT_WATERMARK_BYTES", + Self::OutSharedCurrOccupancyBytes => "SAI_PORT_STAT_OUT_SHARED_CURR_OCCUPANCY_BYTES", + Self::OutSharedWatermarkBytes => "SAI_PORT_STAT_OUT_SHARED_WATERMARK_BYTES", + Self::InDroppedPkts => "SAI_PORT_STAT_IN_DROPPED_PKTS", + Self::OutDroppedPkts => "SAI_PORT_STAT_OUT_DROPPED_PKTS", + Self::PauseRxPkts => "SAI_PORT_STAT_PAUSE_RX_PKTS", + Self::PauseTxPkts => "SAI_PORT_STAT_PAUSE_TX_PKTS", + Self::Pfc0RxPkts => "SAI_PORT_STAT_PFC_0_RX_PKTS", + Self::Pfc0TxPkts => "SAI_PORT_STAT_PFC_0_TX_PKTS", + Self::Pfc1RxPkts => "SAI_PORT_STAT_PFC_1_RX_PKTS", + Self::Pfc1TxPkts => "SAI_PORT_STAT_PFC_1_TX_PKTS", + Self::Pfc2RxPkts => "SAI_PORT_STAT_PFC_2_RX_PKTS", + Self::Pfc2TxPkts => "SAI_PORT_STAT_PFC_2_TX_PKTS", + Self::Pfc3RxPkts => "SAI_PORT_STAT_PFC_3_RX_PKTS", + Self::Pfc3TxPkts => "SAI_PORT_STAT_PFC_3_TX_PKTS", + Self::Pfc4RxPkts => "SAI_PORT_STAT_PFC_4_RX_PKTS", + Self::Pfc4TxPkts => "SAI_PORT_STAT_PFC_4_TX_PKTS", + Self::Pfc5RxPkts => "SAI_PORT_STAT_PFC_5_RX_PKTS", + Self::Pfc5TxPkts => "SAI_PORT_STAT_PFC_5_TX_PKTS", + Self::Pfc6RxPkts => "SAI_PORT_STAT_PFC_6_RX_PKTS", + Self::Pfc6TxPkts => "SAI_PORT_STAT_PFC_6_TX_PKTS", + Self::Pfc7RxPkts => "SAI_PORT_STAT_PFC_7_RX_PKTS", + Self::Pfc7TxPkts => "SAI_PORT_STAT_PFC_7_TX_PKTS", + Self::Pfc0RxPauseDuration => "SAI_PORT_STAT_PFC_0_RX_PAUSE_DURATION", + Self::Pfc0TxPauseDuration => "SAI_PORT_STAT_PFC_0_TX_PAUSE_DURATION", + Self::Pfc1RxPauseDuration => "SAI_PORT_STAT_PFC_1_RX_PAUSE_DURATION", + Self::Pfc1TxPauseDuration => "SAI_PORT_STAT_PFC_1_TX_PAUSE_DURATION", + Self::Pfc2RxPauseDuration => "SAI_PORT_STAT_PFC_2_RX_PAUSE_DURATION", + Self::Pfc2TxPauseDuration => "SAI_PORT_STAT_PFC_2_TX_PAUSE_DURATION", + Self::Pfc3RxPauseDuration => "SAI_PORT_STAT_PFC_3_RX_PAUSE_DURATION", + Self::Pfc3TxPauseDuration => "SAI_PORT_STAT_PFC_3_TX_PAUSE_DURATION", + Self::Pfc4RxPauseDuration => "SAI_PORT_STAT_PFC_4_RX_PAUSE_DURATION", + Self::Pfc4TxPauseDuration => "SAI_PORT_STAT_PFC_4_TX_PAUSE_DURATION", + Self::Pfc5RxPauseDuration => "SAI_PORT_STAT_PFC_5_RX_PAUSE_DURATION", + Self::Pfc5TxPauseDuration => "SAI_PORT_STAT_PFC_5_TX_PAUSE_DURATION", + Self::Pfc6RxPauseDuration => "SAI_PORT_STAT_PFC_6_RX_PAUSE_DURATION", + Self::Pfc6TxPauseDuration => "SAI_PORT_STAT_PFC_6_TX_PAUSE_DURATION", + Self::Pfc7RxPauseDuration => "SAI_PORT_STAT_PFC_7_RX_PAUSE_DURATION", + Self::Pfc7TxPauseDuration => "SAI_PORT_STAT_PFC_7_TX_PAUSE_DURATION", + Self::Pfc0RxPauseDurationUs => "SAI_PORT_STAT_PFC_0_RX_PAUSE_DURATION_US", + Self::Pfc0TxPauseDurationUs => "SAI_PORT_STAT_PFC_0_TX_PAUSE_DURATION_US", + Self::Pfc1RxPauseDurationUs => "SAI_PORT_STAT_PFC_1_RX_PAUSE_DURATION_US", + Self::Pfc1TxPauseDurationUs => "SAI_PORT_STAT_PFC_1_TX_PAUSE_DURATION_US", + Self::Pfc2RxPauseDurationUs => "SAI_PORT_STAT_PFC_2_RX_PAUSE_DURATION_US", + Self::Pfc2TxPauseDurationUs => "SAI_PORT_STAT_PFC_2_TX_PAUSE_DURATION_US", + Self::Pfc3RxPauseDurationUs => "SAI_PORT_STAT_PFC_3_RX_PAUSE_DURATION_US", + Self::Pfc3TxPauseDurationUs => "SAI_PORT_STAT_PFC_3_TX_PAUSE_DURATION_US", + Self::Pfc4RxPauseDurationUs => "SAI_PORT_STAT_PFC_4_RX_PAUSE_DURATION_US", + Self::Pfc4TxPauseDurationUs => "SAI_PORT_STAT_PFC_4_TX_PAUSE_DURATION_US", + Self::Pfc5RxPauseDurationUs => "SAI_PORT_STAT_PFC_5_RX_PAUSE_DURATION_US", + Self::Pfc5TxPauseDurationUs => "SAI_PORT_STAT_PFC_5_TX_PAUSE_DURATION_US", + Self::Pfc6RxPauseDurationUs => "SAI_PORT_STAT_PFC_6_RX_PAUSE_DURATION_US", + Self::Pfc6TxPauseDurationUs => "SAI_PORT_STAT_PFC_6_TX_PAUSE_DURATION_US", + Self::Pfc7RxPauseDurationUs => "SAI_PORT_STAT_PFC_7_RX_PAUSE_DURATION_US", + Self::Pfc7TxPauseDurationUs => "SAI_PORT_STAT_PFC_7_TX_PAUSE_DURATION_US", + Self::Pfc0On2OffRxPkts => "SAI_PORT_STAT_PFC_0_ON2OFF_RX_PKTS", + Self::Pfc1On2OffRxPkts => "SAI_PORT_STAT_PFC_1_ON2OFF_RX_PKTS", + Self::Pfc2On2OffRxPkts => "SAI_PORT_STAT_PFC_2_ON2OFF_RX_PKTS", + Self::Pfc3On2OffRxPkts => "SAI_PORT_STAT_PFC_3_ON2OFF_RX_PKTS", + Self::Pfc4On2OffRxPkts => "SAI_PORT_STAT_PFC_4_ON2OFF_RX_PKTS", + Self::Pfc5On2OffRxPkts => "SAI_PORT_STAT_PFC_5_ON2OFF_RX_PKTS", + Self::Pfc6On2OffRxPkts => "SAI_PORT_STAT_PFC_6_ON2OFF_RX_PKTS", + Self::Pfc7On2OffRxPkts => "SAI_PORT_STAT_PFC_7_ON2OFF_RX_PKTS", + Self::Dot3StatsAlignmentErrors => "SAI_PORT_STAT_DOT3_STATS_ALIGNMENT_ERRORS", + Self::Dot3StatsFcsErrors => "SAI_PORT_STAT_DOT3_STATS_FCS_ERRORS", + Self::Dot3StatsSingleCollisionFrames => { + "SAI_PORT_STAT_DOT3_STATS_SINGLE_COLLISION_FRAMES" + } + Self::Dot3StatsMultipleCollisionFrames => { + "SAI_PORT_STAT_DOT3_STATS_MULTIPLE_COLLISION_FRAMES" + } + Self::Dot3StatsSqeTestErrors => "SAI_PORT_STAT_DOT3_STATS_SQE_TEST_ERRORS", + Self::Dot3StatsDeferredTransmissions => { + "SAI_PORT_STAT_DOT3_STATS_DEFERRED_TRANSMISSIONS" + } + Self::Dot3StatsLateCollisions => "SAI_PORT_STAT_DOT3_STATS_LATE_COLLISIONS", + Self::Dot3StatsExcessiveCollisions => "SAI_PORT_STAT_DOT3_STATS_EXCESSIVE_COLLISIONS", + Self::Dot3StatsInternalMacTransmitErrors => { + "SAI_PORT_STAT_DOT3_STATS_INTERNAL_MAC_TRANSMIT_ERRORS" + } + Self::Dot3StatsCarrierSenseErrors => "SAI_PORT_STAT_DOT3_STATS_CARRIER_SENSE_ERRORS", + Self::Dot3StatsFrameTooLongs => "SAI_PORT_STAT_DOT3_STATS_FRAME_TOO_LONGS", + Self::Dot3StatsInternalMacReceiveErrors => { + "SAI_PORT_STAT_DOT3_STATS_INTERNAL_MAC_RECEIVE_ERRORS" + } + Self::Dot3StatsSymbolErrors => "SAI_PORT_STAT_DOT3_STATS_SYMBOL_ERRORS", + Self::Dot3ControlInUnknownOpcodes => "SAI_PORT_STAT_DOT3_CONTROL_IN_UNKNOWN_OPCODES", + Self::EeeTxEventCount => "SAI_PORT_STAT_EEE_TX_EVENT_COUNT", + Self::EeeRxEventCount => "SAI_PORT_STAT_EEE_RX_EVENT_COUNT", + Self::EeeTxDuration => "SAI_PORT_STAT_EEE_TX_DURATION", + Self::EeeRxDuration => "SAI_PORT_STAT_EEE_RX_DURATION", + Self::PrbsErrorCount => "SAI_PORT_STAT_PRBS_ERROR_COUNT", + Self::IfInFecCorrectableFrames => "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES", + Self::IfInFecNotCorrectableFrames => "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES", + Self::IfInFecSymbolErrors => "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS", + Self::IfInFabricDataUnits => "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS", + Self::IfOutFabricDataUnits => "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS", + Self::IfInFecCodewordErrorsS0 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0", + Self::IfInFecCodewordErrorsS1 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1", + Self::IfInFecCodewordErrorsS2 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2", + Self::IfInFecCodewordErrorsS3 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3", + Self::IfInFecCodewordErrorsS4 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4", + Self::IfInFecCodewordErrorsS5 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5", + Self::IfInFecCodewordErrorsS6 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6", + Self::IfInFecCodewordErrorsS7 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7", + Self::IfInFecCodewordErrorsS8 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8", + Self::IfInFecCodewordErrorsS9 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9", + Self::IfInFecCodewordErrorsS10 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10", + Self::IfInFecCodewordErrorsS11 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11", + Self::IfInFecCodewordErrorsS12 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12", + Self::IfInFecCodewordErrorsS13 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13", + Self::IfInFecCodewordErrorsS14 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14", + Self::IfInFecCodewordErrorsS15 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15", + Self::IfInFecCodewordErrorsS16 => "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S16", + Self::IfInFecCorrectedBits => "SAI_PORT_STAT_IF_IN_FEC_CORRECTED_BITS", + Self::TrimPackets => "SAI_PORT_STAT_TRIM_PACKETS", + Self::DroppedTrimPackets => "SAI_PORT_STAT_DROPPED_TRIM_PACKETS", + Self::TxTrimPackets => "SAI_PORT_STAT_TX_TRIM_PACKETS", + Self::InConfiguredDropReasons0DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_0_DROPPED_PKTS" + } + Self::InConfiguredDropReasons1DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" + } + Self::InConfiguredDropReasons2DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_2_DROPPED_PKTS" + } + Self::InConfiguredDropReasons3DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_3_DROPPED_PKTS" + } + Self::InConfiguredDropReasons4DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_4_DROPPED_PKTS" + } + Self::InConfiguredDropReasons5DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_5_DROPPED_PKTS" + } + Self::InConfiguredDropReasons6DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_6_DROPPED_PKTS" + } + Self::InConfiguredDropReasons7DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_7_DROPPED_PKTS" + } + Self::InConfiguredDropReasons8DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_8_DROPPED_PKTS" + } + Self::InConfiguredDropReasons9DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_9_DROPPED_PKTS" + } + Self::InConfiguredDropReasons10DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_10_DROPPED_PKTS" + } + Self::InConfiguredDropReasons11DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_11_DROPPED_PKTS" + } + Self::InConfiguredDropReasons12DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_12_DROPPED_PKTS" + } + Self::InConfiguredDropReasons13DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_13_DROPPED_PKTS" + } + Self::InConfiguredDropReasons14DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_14_DROPPED_PKTS" + } + Self::InConfiguredDropReasons15DroppedPkts => { + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_15_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons0DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_0_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons1DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons2DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_2_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons3DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_3_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons4DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_4_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons5DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_5_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons6DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_6_DROPPED_PKTS" + } + Self::OutConfiguredDropReasons7DroppedPkts => { + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_7_DROPPED_PKTS" + } + Self::IfInHwProtectionSwitchoverEvents => { + "SAI_PORT_STAT_IF_IN_HW_PROTECTION_SWITCHOVER_EVENTS" + } + Self::IfInHwProtectionSwitchoverDropPkts => { + "SAI_PORT_STAT_IF_IN_HW_PROTECTION_SWITCHOVER_DROP_PKTS" + } + Self::EtherInPkts1519To2500Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2500_OCTETS", + Self::EtherInPkts2501To9000Octets => "SAI_PORT_STAT_ETHER_IN_PKTS_2501_TO_9000_OCTETS", + Self::EtherInPkts9001To16383Octets => { + "SAI_PORT_STAT_ETHER_IN_PKTS_9001_TO_16383_OCTETS" + } + Self::EtherOutPkts1519To2500Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2500_OCTETS" + } + Self::EtherOutPkts2501To9000Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_2501_TO_9000_OCTETS" + } + Self::EtherOutPkts9001To16383Octets => { + "SAI_PORT_STAT_ETHER_OUT_PKTS_9001_TO_16383_OCTETS" + } + Self::End => "SAI_PORT_STAT_END", + } + } +} + +impl fmt::Display for SaiPortStat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_c_name()) + } +} + +impl FromStr for SaiPortStat { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "SAI_PORT_STAT_START" | "SAI_PORT_STAT_IF_IN_OCTETS" => Ok(Self::IfInOctets), + "SAI_PORT_STAT_IF_IN_UCAST_PKTS" => Ok(Self::IfInUcastPkts), + "SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS" => Ok(Self::IfInNonUcastPkts), + "SAI_PORT_STAT_IF_IN_DISCARDS" => Ok(Self::IfInDiscards), + "SAI_PORT_STAT_IF_IN_ERRORS" => Ok(Self::IfInErrors), + "SAI_PORT_STAT_IF_IN_UNKNOWN_PROTOS" => Ok(Self::IfInUnknownProtos), + "SAI_PORT_STAT_IF_IN_BROADCAST_PKTS" => Ok(Self::IfInBroadcastPkts), + "SAI_PORT_STAT_IF_IN_MULTICAST_PKTS" => Ok(Self::IfInMulticastPkts), + "SAI_PORT_STAT_IF_IN_VLAN_DISCARDS" => Ok(Self::IfInVlanDiscards), + "SAI_PORT_STAT_IF_OUT_OCTETS" => Ok(Self::IfOutOctets), + "SAI_PORT_STAT_IF_OUT_UCAST_PKTS" => Ok(Self::IfOutUcastPkts), + "SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS" => Ok(Self::IfOutNonUcastPkts), + "SAI_PORT_STAT_IF_OUT_DISCARDS" => Ok(Self::IfOutDiscards), + "SAI_PORT_STAT_IF_OUT_ERRORS" => Ok(Self::IfOutErrors), + "SAI_PORT_STAT_IF_OUT_QLEN" => Ok(Self::IfOutQlen), + "SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS" => Ok(Self::IfOutBroadcastPkts), + "SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS" => Ok(Self::IfOutMulticastPkts), + "SAI_PORT_STAT_ETHER_STATS_DROP_EVENTS" => Ok(Self::EtherStatsDropEvents), + "SAI_PORT_STAT_ETHER_STATS_MULTICAST_PKTS" => Ok(Self::EtherStatsMulticastPkts), + "SAI_PORT_STAT_ETHER_STATS_BROADCAST_PKTS" => Ok(Self::EtherStatsBroadcastPkts), + "SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS" => Ok(Self::EtherStatsUndersizePkts), + "SAI_PORT_STAT_ETHER_STATS_FRAGMENTS" => Ok(Self::EtherStatsFragments), + "SAI_PORT_STAT_ETHER_STATS_PKTS_64_OCTETS" => Ok(Self::EtherStatsPkts64Octets), + "SAI_PORT_STAT_ETHER_STATS_PKTS_65_TO_127_OCTETS" => { + Ok(Self::EtherStatsPkts65To127Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_128_TO_255_OCTETS" => { + Ok(Self::EtherStatsPkts128To255Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_256_TO_511_OCTETS" => { + Ok(Self::EtherStatsPkts256To511Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_512_TO_1023_OCTETS" => { + Ok(Self::EtherStatsPkts512To1023Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_1024_TO_1518_OCTETS" => { + Ok(Self::EtherStatsPkts1024To1518Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_1519_TO_2047_OCTETS" => { + Ok(Self::EtherStatsPkts1519To2047Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_2048_TO_4095_OCTETS" => { + Ok(Self::EtherStatsPkts2048To4095Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_4096_TO_9216_OCTETS" => { + Ok(Self::EtherStatsPkts4096To9216Octets) + } + "SAI_PORT_STAT_ETHER_STATS_PKTS_9217_TO_16383_OCTETS" => { + Ok(Self::EtherStatsPkts9217To16383Octets) + } + "SAI_PORT_STAT_ETHER_STATS_OVERSIZE_PKTS" => Ok(Self::EtherStatsOversizePkts), + "SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS" => Ok(Self::EtherRxOversizePkts), + "SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS" => Ok(Self::EtherTxOversizePkts), + "SAI_PORT_STAT_ETHER_STATS_JABBERS" => Ok(Self::EtherStatsJabbers), + "SAI_PORT_STAT_ETHER_STATS_OCTETS" => Ok(Self::EtherStatsOctets), + "SAI_PORT_STAT_ETHER_STATS_PKTS" => Ok(Self::EtherStatsPkts), + "SAI_PORT_STAT_ETHER_STATS_COLLISIONS" => Ok(Self::EtherStatsCollisions), + "SAI_PORT_STAT_ETHER_STATS_CRC_ALIGN_ERRORS" => Ok(Self::EtherStatsCrcAlignErrors), + "SAI_PORT_STAT_ETHER_STATS_TX_NO_ERRORS" => Ok(Self::EtherStatsTxNoErrors), + "SAI_PORT_STAT_ETHER_STATS_RX_NO_ERRORS" => Ok(Self::EtherStatsRxNoErrors), + "SAI_PORT_STAT_IP_IN_RECEIVES" => Ok(Self::IpInReceives), + "SAI_PORT_STAT_IP_IN_OCTETS" => Ok(Self::IpInOctets), + "SAI_PORT_STAT_IP_IN_UCAST_PKTS" => Ok(Self::IpInUcastPkts), + "SAI_PORT_STAT_IP_IN_NON_UCAST_PKTS" => Ok(Self::IpInNonUcastPkts), + "SAI_PORT_STAT_IP_IN_DISCARDS" => Ok(Self::IpInDiscards), + "SAI_PORT_STAT_IP_OUT_OCTETS" => Ok(Self::IpOutOctets), + "SAI_PORT_STAT_IP_OUT_UCAST_PKTS" => Ok(Self::IpOutUcastPkts), + "SAI_PORT_STAT_IP_OUT_NON_UCAST_PKTS" => Ok(Self::IpOutNonUcastPkts), + "SAI_PORT_STAT_IP_OUT_DISCARDS" => Ok(Self::IpOutDiscards), + "SAI_PORT_STAT_IPV6_IN_RECEIVES" => Ok(Self::Ipv6InReceives), + "SAI_PORT_STAT_IPV6_IN_OCTETS" => Ok(Self::Ipv6InOctets), + "SAI_PORT_STAT_IPV6_IN_UCAST_PKTS" => Ok(Self::Ipv6InUcastPkts), + "SAI_PORT_STAT_IPV6_IN_NON_UCAST_PKTS" => Ok(Self::Ipv6InNonUcastPkts), + "SAI_PORT_STAT_IPV6_IN_MCAST_PKTS" => Ok(Self::Ipv6InMcastPkts), + "SAI_PORT_STAT_IPV6_IN_DISCARDS" => Ok(Self::Ipv6InDiscards), + "SAI_PORT_STAT_IPV6_OUT_OCTETS" => Ok(Self::Ipv6OutOctets), + "SAI_PORT_STAT_IPV6_OUT_UCAST_PKTS" => Ok(Self::Ipv6OutUcastPkts), + "SAI_PORT_STAT_IPV6_OUT_NON_UCAST_PKTS" => Ok(Self::Ipv6OutNonUcastPkts), + "SAI_PORT_STAT_IPV6_OUT_MCAST_PKTS" => Ok(Self::Ipv6OutMcastPkts), + "SAI_PORT_STAT_IPV6_OUT_DISCARDS" => Ok(Self::Ipv6OutDiscards), + "SAI_PORT_STAT_GREEN_WRED_DROPPED_PACKETS" => Ok(Self::GreenWredDroppedPackets), + "SAI_PORT_STAT_GREEN_WRED_DROPPED_BYTES" => Ok(Self::GreenWredDroppedBytes), + "SAI_PORT_STAT_YELLOW_WRED_DROPPED_PACKETS" => Ok(Self::YellowWredDroppedPackets), + "SAI_PORT_STAT_YELLOW_WRED_DROPPED_BYTES" => Ok(Self::YellowWredDroppedBytes), + "SAI_PORT_STAT_RED_WRED_DROPPED_PACKETS" => Ok(Self::RedWredDroppedPackets), + "SAI_PORT_STAT_RED_WRED_DROPPED_BYTES" => Ok(Self::RedWredDroppedBytes), + "SAI_PORT_STAT_WRED_DROPPED_PACKETS" => Ok(Self::WredDroppedPackets), + "SAI_PORT_STAT_WRED_DROPPED_BYTES" => Ok(Self::WredDroppedBytes), + "SAI_PORT_STAT_ECN_MARKED_PACKETS" => Ok(Self::EcnMarkedPackets), + "SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS" => Ok(Self::EtherInPkts64Octets), + "SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS" => Ok(Self::EtherInPkts65To127Octets), + "SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS" => Ok(Self::EtherInPkts128To255Octets), + "SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS" => Ok(Self::EtherInPkts256To511Octets), + "SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS" => { + Ok(Self::EtherInPkts512To1023Octets) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS" => { + Ok(Self::EtherInPkts1024To1518Octets) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS" => { + Ok(Self::EtherInPkts1519To2047Octets) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS" => { + Ok(Self::EtherInPkts2048To4095Octets) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS" => { + Ok(Self::EtherInPkts4096To9216Octets) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS" => { + Ok(Self::EtherInPkts9217To16383Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS" => Ok(Self::EtherOutPkts64Octets), + "SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS" => Ok(Self::EtherOutPkts65To127Octets), + "SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS" => { + Ok(Self::EtherOutPkts128To255Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS" => { + Ok(Self::EtherOutPkts256To511Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS" => { + Ok(Self::EtherOutPkts512To1023Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS" => { + Ok(Self::EtherOutPkts1024To1518Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS" => { + Ok(Self::EtherOutPkts1519To2047Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS" => { + Ok(Self::EtherOutPkts2048To4095Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS" => { + Ok(Self::EtherOutPkts4096To9216Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS" => { + Ok(Self::EtherOutPkts9217To16383Octets) + } + "SAI_PORT_STAT_IN_CURR_OCCUPANCY_BYTES" => Ok(Self::InCurrOccupancyBytes), + "SAI_PORT_STAT_IN_WATERMARK_BYTES" => Ok(Self::InWatermarkBytes), + "SAI_PORT_STAT_IN_SHARED_CURR_OCCUPANCY_BYTES" => Ok(Self::InSharedCurrOccupancyBytes), + "SAI_PORT_STAT_IN_SHARED_WATERMARK_BYTES" => Ok(Self::InSharedWatermarkBytes), + "SAI_PORT_STAT_OUT_CURR_OCCUPANCY_BYTES" => Ok(Self::OutCurrOccupancyBytes), + "SAI_PORT_STAT_OUT_WATERMARK_BYTES" => Ok(Self::OutWatermarkBytes), + "SAI_PORT_STAT_OUT_SHARED_CURR_OCCUPANCY_BYTES" => { + Ok(Self::OutSharedCurrOccupancyBytes) + } + "SAI_PORT_STAT_OUT_SHARED_WATERMARK_BYTES" => Ok(Self::OutSharedWatermarkBytes), + "SAI_PORT_STAT_IN_DROPPED_PKTS" => Ok(Self::InDroppedPkts), + "SAI_PORT_STAT_OUT_DROPPED_PKTS" => Ok(Self::OutDroppedPkts), + "SAI_PORT_STAT_PAUSE_RX_PKTS" => Ok(Self::PauseRxPkts), + "SAI_PORT_STAT_PAUSE_TX_PKTS" => Ok(Self::PauseTxPkts), + "SAI_PORT_STAT_PFC_0_RX_PKTS" => Ok(Self::Pfc0RxPkts), + "SAI_PORT_STAT_PFC_0_TX_PKTS" => Ok(Self::Pfc0TxPkts), + "SAI_PORT_STAT_PFC_1_RX_PKTS" => Ok(Self::Pfc1RxPkts), + "SAI_PORT_STAT_PFC_1_TX_PKTS" => Ok(Self::Pfc1TxPkts), + "SAI_PORT_STAT_PFC_2_RX_PKTS" => Ok(Self::Pfc2RxPkts), + "SAI_PORT_STAT_PFC_2_TX_PKTS" => Ok(Self::Pfc2TxPkts), + "SAI_PORT_STAT_PFC_3_RX_PKTS" => Ok(Self::Pfc3RxPkts), + "SAI_PORT_STAT_PFC_3_TX_PKTS" => Ok(Self::Pfc3TxPkts), + "SAI_PORT_STAT_PFC_4_RX_PKTS" => Ok(Self::Pfc4RxPkts), + "SAI_PORT_STAT_PFC_4_TX_PKTS" => Ok(Self::Pfc4TxPkts), + "SAI_PORT_STAT_PFC_5_RX_PKTS" => Ok(Self::Pfc5RxPkts), + "SAI_PORT_STAT_PFC_5_TX_PKTS" => Ok(Self::Pfc5TxPkts), + "SAI_PORT_STAT_PFC_6_RX_PKTS" => Ok(Self::Pfc6RxPkts), + "SAI_PORT_STAT_PFC_6_TX_PKTS" => Ok(Self::Pfc6TxPkts), + "SAI_PORT_STAT_PFC_7_RX_PKTS" => Ok(Self::Pfc7RxPkts), + "SAI_PORT_STAT_PFC_7_TX_PKTS" => Ok(Self::Pfc7TxPkts), + "SAI_PORT_STAT_PFC_0_RX_PAUSE_DURATION" => Ok(Self::Pfc0RxPauseDuration), + "SAI_PORT_STAT_PFC_0_TX_PAUSE_DURATION" => Ok(Self::Pfc0TxPauseDuration), + "SAI_PORT_STAT_PFC_1_RX_PAUSE_DURATION" => Ok(Self::Pfc1RxPauseDuration), + "SAI_PORT_STAT_PFC_1_TX_PAUSE_DURATION" => Ok(Self::Pfc1TxPauseDuration), + "SAI_PORT_STAT_PFC_2_RX_PAUSE_DURATION" => Ok(Self::Pfc2RxPauseDuration), + "SAI_PORT_STAT_PFC_2_TX_PAUSE_DURATION" => Ok(Self::Pfc2TxPauseDuration), + "SAI_PORT_STAT_PFC_3_RX_PAUSE_DURATION" => Ok(Self::Pfc3RxPauseDuration), + "SAI_PORT_STAT_PFC_3_TX_PAUSE_DURATION" => Ok(Self::Pfc3TxPauseDuration), + "SAI_PORT_STAT_PFC_4_RX_PAUSE_DURATION" => Ok(Self::Pfc4RxPauseDuration), + "SAI_PORT_STAT_PFC_4_TX_PAUSE_DURATION" => Ok(Self::Pfc4TxPauseDuration), + "SAI_PORT_STAT_PFC_5_RX_PAUSE_DURATION" => Ok(Self::Pfc5RxPauseDuration), + "SAI_PORT_STAT_PFC_5_TX_PAUSE_DURATION" => Ok(Self::Pfc5TxPauseDuration), + "SAI_PORT_STAT_PFC_6_RX_PAUSE_DURATION" => Ok(Self::Pfc6RxPauseDuration), + "SAI_PORT_STAT_PFC_6_TX_PAUSE_DURATION" => Ok(Self::Pfc6TxPauseDuration), + "SAI_PORT_STAT_PFC_7_RX_PAUSE_DURATION" => Ok(Self::Pfc7RxPauseDuration), + "SAI_PORT_STAT_PFC_7_TX_PAUSE_DURATION" => Ok(Self::Pfc7TxPauseDuration), + "SAI_PORT_STAT_PFC_0_RX_PAUSE_DURATION_US" => Ok(Self::Pfc0RxPauseDurationUs), + "SAI_PORT_STAT_PFC_0_TX_PAUSE_DURATION_US" => Ok(Self::Pfc0TxPauseDurationUs), + "SAI_PORT_STAT_PFC_1_RX_PAUSE_DURATION_US" => Ok(Self::Pfc1RxPauseDurationUs), + "SAI_PORT_STAT_PFC_1_TX_PAUSE_DURATION_US" => Ok(Self::Pfc1TxPauseDurationUs), + "SAI_PORT_STAT_PFC_2_RX_PAUSE_DURATION_US" => Ok(Self::Pfc2RxPauseDurationUs), + "SAI_PORT_STAT_PFC_2_TX_PAUSE_DURATION_US" => Ok(Self::Pfc2TxPauseDurationUs), + "SAI_PORT_STAT_PFC_3_RX_PAUSE_DURATION_US" => Ok(Self::Pfc3RxPauseDurationUs), + "SAI_PORT_STAT_PFC_3_TX_PAUSE_DURATION_US" => Ok(Self::Pfc3TxPauseDurationUs), + "SAI_PORT_STAT_PFC_4_RX_PAUSE_DURATION_US" => Ok(Self::Pfc4RxPauseDurationUs), + "SAI_PORT_STAT_PFC_4_TX_PAUSE_DURATION_US" => Ok(Self::Pfc4TxPauseDurationUs), + "SAI_PORT_STAT_PFC_5_RX_PAUSE_DURATION_US" => Ok(Self::Pfc5RxPauseDurationUs), + "SAI_PORT_STAT_PFC_5_TX_PAUSE_DURATION_US" => Ok(Self::Pfc5TxPauseDurationUs), + "SAI_PORT_STAT_PFC_6_RX_PAUSE_DURATION_US" => Ok(Self::Pfc6RxPauseDurationUs), + "SAI_PORT_STAT_PFC_6_TX_PAUSE_DURATION_US" => Ok(Self::Pfc6TxPauseDurationUs), + "SAI_PORT_STAT_PFC_7_RX_PAUSE_DURATION_US" => Ok(Self::Pfc7RxPauseDurationUs), + "SAI_PORT_STAT_PFC_7_TX_PAUSE_DURATION_US" => Ok(Self::Pfc7TxPauseDurationUs), + "SAI_PORT_STAT_PFC_0_ON2OFF_RX_PKTS" => Ok(Self::Pfc0On2OffRxPkts), + "SAI_PORT_STAT_PFC_1_ON2OFF_RX_PKTS" => Ok(Self::Pfc1On2OffRxPkts), + "SAI_PORT_STAT_PFC_2_ON2OFF_RX_PKTS" => Ok(Self::Pfc2On2OffRxPkts), + "SAI_PORT_STAT_PFC_3_ON2OFF_RX_PKTS" => Ok(Self::Pfc3On2OffRxPkts), + "SAI_PORT_STAT_PFC_4_ON2OFF_RX_PKTS" => Ok(Self::Pfc4On2OffRxPkts), + "SAI_PORT_STAT_PFC_5_ON2OFF_RX_PKTS" => Ok(Self::Pfc5On2OffRxPkts), + "SAI_PORT_STAT_PFC_6_ON2OFF_RX_PKTS" => Ok(Self::Pfc6On2OffRxPkts), + "SAI_PORT_STAT_PFC_7_ON2OFF_RX_PKTS" => Ok(Self::Pfc7On2OffRxPkts), + "SAI_PORT_STAT_DOT3_STATS_ALIGNMENT_ERRORS" => Ok(Self::Dot3StatsAlignmentErrors), + "SAI_PORT_STAT_DOT3_STATS_FCS_ERRORS" => Ok(Self::Dot3StatsFcsErrors), + "SAI_PORT_STAT_DOT3_STATS_SINGLE_COLLISION_FRAMES" => { + Ok(Self::Dot3StatsSingleCollisionFrames) + } + "SAI_PORT_STAT_DOT3_STATS_MULTIPLE_COLLISION_FRAMES" => { + Ok(Self::Dot3StatsMultipleCollisionFrames) + } + "SAI_PORT_STAT_DOT3_STATS_SQE_TEST_ERRORS" => Ok(Self::Dot3StatsSqeTestErrors), + "SAI_PORT_STAT_DOT3_STATS_DEFERRED_TRANSMISSIONS" => { + Ok(Self::Dot3StatsDeferredTransmissions) + } + "SAI_PORT_STAT_DOT3_STATS_LATE_COLLISIONS" => Ok(Self::Dot3StatsLateCollisions), + "SAI_PORT_STAT_DOT3_STATS_EXCESSIVE_COLLISIONS" => { + Ok(Self::Dot3StatsExcessiveCollisions) + } + "SAI_PORT_STAT_DOT3_STATS_INTERNAL_MAC_TRANSMIT_ERRORS" => { + Ok(Self::Dot3StatsInternalMacTransmitErrors) + } + "SAI_PORT_STAT_DOT3_STATS_CARRIER_SENSE_ERRORS" => { + Ok(Self::Dot3StatsCarrierSenseErrors) + } + "SAI_PORT_STAT_DOT3_STATS_FRAME_TOO_LONGS" => Ok(Self::Dot3StatsFrameTooLongs), + "SAI_PORT_STAT_DOT3_STATS_INTERNAL_MAC_RECEIVE_ERRORS" => { + Ok(Self::Dot3StatsInternalMacReceiveErrors) + } + "SAI_PORT_STAT_DOT3_STATS_SYMBOL_ERRORS" => Ok(Self::Dot3StatsSymbolErrors), + "SAI_PORT_STAT_DOT3_CONTROL_IN_UNKNOWN_OPCODES" => { + Ok(Self::Dot3ControlInUnknownOpcodes) + } + "SAI_PORT_STAT_EEE_TX_EVENT_COUNT" => Ok(Self::EeeTxEventCount), + "SAI_PORT_STAT_EEE_RX_EVENT_COUNT" => Ok(Self::EeeRxEventCount), + "SAI_PORT_STAT_EEE_TX_DURATION" => Ok(Self::EeeTxDuration), + "SAI_PORT_STAT_EEE_RX_DURATION" => Ok(Self::EeeRxDuration), + "SAI_PORT_STAT_PRBS_ERROR_COUNT" => Ok(Self::PrbsErrorCount), + "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES" => Ok(Self::IfInFecCorrectableFrames), + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES" => { + Ok(Self::IfInFecNotCorrectableFrames) + } + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS" => Ok(Self::IfInFecSymbolErrors), + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS" => Ok(Self::IfInFabricDataUnits), + "SAI_PORT_STAT_IF_OUT_FABRIC_DATA_UNITS" => Ok(Self::IfOutFabricDataUnits), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0" => Ok(Self::IfInFecCodewordErrorsS0), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1" => Ok(Self::IfInFecCodewordErrorsS1), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2" => Ok(Self::IfInFecCodewordErrorsS2), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3" => Ok(Self::IfInFecCodewordErrorsS3), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4" => Ok(Self::IfInFecCodewordErrorsS4), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5" => Ok(Self::IfInFecCodewordErrorsS5), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6" => Ok(Self::IfInFecCodewordErrorsS6), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7" => Ok(Self::IfInFecCodewordErrorsS7), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8" => Ok(Self::IfInFecCodewordErrorsS8), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9" => Ok(Self::IfInFecCodewordErrorsS9), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10" => Ok(Self::IfInFecCodewordErrorsS10), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11" => Ok(Self::IfInFecCodewordErrorsS11), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12" => Ok(Self::IfInFecCodewordErrorsS12), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13" => Ok(Self::IfInFecCodewordErrorsS13), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14" => Ok(Self::IfInFecCodewordErrorsS14), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15" => Ok(Self::IfInFecCodewordErrorsS15), + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S16" => Ok(Self::IfInFecCodewordErrorsS16), + "SAI_PORT_STAT_IF_IN_FEC_CORRECTED_BITS" => Ok(Self::IfInFecCorrectedBits), + "SAI_PORT_STAT_TRIM_PACKETS" => Ok(Self::TrimPackets), + "SAI_PORT_STAT_DROPPED_TRIM_PACKETS" => Ok(Self::DroppedTrimPackets), + "SAI_PORT_STAT_TX_TRIM_PACKETS" => Ok(Self::TxTrimPackets), + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_0_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons0DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons1DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_2_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons2DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_3_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons3DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_4_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons4DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_5_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons5DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_6_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons6DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_7_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons7DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_8_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons8DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_9_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons9DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_10_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons10DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_11_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons11DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_12_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons12DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_13_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons13DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_14_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons14DroppedPkts) + } + "SAI_PORT_STAT_IN_CONFIGURED_DROP_REASONS_15_DROPPED_PKTS" => { + Ok(Self::InConfiguredDropReasons15DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_0_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons0DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons1DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_2_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons2DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_3_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons3DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_4_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons4DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_5_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons5DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_6_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons6DroppedPkts) + } + "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_7_DROPPED_PKTS" => { + Ok(Self::OutConfiguredDropReasons7DroppedPkts) + } + "SAI_PORT_STAT_IF_IN_HW_PROTECTION_SWITCHOVER_EVENTS" => { + Ok(Self::IfInHwProtectionSwitchoverEvents) + } + "SAI_PORT_STAT_IF_IN_HW_PROTECTION_SWITCHOVER_DROP_PKTS" => { + Ok(Self::IfInHwProtectionSwitchoverDropPkts) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2500_OCTETS" => { + Ok(Self::EtherInPkts1519To2500Octets) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_2501_TO_9000_OCTETS" => { + Ok(Self::EtherInPkts2501To9000Octets) + } + "SAI_PORT_STAT_ETHER_IN_PKTS_9001_TO_16383_OCTETS" => { + Ok(Self::EtherInPkts9001To16383Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2500_OCTETS" => { + Ok(Self::EtherOutPkts1519To2500Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_2501_TO_9000_OCTETS" => { + Ok(Self::EtherOutPkts2501To9000Octets) + } + "SAI_PORT_STAT_ETHER_OUT_PKTS_9001_TO_16383_OCTETS" => { + Ok(Self::EtherOutPkts9001To16383Octets) + } + "SAI_PORT_STAT_END" => Ok(Self::End), + _ => Err(format!("Unknown SAI port stat: {}", s)), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_basic_conversion() { + assert_eq!(SaiPortStat::IfInOctets.to_u32(), 0); + assert_eq!(SaiPortStat::IfInUcastPkts.to_u32(), 1); + assert_eq!(SaiPortStat::Pfc0RxPkts.to_u32(), 103); + assert_eq!(SaiPortStat::End.to_u32(), 0x00002010); + } + + #[test] + fn test_from_u32() { + assert_eq!(SaiPortStat::from_u32(0), Some(SaiPortStat::IfInOctets)); + assert_eq!(SaiPortStat::from_u32(1), Some(SaiPortStat::IfInUcastPkts)); + assert_eq!(SaiPortStat::from_u32(103), Some(SaiPortStat::Pfc0RxPkts)); + assert_eq!( + SaiPortStat::from_u32(0x00001000), + Some(SaiPortStat::InConfiguredDropReasons0DroppedPkts) + ); + assert_eq!(SaiPortStat::from_u32(0x00002010), Some(SaiPortStat::End)); + assert_eq!(SaiPortStat::from_u32(999999), None); + } + + #[test] + fn test_string_conversion() { + let stat = SaiPortStat::IfInOctets; + assert_eq!(stat.to_string(), "SAI_PORT_STAT_IF_IN_OCTETS"); + assert_eq!( + "SAI_PORT_STAT_IF_IN_OCTETS".parse::().unwrap(), + stat + ); + + let pfc_stat = SaiPortStat::Pfc0RxPkts; + assert_eq!(pfc_stat.to_string(), "SAI_PORT_STAT_PFC_0_RX_PKTS"); + assert_eq!( + "SAI_PORT_STAT_PFC_0_RX_PKTS" + .parse::() + .unwrap(), + pfc_stat + ); + + // Test that both START and IF_IN_OCTETS parse to the same enum value + assert_eq!( + "SAI_PORT_STAT_START".parse::().unwrap(), + SaiPortStat::IfInOctets + ); + } +} diff --git a/crates/countersyncd/src/sai/saiqueue.rs b/crates/countersyncd/src/sai/saiqueue.rs new file mode 100644 index 00000000000..325cd47500a --- /dev/null +++ b/crates/countersyncd/src/sai/saiqueue.rs @@ -0,0 +1,442 @@ +use std::fmt; +use std::str::FromStr; + +/// SAI queue statistics enum +/// This enum represents all the queue statistics defined in sai_queue_stat_t +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u32)] +pub enum SaiQueueStat { + /// Get/set tx packets count [uint64_t] + Packets = 0x00000000, + + /// Get/set tx bytes count [uint64_t] + Bytes = 0x00000001, + + /// Get/set dropped packets count [uint64_t] + DroppedPackets = 0x00000002, + + /// Get/set dropped bytes count [uint64_t] + DroppedBytes = 0x00000003, + + /// Get/set green color tx packets count [uint64_t] + GreenPackets = 0x00000004, + + /// Get/set green color tx bytes count [uint64_t] + GreenBytes = 0x00000005, + + /// Get/set green color dropped packets count [uint64_t] + GreenDroppedPackets = 0x00000006, + + /// Get/set green color dropped bytes count [uint64_t] + GreenDroppedBytes = 0x00000007, + + /// Get/set yellow color tx packets count [uint64_t] + YellowPackets = 0x00000008, + + /// Get/set yellow color tx bytes count [uint64_t] + YellowBytes = 0x00000009, + + /// Get/set yellow color dropped packets count [uint64_t] + YellowDroppedPackets = 0x0000000a, + + /// Get/set yellow color dropped bytes count [uint64_t] + YellowDroppedBytes = 0x0000000b, + + /// Get/set red color tx packets count [uint64_t] + RedPackets = 0x0000000c, + + /// Get/set red color tx bytes count [uint64_t] + RedBytes = 0x0000000d, + + /// Get/set red color dropped packets count [uint64_t] + RedDroppedPackets = 0x0000000e, + + /// Get/set red color dropped bytes count [uint64_t] + RedDroppedBytes = 0x0000000f, + + /// Get/set WRED green color dropped packets count [uint64_t] + GreenWredDroppedPackets = 0x00000010, + + /// Get/set WRED green color dropped bytes count [uint64_t] + GreenWredDroppedBytes = 0x00000011, + + /// Get/set WRED yellow color dropped packets count [uint64_t] + YellowWredDroppedPackets = 0x00000012, + + /// Get/set WRED yellow color dropped bytes count [uint64_t] + YellowWredDroppedBytes = 0x00000013, + + /// Get/set WRED red color dropped packets count [uint64_t] + RedWredDroppedPackets = 0x00000014, + + /// Get/set WRED red color dropped bytes count [uint64_t] + RedWredDroppedBytes = 0x00000015, + + /// Get/set WRED dropped packets count [uint64_t] + WredDroppedPackets = 0x00000016, + + /// Get/set WRED dropped bytes count [uint64_t] + WredDroppedBytes = 0x00000017, + + /// Get current queue occupancy in bytes [uint64_t] + CurrOccupancyBytes = 0x00000018, + + /// Get watermark queue occupancy in bytes [uint64_t] + WatermarkBytes = 0x00000019, + + /// Get current queue shared occupancy in bytes [uint64_t] + SharedCurrOccupancyBytes = 0x0000001a, + + /// Get watermark queue shared occupancy in bytes [uint64_t] + SharedWatermarkBytes = 0x0000001b, + + /// Get/set WRED green color marked packets count [uint64_t] + GreenWredEcnMarkedPackets = 0x0000001c, + + /// Get/set WRED green color marked bytes count [uint64_t] + GreenWredEcnMarkedBytes = 0x0000001d, + + /// Get/set WRED yellow color marked packets count [uint64_t] + YellowWredEcnMarkedPackets = 0x0000001e, + + /// Get/set WRED yellow color marked bytes count [uint64_t] + YellowWredEcnMarkedBytes = 0x0000001f, + + /// Get/set WRED red color marked packets count [uint64_t] + RedWredEcnMarkedPackets = 0x00000020, + + /// Get/set WRED red color marked bytes count [uint64_t] + RedWredEcnMarkedBytes = 0x00000021, + + /// Get/set WRED marked packets count [uint64_t] + WredEcnMarkedPackets = 0x00000022, + + /// Get/set WRED marked bytes count [uint64_t] + WredEcnMarkedBytes = 0x00000023, + + /// Get current queue occupancy percentage [uint64_t] + CurrOccupancyLevel = 0x00000024, + + /// Get watermark queue occupancy percentage [uint64_t] + WatermarkLevel = 0x00000025, + + /// Get packets deleted when the credit watch dog expires for VOQ System [uint64_t] + CreditWdDeletedPackets = 0x00000026, + + /// Queue delay watermark in nanoseconds [uint64_t] + DelayWatermarkNs = 0x00000027, + + /// Packets trimmed due to failed admission [uint64_t] + TrimPackets = 0x00000028, + + /// Get current queue occupancy in cells [uint64_t] + CurrOccupancyCells = 0x00000029, + + /// Get watermark queue occupancy in cells [uint64_t] + WatermarkCells = 0x0000002a, + + /// Get current queue shared occupancy in cells [uint64_t] + SharedCurrOccupancyCells = 0x0000002b, + + /// Get watermark queue shared occupancy in cells [uint64_t] + SharedWatermarkCells = 0x0000002c, + + /// Packets trimmed but failed to be admitted on a trim queue due to congestion [uint64_t] + DroppedTrimPackets = 0x0000002d, + + /// Packets trimmed and successfully transmitted on a trim queue [uint64_t] + TxTrimPackets = 0x0000002e, + + /// Custom range base value + CustomRangeBase = 0x10000000, +} + +impl SaiQueueStat { + /// Convert from u32 value to enum variant + pub fn from_u32(value: u32) -> Option { + match value { + 0x00000000 => Some(Self::Packets), + 0x00000001 => Some(Self::Bytes), + 0x00000002 => Some(Self::DroppedPackets), + 0x00000003 => Some(Self::DroppedBytes), + 0x00000004 => Some(Self::GreenPackets), + 0x00000005 => Some(Self::GreenBytes), + 0x00000006 => Some(Self::GreenDroppedPackets), + 0x00000007 => Some(Self::GreenDroppedBytes), + 0x00000008 => Some(Self::YellowPackets), + 0x00000009 => Some(Self::YellowBytes), + 0x0000000a => Some(Self::YellowDroppedPackets), + 0x0000000b => Some(Self::YellowDroppedBytes), + 0x0000000c => Some(Self::RedPackets), + 0x0000000d => Some(Self::RedBytes), + 0x0000000e => Some(Self::RedDroppedPackets), + 0x0000000f => Some(Self::RedDroppedBytes), + 0x00000010 => Some(Self::GreenWredDroppedPackets), + 0x00000011 => Some(Self::GreenWredDroppedBytes), + 0x00000012 => Some(Self::YellowWredDroppedPackets), + 0x00000013 => Some(Self::YellowWredDroppedBytes), + 0x00000014 => Some(Self::RedWredDroppedPackets), + 0x00000015 => Some(Self::RedWredDroppedBytes), + 0x00000016 => Some(Self::WredDroppedPackets), + 0x00000017 => Some(Self::WredDroppedBytes), + 0x00000018 => Some(Self::CurrOccupancyBytes), + 0x00000019 => Some(Self::WatermarkBytes), + 0x0000001a => Some(Self::SharedCurrOccupancyBytes), + 0x0000001b => Some(Self::SharedWatermarkBytes), + 0x0000001c => Some(Self::GreenWredEcnMarkedPackets), + 0x0000001d => Some(Self::GreenWredEcnMarkedBytes), + 0x0000001e => Some(Self::YellowWredEcnMarkedPackets), + 0x0000001f => Some(Self::YellowWredEcnMarkedBytes), + 0x00000020 => Some(Self::RedWredEcnMarkedPackets), + 0x00000021 => Some(Self::RedWredEcnMarkedBytes), + 0x00000022 => Some(Self::WredEcnMarkedPackets), + 0x00000023 => Some(Self::WredEcnMarkedBytes), + 0x00000024 => Some(Self::CurrOccupancyLevel), + 0x00000025 => Some(Self::WatermarkLevel), + 0x00000026 => Some(Self::CreditWdDeletedPackets), + 0x00000027 => Some(Self::DelayWatermarkNs), + 0x00000028 => Some(Self::TrimPackets), + 0x00000029 => Some(Self::CurrOccupancyCells), + 0x0000002a => Some(Self::WatermarkCells), + 0x0000002b => Some(Self::SharedCurrOccupancyCells), + 0x0000002c => Some(Self::SharedWatermarkCells), + 0x0000002d => Some(Self::DroppedTrimPackets), + 0x0000002e => Some(Self::TxTrimPackets), + 0x10000000 => Some(Self::CustomRangeBase), + _ => None, + } + } + + /// Convert enum variant to u32 value + #[allow(dead_code)] // May be used by external code or future features + pub fn to_u32(self) -> u32 { + self as u32 + } + + /// Get the C enum name as a string + pub fn to_c_name(self) -> &'static str { + match self { + Self::Packets => "SAI_QUEUE_STAT_PACKETS", + Self::Bytes => "SAI_QUEUE_STAT_BYTES", + Self::DroppedPackets => "SAI_QUEUE_STAT_DROPPED_PACKETS", + Self::DroppedBytes => "SAI_QUEUE_STAT_DROPPED_BYTES", + Self::GreenPackets => "SAI_QUEUE_STAT_GREEN_PACKETS", + Self::GreenBytes => "SAI_QUEUE_STAT_GREEN_BYTES", + Self::GreenDroppedPackets => "SAI_QUEUE_STAT_GREEN_DROPPED_PACKETS", + Self::GreenDroppedBytes => "SAI_QUEUE_STAT_GREEN_DROPPED_BYTES", + Self::YellowPackets => "SAI_QUEUE_STAT_YELLOW_PACKETS", + Self::YellowBytes => "SAI_QUEUE_STAT_YELLOW_BYTES", + Self::YellowDroppedPackets => "SAI_QUEUE_STAT_YELLOW_DROPPED_PACKETS", + Self::YellowDroppedBytes => "SAI_QUEUE_STAT_YELLOW_DROPPED_BYTES", + Self::RedPackets => "SAI_QUEUE_STAT_RED_PACKETS", + Self::RedBytes => "SAI_QUEUE_STAT_RED_BYTES", + Self::RedDroppedPackets => "SAI_QUEUE_STAT_RED_DROPPED_PACKETS", + Self::RedDroppedBytes => "SAI_QUEUE_STAT_RED_DROPPED_BYTES", + Self::GreenWredDroppedPackets => "SAI_QUEUE_STAT_GREEN_WRED_DROPPED_PACKETS", + Self::GreenWredDroppedBytes => "SAI_QUEUE_STAT_GREEN_WRED_DROPPED_BYTES", + Self::YellowWredDroppedPackets => "SAI_QUEUE_STAT_YELLOW_WRED_DROPPED_PACKETS", + Self::YellowWredDroppedBytes => "SAI_QUEUE_STAT_YELLOW_WRED_DROPPED_BYTES", + Self::RedWredDroppedPackets => "SAI_QUEUE_STAT_RED_WRED_DROPPED_PACKETS", + Self::RedWredDroppedBytes => "SAI_QUEUE_STAT_RED_WRED_DROPPED_BYTES", + Self::WredDroppedPackets => "SAI_QUEUE_STAT_WRED_DROPPED_PACKETS", + Self::WredDroppedBytes => "SAI_QUEUE_STAT_WRED_DROPPED_BYTES", + Self::CurrOccupancyBytes => "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES", + Self::WatermarkBytes => "SAI_QUEUE_STAT_WATERMARK_BYTES", + Self::SharedCurrOccupancyBytes => "SAI_QUEUE_STAT_SHARED_CURR_OCCUPANCY_BYTES", + Self::SharedWatermarkBytes => "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", + Self::GreenWredEcnMarkedPackets => "SAI_QUEUE_STAT_GREEN_WRED_ECN_MARKED_PACKETS", + Self::GreenWredEcnMarkedBytes => "SAI_QUEUE_STAT_GREEN_WRED_ECN_MARKED_BYTES", + Self::YellowWredEcnMarkedPackets => "SAI_QUEUE_STAT_YELLOW_WRED_ECN_MARKED_PACKETS", + Self::YellowWredEcnMarkedBytes => "SAI_QUEUE_STAT_YELLOW_WRED_ECN_MARKED_BYTES", + Self::RedWredEcnMarkedPackets => "SAI_QUEUE_STAT_RED_WRED_ECN_MARKED_PACKETS", + Self::RedWredEcnMarkedBytes => "SAI_QUEUE_STAT_RED_WRED_ECN_MARKED_BYTES", + Self::WredEcnMarkedPackets => "SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS", + Self::WredEcnMarkedBytes => "SAI_QUEUE_STAT_WRED_ECN_MARKED_BYTES", + Self::CurrOccupancyLevel => "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL", + Self::WatermarkLevel => "SAI_QUEUE_STAT_WATERMARK_LEVEL", + Self::CreditWdDeletedPackets => "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS", + Self::DelayWatermarkNs => "SAI_QUEUE_STAT_DELAY_WATERMARK_NS", + Self::TrimPackets => "SAI_QUEUE_STAT_TRIM_PACKETS", + Self::CurrOccupancyCells => "SAI_QUEUE_STAT_CURR_OCCUPANCY_CELLS", + Self::WatermarkCells => "SAI_QUEUE_STAT_WATERMARK_CELLS", + Self::SharedCurrOccupancyCells => "SAI_QUEUE_STAT_SHARED_CURR_OCCUPANCY_CELLS", + Self::SharedWatermarkCells => "SAI_QUEUE_STAT_SHARED_WATERMARK_CELLS", + Self::DroppedTrimPackets => "SAI_QUEUE_STAT_DROPPED_TRIM_PACKETS", + Self::TxTrimPackets => "SAI_QUEUE_STAT_TX_TRIM_PACKETS", + Self::CustomRangeBase => "SAI_QUEUE_STAT_CUSTOM_RANGE_BASE", + } + } +} + +impl FromStr for SaiQueueStat { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "SAI_QUEUE_STAT_PACKETS" => Ok(Self::Packets), + "SAI_QUEUE_STAT_BYTES" => Ok(Self::Bytes), + "SAI_QUEUE_STAT_DROPPED_PACKETS" => Ok(Self::DroppedPackets), + "SAI_QUEUE_STAT_DROPPED_BYTES" => Ok(Self::DroppedBytes), + "SAI_QUEUE_STAT_GREEN_PACKETS" => Ok(Self::GreenPackets), + "SAI_QUEUE_STAT_GREEN_BYTES" => Ok(Self::GreenBytes), + "SAI_QUEUE_STAT_GREEN_DROPPED_PACKETS" => Ok(Self::GreenDroppedPackets), + "SAI_QUEUE_STAT_GREEN_DROPPED_BYTES" => Ok(Self::GreenDroppedBytes), + "SAI_QUEUE_STAT_YELLOW_PACKETS" => Ok(Self::YellowPackets), + "SAI_QUEUE_STAT_YELLOW_BYTES" => Ok(Self::YellowBytes), + "SAI_QUEUE_STAT_YELLOW_DROPPED_PACKETS" => Ok(Self::YellowDroppedPackets), + "SAI_QUEUE_STAT_YELLOW_DROPPED_BYTES" => Ok(Self::YellowDroppedBytes), + "SAI_QUEUE_STAT_RED_PACKETS" => Ok(Self::RedPackets), + "SAI_QUEUE_STAT_RED_BYTES" => Ok(Self::RedBytes), + "SAI_QUEUE_STAT_RED_DROPPED_PACKETS" => Ok(Self::RedDroppedPackets), + "SAI_QUEUE_STAT_RED_DROPPED_BYTES" => Ok(Self::RedDroppedBytes), + "SAI_QUEUE_STAT_GREEN_WRED_DROPPED_PACKETS" => Ok(Self::GreenWredDroppedPackets), + "SAI_QUEUE_STAT_GREEN_WRED_DROPPED_BYTES" => Ok(Self::GreenWredDroppedBytes), + "SAI_QUEUE_STAT_YELLOW_WRED_DROPPED_PACKETS" => Ok(Self::YellowWredDroppedPackets), + "SAI_QUEUE_STAT_YELLOW_WRED_DROPPED_BYTES" => Ok(Self::YellowWredDroppedBytes), + "SAI_QUEUE_STAT_RED_WRED_DROPPED_PACKETS" => Ok(Self::RedWredDroppedPackets), + "SAI_QUEUE_STAT_RED_WRED_DROPPED_BYTES" => Ok(Self::RedWredDroppedBytes), + "SAI_QUEUE_STAT_WRED_DROPPED_PACKETS" => Ok(Self::WredDroppedPackets), + "SAI_QUEUE_STAT_WRED_DROPPED_BYTES" => Ok(Self::WredDroppedBytes), + "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES" => Ok(Self::CurrOccupancyBytes), + "SAI_QUEUE_STAT_WATERMARK_BYTES" => Ok(Self::WatermarkBytes), + "SAI_QUEUE_STAT_SHARED_CURR_OCCUPANCY_BYTES" => Ok(Self::SharedCurrOccupancyBytes), + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES" => Ok(Self::SharedWatermarkBytes), + "SAI_QUEUE_STAT_GREEN_WRED_ECN_MARKED_PACKETS" => Ok(Self::GreenWredEcnMarkedPackets), + "SAI_QUEUE_STAT_GREEN_WRED_ECN_MARKED_BYTES" => Ok(Self::GreenWredEcnMarkedBytes), + "SAI_QUEUE_STAT_YELLOW_WRED_ECN_MARKED_PACKETS" => Ok(Self::YellowWredEcnMarkedPackets), + "SAI_QUEUE_STAT_YELLOW_WRED_ECN_MARKED_BYTES" => Ok(Self::YellowWredEcnMarkedBytes), + "SAI_QUEUE_STAT_RED_WRED_ECN_MARKED_PACKETS" => Ok(Self::RedWredEcnMarkedPackets), + "SAI_QUEUE_STAT_RED_WRED_ECN_MARKED_BYTES" => Ok(Self::RedWredEcnMarkedBytes), + "SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS" => Ok(Self::WredEcnMarkedPackets), + "SAI_QUEUE_STAT_WRED_ECN_MARKED_BYTES" => Ok(Self::WredEcnMarkedBytes), + "SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL" => Ok(Self::CurrOccupancyLevel), + "SAI_QUEUE_STAT_WATERMARK_LEVEL" => Ok(Self::WatermarkLevel), + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS" => Ok(Self::CreditWdDeletedPackets), + "SAI_QUEUE_STAT_DELAY_WATERMARK_NS" => Ok(Self::DelayWatermarkNs), + "SAI_QUEUE_STAT_TRIM_PACKETS" => Ok(Self::TrimPackets), + "SAI_QUEUE_STAT_CURR_OCCUPANCY_CELLS" => Ok(Self::CurrOccupancyCells), + "SAI_QUEUE_STAT_WATERMARK_CELLS" => Ok(Self::WatermarkCells), + "SAI_QUEUE_STAT_SHARED_CURR_OCCUPANCY_CELLS" => Ok(Self::SharedCurrOccupancyCells), + "SAI_QUEUE_STAT_SHARED_WATERMARK_CELLS" => Ok(Self::SharedWatermarkCells), + "SAI_QUEUE_STAT_DROPPED_TRIM_PACKETS" => Ok(Self::DroppedTrimPackets), + "SAI_QUEUE_STAT_TX_TRIM_PACKETS" => Ok(Self::TxTrimPackets), + "SAI_QUEUE_STAT_CUSTOM_RANGE_BASE" => Ok(Self::CustomRangeBase), + _ => Err(()), + } + } +} + +impl fmt::Display for SaiQueueStat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_c_name()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_from_u32() { + assert_eq!( + SaiQueueStat::from_u32(0x00000000), + Some(SaiQueueStat::Packets) + ); + assert_eq!( + SaiQueueStat::from_u32(0x00000001), + Some(SaiQueueStat::Bytes) + ); + assert_eq!( + SaiQueueStat::from_u32(0x00000002), + Some(SaiQueueStat::DroppedPackets) + ); + assert_eq!( + SaiQueueStat::from_u32(0x0000002e), + Some(SaiQueueStat::TxTrimPackets) + ); + assert_eq!( + SaiQueueStat::from_u32(0x10000000), + Some(SaiQueueStat::CustomRangeBase) + ); + assert_eq!(SaiQueueStat::from_u32(0xFFFFFFFF), None); + } + + #[test] + fn test_to_u32() { + assert_eq!(SaiQueueStat::Packets.to_u32(), 0x00000000); + assert_eq!(SaiQueueStat::Bytes.to_u32(), 0x00000001); + assert_eq!(SaiQueueStat::DroppedPackets.to_u32(), 0x00000002); + assert_eq!(SaiQueueStat::TxTrimPackets.to_u32(), 0x0000002e); + assert_eq!(SaiQueueStat::CustomRangeBase.to_u32(), 0x10000000); + } + + #[test] + fn test_string_conversion() { + let stat = SaiQueueStat::CurrOccupancyBytes; + let c_name = stat.to_c_name(); + assert_eq!(c_name, "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES"); + + let parsed: SaiQueueStat = c_name.parse().unwrap(); + assert_eq!(parsed, stat); + + assert_eq!(format!("{}", stat), c_name); + } + + #[test] + fn test_color_based_stats() { + // Test green color stats + assert_eq!(SaiQueueStat::GreenPackets.to_u32(), 0x00000004); + assert_eq!(SaiQueueStat::GreenBytes.to_u32(), 0x00000005); + assert_eq!(SaiQueueStat::GreenDroppedPackets.to_u32(), 0x00000006); + + // Test yellow color stats + assert_eq!(SaiQueueStat::YellowPackets.to_u32(), 0x00000008); + assert_eq!(SaiQueueStat::YellowDroppedBytes.to_u32(), 0x0000000b); + + // Test red color stats + assert_eq!(SaiQueueStat::RedPackets.to_u32(), 0x0000000c); + assert_eq!(SaiQueueStat::RedDroppedBytes.to_u32(), 0x0000000f); + } + + #[test] + fn test_wred_stats() { + // Test WRED drop stats + assert_eq!(SaiQueueStat::GreenWredDroppedPackets.to_u32(), 0x00000010); + assert_eq!(SaiQueueStat::YellowWredDroppedBytes.to_u32(), 0x00000013); + assert_eq!(SaiQueueStat::RedWredDroppedPackets.to_u32(), 0x00000014); + assert_eq!(SaiQueueStat::WredDroppedBytes.to_u32(), 0x00000017); + + // Test WRED ECN mark stats + assert_eq!(SaiQueueStat::GreenWredEcnMarkedPackets.to_u32(), 0x0000001c); + assert_eq!(SaiQueueStat::WredEcnMarkedBytes.to_u32(), 0x00000023); + } + + #[test] + fn test_occupancy_stats() { + // Test byte-based occupancy stats + assert_eq!(SaiQueueStat::CurrOccupancyBytes.to_u32(), 0x00000018); + assert_eq!(SaiQueueStat::WatermarkBytes.to_u32(), 0x00000019); + assert_eq!(SaiQueueStat::SharedCurrOccupancyBytes.to_u32(), 0x0000001a); + assert_eq!(SaiQueueStat::SharedWatermarkBytes.to_u32(), 0x0000001b); + + // Test cell-based occupancy stats + assert_eq!(SaiQueueStat::CurrOccupancyCells.to_u32(), 0x00000029); + assert_eq!(SaiQueueStat::WatermarkCells.to_u32(), 0x0000002a); + assert_eq!(SaiQueueStat::SharedCurrOccupancyCells.to_u32(), 0x0000002b); + assert_eq!(SaiQueueStat::SharedWatermarkCells.to_u32(), 0x0000002c); + + // Test occupancy level stats + assert_eq!(SaiQueueStat::CurrOccupancyLevel.to_u32(), 0x00000024); + assert_eq!(SaiQueueStat::WatermarkLevel.to_u32(), 0x00000025); + } + + #[test] + fn test_special_stats() { + // Test specialized queue statistics + assert_eq!(SaiQueueStat::CreditWdDeletedPackets.to_u32(), 0x00000026); + assert_eq!(SaiQueueStat::DelayWatermarkNs.to_u32(), 0x00000027); + assert_eq!(SaiQueueStat::TrimPackets.to_u32(), 0x00000028); + assert_eq!(SaiQueueStat::DroppedTrimPackets.to_u32(), 0x0000002d); + assert_eq!(SaiQueueStat::TxTrimPackets.to_u32(), 0x0000002e); + } +} diff --git a/crates/countersyncd/src/sai/saitypes.rs b/crates/countersyncd/src/sai/saitypes.rs new file mode 100644 index 00000000000..b2e6279744c --- /dev/null +++ b/crates/countersyncd/src/sai/saitypes.rs @@ -0,0 +1,573 @@ +use std::fmt; +use std::str::FromStr; + +/// SAI object type +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u32)] +pub enum SaiObjectType { + /// invalid object type + Null = 0, + Port = 1, + Lag = 2, + VirtualRouter = 3, + NextHop = 4, + NextHopGroup = 5, + RouterInterface = 6, + AclTable = 7, + AclEntry = 8, + AclCounter = 9, + AclRange = 10, + AclTableGroup = 11, + AclTableGroupMember = 12, + Hostif = 13, + MirrorSession = 14, + Samplepacket = 15, + Stp = 16, + HostifTrapGroup = 17, + Policer = 18, + Wred = 19, + QosMap = 20, + Queue = 21, + Scheduler = 22, + SchedulerGroup = 23, + BufferPool = 24, + BufferProfile = 25, + IngressPriorityGroup = 26, + LagMember = 27, + Hash = 28, + Udf = 29, + UdfMatch = 30, + UdfGroup = 31, + FdbEntry = 32, + Switch = 33, + HostifTrap = 34, + HostifTableEntry = 35, + NeighborEntry = 36, + RouteEntry = 37, + Vlan = 38, + VlanMember = 39, + HostifPacket = 40, + TunnelMap = 41, + Tunnel = 42, + TunnelTermTableEntry = 43, + FdbFlush = 44, + NextHopGroupMember = 45, + StpPort = 46, + RpfGroup = 47, + RpfGroupMember = 48, + L2mcGroup = 49, + L2mcGroupMember = 50, + IpmcGroup = 51, + IpmcGroupMember = 52, + L2mcEntry = 53, + IpmcEntry = 54, + McastFdbEntry = 55, + HostifUserDefinedTrap = 56, + Bridge = 57, + BridgePort = 58, + TunnelMapEntry = 59, + Tam = 60, + Srv6Sidlist = 61, + PortPool = 62, + InsegEntry = 63, + /// experimental + Dtel = 64, + /// experimental + DtelQueueReport = 65, + /// experimental + DtelIntSession = 66, + /// experimental + DtelReportSession = 67, + /// experimental + DtelEvent = 68, + BfdSession = 69, + IsolationGroup = 70, + IsolationGroupMember = 71, + TamMathFunc = 72, + TamReport = 73, + TamEventThreshold = 74, + TamTelType = 75, + TamTransport = 76, + TamTelemetry = 77, + TamCollector = 78, + TamEventAction = 79, + TamEvent = 80, + NatZoneCounter = 81, + NatEntry = 82, + TamInt = 83, + Counter = 84, + DebugCounter = 85, + PortConnector = 86, + PortSerdes = 87, + Macsec = 88, + MacsecPort = 89, + MacsecFlow = 90, + MacsecSc = 91, + MacsecSa = 92, + SystemPort = 93, + FineGrainedHashField = 94, + SwitchTunnel = 95, + MySidEntry = 96, + MyMac = 97, + NextHopGroupMap = 98, + Ipsec = 99, + IpsecPort = 100, + IpsecSa = 101, + GenericProgrammable = 102, + ArsProfile = 103, + Ars = 104, + AclTableChainGroup = 105, + TwampSession = 106, + TamCounterSubscription = 107, + PoeDevice = 108, + PoePse = 109, + PoePort = 110, + IcmpEchoSession = 111, + PrefixCompressionTable = 112, + PrefixCompressionEntry = 113, + SynceClock = 114, + /// Must remain in last position + Max = 115, + /// Custom range base + CustomRangeBase = 0x10000000, + ExtensionsRangeBase = 0x20000000, +} + +impl SaiObjectType { + /// Convert from u32 to SaiObjectType + pub fn from_u32(value: u32) -> Option { + match value { + 0 => Some(Self::Null), + 1 => Some(Self::Port), + 2 => Some(Self::Lag), + 3 => Some(Self::VirtualRouter), + 4 => Some(Self::NextHop), + 5 => Some(Self::NextHopGroup), + 6 => Some(Self::RouterInterface), + 7 => Some(Self::AclTable), + 8 => Some(Self::AclEntry), + 9 => Some(Self::AclCounter), + 10 => Some(Self::AclRange), + 11 => Some(Self::AclTableGroup), + 12 => Some(Self::AclTableGroupMember), + 13 => Some(Self::Hostif), + 14 => Some(Self::MirrorSession), + 15 => Some(Self::Samplepacket), + 16 => Some(Self::Stp), + 17 => Some(Self::HostifTrapGroup), + 18 => Some(Self::Policer), + 19 => Some(Self::Wred), + 20 => Some(Self::QosMap), + 21 => Some(Self::Queue), + 22 => Some(Self::Scheduler), + 23 => Some(Self::SchedulerGroup), + 24 => Some(Self::BufferPool), + 25 => Some(Self::BufferProfile), + 26 => Some(Self::IngressPriorityGroup), + 27 => Some(Self::LagMember), + 28 => Some(Self::Hash), + 29 => Some(Self::Udf), + 30 => Some(Self::UdfMatch), + 31 => Some(Self::UdfGroup), + 32 => Some(Self::FdbEntry), + 33 => Some(Self::Switch), + 34 => Some(Self::HostifTrap), + 35 => Some(Self::HostifTableEntry), + 36 => Some(Self::NeighborEntry), + 37 => Some(Self::RouteEntry), + 38 => Some(Self::Vlan), + 39 => Some(Self::VlanMember), + 40 => Some(Self::HostifPacket), + 41 => Some(Self::TunnelMap), + 42 => Some(Self::Tunnel), + 43 => Some(Self::TunnelTermTableEntry), + 44 => Some(Self::FdbFlush), + 45 => Some(Self::NextHopGroupMember), + 46 => Some(Self::StpPort), + 47 => Some(Self::RpfGroup), + 48 => Some(Self::RpfGroupMember), + 49 => Some(Self::L2mcGroup), + 50 => Some(Self::L2mcGroupMember), + 51 => Some(Self::IpmcGroup), + 52 => Some(Self::IpmcGroupMember), + 53 => Some(Self::L2mcEntry), + 54 => Some(Self::IpmcEntry), + 55 => Some(Self::McastFdbEntry), + 56 => Some(Self::HostifUserDefinedTrap), + 57 => Some(Self::Bridge), + 58 => Some(Self::BridgePort), + 59 => Some(Self::TunnelMapEntry), + 60 => Some(Self::Tam), + 61 => Some(Self::Srv6Sidlist), + 62 => Some(Self::PortPool), + 63 => Some(Self::InsegEntry), + 64 => Some(Self::Dtel), + 65 => Some(Self::DtelQueueReport), + 66 => Some(Self::DtelIntSession), + 67 => Some(Self::DtelReportSession), + 68 => Some(Self::DtelEvent), + 69 => Some(Self::BfdSession), + 70 => Some(Self::IsolationGroup), + 71 => Some(Self::IsolationGroupMember), + 72 => Some(Self::TamMathFunc), + 73 => Some(Self::TamReport), + 74 => Some(Self::TamEventThreshold), + 75 => Some(Self::TamTelType), + 76 => Some(Self::TamTransport), + 77 => Some(Self::TamTelemetry), + 78 => Some(Self::TamCollector), + 79 => Some(Self::TamEventAction), + 80 => Some(Self::TamEvent), + 81 => Some(Self::NatZoneCounter), + 82 => Some(Self::NatEntry), + 83 => Some(Self::TamInt), + 84 => Some(Self::Counter), + 85 => Some(Self::DebugCounter), + 86 => Some(Self::PortConnector), + 87 => Some(Self::PortSerdes), + 88 => Some(Self::Macsec), + 89 => Some(Self::MacsecPort), + 90 => Some(Self::MacsecFlow), + 91 => Some(Self::MacsecSc), + 92 => Some(Self::MacsecSa), + 93 => Some(Self::SystemPort), + 94 => Some(Self::FineGrainedHashField), + 95 => Some(Self::SwitchTunnel), + 96 => Some(Self::MySidEntry), + 97 => Some(Self::MyMac), + 98 => Some(Self::NextHopGroupMap), + 99 => Some(Self::Ipsec), + 100 => Some(Self::IpsecPort), + 101 => Some(Self::IpsecSa), + 102 => Some(Self::GenericProgrammable), + 103 => Some(Self::ArsProfile), + 104 => Some(Self::Ars), + 105 => Some(Self::AclTableChainGroup), + 106 => Some(Self::TwampSession), + 107 => Some(Self::TamCounterSubscription), + 108 => Some(Self::PoeDevice), + 109 => Some(Self::PoePse), + 110 => Some(Self::PoePort), + 111 => Some(Self::IcmpEchoSession), + 112 => Some(Self::PrefixCompressionTable), + 113 => Some(Self::PrefixCompressionEntry), + 114 => Some(Self::SynceClock), + 115 => Some(Self::Max), + 0x10000000 => Some(Self::CustomRangeBase), + 0x20000000 => Some(Self::ExtensionsRangeBase), + _ => None, + } + } + + /// Convert to u32 + pub fn to_u32(self) -> u32 { + self as u32 + } + + /// Get the string name (original C enum name) + pub fn to_c_name(self) -> &'static str { + match self { + Self::Null => "SAI_OBJECT_TYPE_NULL", + Self::Port => "SAI_OBJECT_TYPE_PORT", + Self::Lag => "SAI_OBJECT_TYPE_LAG", + Self::VirtualRouter => "SAI_OBJECT_TYPE_VIRTUAL_ROUTER", + Self::NextHop => "SAI_OBJECT_TYPE_NEXT_HOP", + Self::NextHopGroup => "SAI_OBJECT_TYPE_NEXT_HOP_GROUP", + Self::RouterInterface => "SAI_OBJECT_TYPE_ROUTER_INTERFACE", + Self::AclTable => "SAI_OBJECT_TYPE_ACL_TABLE", + Self::AclEntry => "SAI_OBJECT_TYPE_ACL_ENTRY", + Self::AclCounter => "SAI_OBJECT_TYPE_ACL_COUNTER", + Self::AclRange => "SAI_OBJECT_TYPE_ACL_RANGE", + Self::AclTableGroup => "SAI_OBJECT_TYPE_ACL_TABLE_GROUP", + Self::AclTableGroupMember => "SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER", + Self::Hostif => "SAI_OBJECT_TYPE_HOSTIF", + Self::MirrorSession => "SAI_OBJECT_TYPE_MIRROR_SESSION", + Self::Samplepacket => "SAI_OBJECT_TYPE_SAMPLEPACKET", + Self::Stp => "SAI_OBJECT_TYPE_STP", + Self::HostifTrapGroup => "SAI_OBJECT_TYPE_HOSTIF_TRAP_GROUP", + Self::Policer => "SAI_OBJECT_TYPE_POLICER", + Self::Wred => "SAI_OBJECT_TYPE_WRED", + Self::QosMap => "SAI_OBJECT_TYPE_QOS_MAP", + Self::Queue => "SAI_OBJECT_TYPE_QUEUE", + Self::Scheduler => "SAI_OBJECT_TYPE_SCHEDULER", + Self::SchedulerGroup => "SAI_OBJECT_TYPE_SCHEDULER_GROUP", + Self::BufferPool => "SAI_OBJECT_TYPE_BUFFER_POOL", + Self::BufferProfile => "SAI_OBJECT_TYPE_BUFFER_PROFILE", + Self::IngressPriorityGroup => "SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", + Self::LagMember => "SAI_OBJECT_TYPE_LAG_MEMBER", + Self::Hash => "SAI_OBJECT_TYPE_HASH", + Self::Udf => "SAI_OBJECT_TYPE_UDF", + Self::UdfMatch => "SAI_OBJECT_TYPE_UDF_MATCH", + Self::UdfGroup => "SAI_OBJECT_TYPE_UDF_GROUP", + Self::FdbEntry => "SAI_OBJECT_TYPE_FDB_ENTRY", + Self::Switch => "SAI_OBJECT_TYPE_SWITCH", + Self::HostifTrap => "SAI_OBJECT_TYPE_HOSTIF_TRAP", + Self::HostifTableEntry => "SAI_OBJECT_TYPE_HOSTIF_TABLE_ENTRY", + Self::NeighborEntry => "SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", + Self::RouteEntry => "SAI_OBJECT_TYPE_ROUTE_ENTRY", + Self::Vlan => "SAI_OBJECT_TYPE_VLAN", + Self::VlanMember => "SAI_OBJECT_TYPE_VLAN_MEMBER", + Self::HostifPacket => "SAI_OBJECT_TYPE_HOSTIF_PACKET", + Self::TunnelMap => "SAI_OBJECT_TYPE_TUNNEL_MAP", + Self::Tunnel => "SAI_OBJECT_TYPE_TUNNEL", + Self::TunnelTermTableEntry => "SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY", + Self::FdbFlush => "SAI_OBJECT_TYPE_FDB_FLUSH", + Self::NextHopGroupMember => "SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", + Self::StpPort => "SAI_OBJECT_TYPE_STP_PORT", + Self::RpfGroup => "SAI_OBJECT_TYPE_RPF_GROUP", + Self::RpfGroupMember => "SAI_OBJECT_TYPE_RPF_GROUP_MEMBER", + Self::L2mcGroup => "SAI_OBJECT_TYPE_L2MC_GROUP", + Self::L2mcGroupMember => "SAI_OBJECT_TYPE_L2MC_GROUP_MEMBER", + Self::IpmcGroup => "SAI_OBJECT_TYPE_IPMC_GROUP", + Self::IpmcGroupMember => "SAI_OBJECT_TYPE_IPMC_GROUP_MEMBER", + Self::L2mcEntry => "SAI_OBJECT_TYPE_L2MC_ENTRY", + Self::IpmcEntry => "SAI_OBJECT_TYPE_IPMC_ENTRY", + Self::McastFdbEntry => "SAI_OBJECT_TYPE_MCAST_FDB_ENTRY", + Self::HostifUserDefinedTrap => "SAI_OBJECT_TYPE_HOSTIF_USER_DEFINED_TRAP", + Self::Bridge => "SAI_OBJECT_TYPE_BRIDGE", + Self::BridgePort => "SAI_OBJECT_TYPE_BRIDGE_PORT", + Self::TunnelMapEntry => "SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", + Self::Tam => "SAI_OBJECT_TYPE_TAM", + Self::Srv6Sidlist => "SAI_OBJECT_TYPE_SRV6_SIDLIST", + Self::PortPool => "SAI_OBJECT_TYPE_PORT_POOL", + Self::InsegEntry => "SAI_OBJECT_TYPE_INSEG_ENTRY", + Self::Dtel => "SAI_OBJECT_TYPE_DTEL", + Self::DtelQueueReport => "SAI_OBJECT_TYPE_DTEL_QUEUE_REPORT", + Self::DtelIntSession => "SAI_OBJECT_TYPE_DTEL_INT_SESSION", + Self::DtelReportSession => "SAI_OBJECT_TYPE_DTEL_REPORT_SESSION", + Self::DtelEvent => "SAI_OBJECT_TYPE_DTEL_EVENT", + Self::BfdSession => "SAI_OBJECT_TYPE_BFD_SESSION", + Self::IsolationGroup => "SAI_OBJECT_TYPE_ISOLATION_GROUP", + Self::IsolationGroupMember => "SAI_OBJECT_TYPE_ISOLATION_GROUP_MEMBER", + Self::TamMathFunc => "SAI_OBJECT_TYPE_TAM_MATH_FUNC", + Self::TamReport => "SAI_OBJECT_TYPE_TAM_REPORT", + Self::TamEventThreshold => "SAI_OBJECT_TYPE_TAM_EVENT_THRESHOLD", + Self::TamTelType => "SAI_OBJECT_TYPE_TAM_TEL_TYPE", + Self::TamTransport => "SAI_OBJECT_TYPE_TAM_TRANSPORT", + Self::TamTelemetry => "SAI_OBJECT_TYPE_TAM_TELEMETRY", + Self::TamCollector => "SAI_OBJECT_TYPE_TAM_COLLECTOR", + Self::TamEventAction => "SAI_OBJECT_TYPE_TAM_EVENT_ACTION", + Self::TamEvent => "SAI_OBJECT_TYPE_TAM_EVENT", + Self::NatZoneCounter => "SAI_OBJECT_TYPE_NAT_ZONE_COUNTER", + Self::NatEntry => "SAI_OBJECT_TYPE_NAT_ENTRY", + Self::TamInt => "SAI_OBJECT_TYPE_TAM_INT", + Self::Counter => "SAI_OBJECT_TYPE_COUNTER", + Self::DebugCounter => "SAI_OBJECT_TYPE_DEBUG_COUNTER", + Self::PortConnector => "SAI_OBJECT_TYPE_PORT_CONNECTOR", + Self::PortSerdes => "SAI_OBJECT_TYPE_PORT_SERDES", + Self::Macsec => "SAI_OBJECT_TYPE_MACSEC", + Self::MacsecPort => "SAI_OBJECT_TYPE_MACSEC_PORT", + Self::MacsecFlow => "SAI_OBJECT_TYPE_MACSEC_FLOW", + Self::MacsecSc => "SAI_OBJECT_TYPE_MACSEC_SC", + Self::MacsecSa => "SAI_OBJECT_TYPE_MACSEC_SA", + Self::SystemPort => "SAI_OBJECT_TYPE_SYSTEM_PORT", + Self::FineGrainedHashField => "SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD", + Self::SwitchTunnel => "SAI_OBJECT_TYPE_SWITCH_TUNNEL", + Self::MySidEntry => "SAI_OBJECT_TYPE_MY_SID_ENTRY", + Self::MyMac => "SAI_OBJECT_TYPE_MY_MAC", + Self::NextHopGroupMap => "SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MAP", + Self::Ipsec => "SAI_OBJECT_TYPE_IPSEC", + Self::IpsecPort => "SAI_OBJECT_TYPE_IPSEC_PORT", + Self::IpsecSa => "SAI_OBJECT_TYPE_IPSEC_SA", + Self::GenericProgrammable => "SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE", + Self::ArsProfile => "SAI_OBJECT_TYPE_ARS_PROFILE", + Self::Ars => "SAI_OBJECT_TYPE_ARS", + Self::AclTableChainGroup => "SAI_OBJECT_TYPE_ACL_TABLE_CHAIN_GROUP", + Self::TwampSession => "SAI_OBJECT_TYPE_TWAMP_SESSION", + Self::TamCounterSubscription => "SAI_OBJECT_TYPE_TAM_COUNTER_SUBSCRIPTION", + Self::PoeDevice => "SAI_OBJECT_TYPE_POE_DEVICE", + Self::PoePse => "SAI_OBJECT_TYPE_POE_PSE", + Self::PoePort => "SAI_OBJECT_TYPE_POE_PORT", + Self::IcmpEchoSession => "SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", + Self::PrefixCompressionTable => "SAI_OBJECT_TYPE_PREFIX_COMPRESSION_TABLE", + Self::PrefixCompressionEntry => "SAI_OBJECT_TYPE_PREFIX_COMPRESSION_ENTRY", + Self::SynceClock => "SAI_OBJECT_TYPE_SYNCE_CLOCK", + Self::Max => "SAI_OBJECT_TYPE_MAX", + Self::CustomRangeBase => "SAI_OBJECT_TYPE_CUSTOM_RANGE_BASE", + Self::ExtensionsRangeBase => "SAI_OBJECT_TYPE_EXTENSIONS_RANGE_BASE", + } + } +} + +impl From for u32 { + fn from(obj_type: SaiObjectType) -> Self { + obj_type.to_u32() + } +} + +impl TryFrom for SaiObjectType { + type Error = (); + + fn try_from(value: u32) -> Result { + Self::from_u32(value).ok_or(()) + } +} + +impl fmt::Display for SaiObjectType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_c_name()) + } +} + +impl FromStr for SaiObjectType { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "SAI_OBJECT_TYPE_NULL" => Ok(Self::Null), + "SAI_OBJECT_TYPE_PORT" => Ok(Self::Port), + "SAI_OBJECT_TYPE_LAG" => Ok(Self::Lag), + "SAI_OBJECT_TYPE_VIRTUAL_ROUTER" => Ok(Self::VirtualRouter), + "SAI_OBJECT_TYPE_NEXT_HOP" => Ok(Self::NextHop), + "SAI_OBJECT_TYPE_NEXT_HOP_GROUP" => Ok(Self::NextHopGroup), + "SAI_OBJECT_TYPE_ROUTER_INTERFACE" => Ok(Self::RouterInterface), + "SAI_OBJECT_TYPE_ACL_TABLE" => Ok(Self::AclTable), + "SAI_OBJECT_TYPE_ACL_ENTRY" => Ok(Self::AclEntry), + "SAI_OBJECT_TYPE_ACL_COUNTER" => Ok(Self::AclCounter), + "SAI_OBJECT_TYPE_ACL_RANGE" => Ok(Self::AclRange), + "SAI_OBJECT_TYPE_ACL_TABLE_GROUP" => Ok(Self::AclTableGroup), + "SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER" => Ok(Self::AclTableGroupMember), + "SAI_OBJECT_TYPE_HOSTIF" => Ok(Self::Hostif), + "SAI_OBJECT_TYPE_MIRROR_SESSION" => Ok(Self::MirrorSession), + "SAI_OBJECT_TYPE_SAMPLEPACKET" => Ok(Self::Samplepacket), + "SAI_OBJECT_TYPE_STP" => Ok(Self::Stp), + "SAI_OBJECT_TYPE_HOSTIF_TRAP_GROUP" => Ok(Self::HostifTrapGroup), + "SAI_OBJECT_TYPE_POLICER" => Ok(Self::Policer), + "SAI_OBJECT_TYPE_WRED" => Ok(Self::Wred), + "SAI_OBJECT_TYPE_QOS_MAP" => Ok(Self::QosMap), + "SAI_OBJECT_TYPE_QUEUE" => Ok(Self::Queue), + "SAI_OBJECT_TYPE_SCHEDULER" => Ok(Self::Scheduler), + "SAI_OBJECT_TYPE_SCHEDULER_GROUP" => Ok(Self::SchedulerGroup), + "SAI_OBJECT_TYPE_BUFFER_POOL" => Ok(Self::BufferPool), + "SAI_OBJECT_TYPE_BUFFER_PROFILE" => Ok(Self::BufferProfile), + "SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP" => Ok(Self::IngressPriorityGroup), + "SAI_OBJECT_TYPE_LAG_MEMBER" => Ok(Self::LagMember), + "SAI_OBJECT_TYPE_HASH" => Ok(Self::Hash), + "SAI_OBJECT_TYPE_UDF" => Ok(Self::Udf), + "SAI_OBJECT_TYPE_UDF_MATCH" => Ok(Self::UdfMatch), + "SAI_OBJECT_TYPE_UDF_GROUP" => Ok(Self::UdfGroup), + "SAI_OBJECT_TYPE_FDB_ENTRY" => Ok(Self::FdbEntry), + "SAI_OBJECT_TYPE_SWITCH" => Ok(Self::Switch), + "SAI_OBJECT_TYPE_HOSTIF_TRAP" => Ok(Self::HostifTrap), + "SAI_OBJECT_TYPE_HOSTIF_TABLE_ENTRY" => Ok(Self::HostifTableEntry), + "SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" => Ok(Self::NeighborEntry), + "SAI_OBJECT_TYPE_ROUTE_ENTRY" => Ok(Self::RouteEntry), + "SAI_OBJECT_TYPE_VLAN" => Ok(Self::Vlan), + "SAI_OBJECT_TYPE_VLAN_MEMBER" => Ok(Self::VlanMember), + "SAI_OBJECT_TYPE_HOSTIF_PACKET" => Ok(Self::HostifPacket), + "SAI_OBJECT_TYPE_TUNNEL_MAP" => Ok(Self::TunnelMap), + "SAI_OBJECT_TYPE_TUNNEL" => Ok(Self::Tunnel), + "SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" => Ok(Self::TunnelTermTableEntry), + "SAI_OBJECT_TYPE_FDB_FLUSH" => Ok(Self::FdbFlush), + "SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" => Ok(Self::NextHopGroupMember), + "SAI_OBJECT_TYPE_STP_PORT" => Ok(Self::StpPort), + "SAI_OBJECT_TYPE_RPF_GROUP" => Ok(Self::RpfGroup), + "SAI_OBJECT_TYPE_RPF_GROUP_MEMBER" => Ok(Self::RpfGroupMember), + "SAI_OBJECT_TYPE_L2MC_GROUP" => Ok(Self::L2mcGroup), + "SAI_OBJECT_TYPE_L2MC_GROUP_MEMBER" => Ok(Self::L2mcGroupMember), + "SAI_OBJECT_TYPE_IPMC_GROUP" => Ok(Self::IpmcGroup), + "SAI_OBJECT_TYPE_IPMC_GROUP_MEMBER" => Ok(Self::IpmcGroupMember), + "SAI_OBJECT_TYPE_L2MC_ENTRY" => Ok(Self::L2mcEntry), + "SAI_OBJECT_TYPE_IPMC_ENTRY" => Ok(Self::IpmcEntry), + "SAI_OBJECT_TYPE_MCAST_FDB_ENTRY" => Ok(Self::McastFdbEntry), + "SAI_OBJECT_TYPE_HOSTIF_USER_DEFINED_TRAP" => Ok(Self::HostifUserDefinedTrap), + "SAI_OBJECT_TYPE_BRIDGE" => Ok(Self::Bridge), + "SAI_OBJECT_TYPE_BRIDGE_PORT" => Ok(Self::BridgePort), + "SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY" => Ok(Self::TunnelMapEntry), + "SAI_OBJECT_TYPE_TAM" => Ok(Self::Tam), + "SAI_OBJECT_TYPE_SRV6_SIDLIST" => Ok(Self::Srv6Sidlist), + "SAI_OBJECT_TYPE_PORT_POOL" => Ok(Self::PortPool), + "SAI_OBJECT_TYPE_INSEG_ENTRY" => Ok(Self::InsegEntry), + "SAI_OBJECT_TYPE_DTEL" => Ok(Self::Dtel), + "SAI_OBJECT_TYPE_DTEL_QUEUE_REPORT" => Ok(Self::DtelQueueReport), + "SAI_OBJECT_TYPE_DTEL_INT_SESSION" => Ok(Self::DtelIntSession), + "SAI_OBJECT_TYPE_DTEL_REPORT_SESSION" => Ok(Self::DtelReportSession), + "SAI_OBJECT_TYPE_DTEL_EVENT" => Ok(Self::DtelEvent), + "SAI_OBJECT_TYPE_BFD_SESSION" => Ok(Self::BfdSession), + "SAI_OBJECT_TYPE_ISOLATION_GROUP" => Ok(Self::IsolationGroup), + "SAI_OBJECT_TYPE_ISOLATION_GROUP_MEMBER" => Ok(Self::IsolationGroupMember), + "SAI_OBJECT_TYPE_TAM_MATH_FUNC" => Ok(Self::TamMathFunc), + "SAI_OBJECT_TYPE_TAM_REPORT" => Ok(Self::TamReport), + "SAI_OBJECT_TYPE_TAM_EVENT_THRESHOLD" => Ok(Self::TamEventThreshold), + "SAI_OBJECT_TYPE_TAM_TEL_TYPE" => Ok(Self::TamTelType), + "SAI_OBJECT_TYPE_TAM_TRANSPORT" => Ok(Self::TamTransport), + "SAI_OBJECT_TYPE_TAM_TELEMETRY" => Ok(Self::TamTelemetry), + "SAI_OBJECT_TYPE_TAM_COLLECTOR" => Ok(Self::TamCollector), + "SAI_OBJECT_TYPE_TAM_EVENT_ACTION" => Ok(Self::TamEventAction), + "SAI_OBJECT_TYPE_TAM_EVENT" => Ok(Self::TamEvent), + "SAI_OBJECT_TYPE_NAT_ZONE_COUNTER" => Ok(Self::NatZoneCounter), + "SAI_OBJECT_TYPE_NAT_ENTRY" => Ok(Self::NatEntry), + "SAI_OBJECT_TYPE_TAM_INT" => Ok(Self::TamInt), + "SAI_OBJECT_TYPE_COUNTER" => Ok(Self::Counter), + "SAI_OBJECT_TYPE_DEBUG_COUNTER" => Ok(Self::DebugCounter), + "SAI_OBJECT_TYPE_PORT_CONNECTOR" => Ok(Self::PortConnector), + "SAI_OBJECT_TYPE_PORT_SERDES" => Ok(Self::PortSerdes), + "SAI_OBJECT_TYPE_MACSEC" => Ok(Self::Macsec), + "SAI_OBJECT_TYPE_MACSEC_PORT" => Ok(Self::MacsecPort), + "SAI_OBJECT_TYPE_MACSEC_FLOW" => Ok(Self::MacsecFlow), + "SAI_OBJECT_TYPE_MACSEC_SC" => Ok(Self::MacsecSc), + "SAI_OBJECT_TYPE_MACSEC_SA" => Ok(Self::MacsecSa), + "SAI_OBJECT_TYPE_SYSTEM_PORT" => Ok(Self::SystemPort), + "SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD" => Ok(Self::FineGrainedHashField), + "SAI_OBJECT_TYPE_SWITCH_TUNNEL" => Ok(Self::SwitchTunnel), + "SAI_OBJECT_TYPE_MY_SID_ENTRY" => Ok(Self::MySidEntry), + "SAI_OBJECT_TYPE_MY_MAC" => Ok(Self::MyMac), + "SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MAP" => Ok(Self::NextHopGroupMap), + "SAI_OBJECT_TYPE_IPSEC" => Ok(Self::Ipsec), + "SAI_OBJECT_TYPE_IPSEC_PORT" => Ok(Self::IpsecPort), + "SAI_OBJECT_TYPE_IPSEC_SA" => Ok(Self::IpsecSa), + "SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE" => Ok(Self::GenericProgrammable), + "SAI_OBJECT_TYPE_ARS_PROFILE" => Ok(Self::ArsProfile), + "SAI_OBJECT_TYPE_ARS" => Ok(Self::Ars), + "SAI_OBJECT_TYPE_ACL_TABLE_CHAIN_GROUP" => Ok(Self::AclTableChainGroup), + "SAI_OBJECT_TYPE_TWAMP_SESSION" => Ok(Self::TwampSession), + "SAI_OBJECT_TYPE_TAM_COUNTER_SUBSCRIPTION" => Ok(Self::TamCounterSubscription), + "SAI_OBJECT_TYPE_POE_DEVICE" => Ok(Self::PoeDevice), + "SAI_OBJECT_TYPE_POE_PSE" => Ok(Self::PoePse), + "SAI_OBJECT_TYPE_POE_PORT" => Ok(Self::PoePort), + "SAI_OBJECT_TYPE_ICMP_ECHO_SESSION" => Ok(Self::IcmpEchoSession), + "SAI_OBJECT_TYPE_PREFIX_COMPRESSION_TABLE" => Ok(Self::PrefixCompressionTable), + "SAI_OBJECT_TYPE_PREFIX_COMPRESSION_ENTRY" => Ok(Self::PrefixCompressionEntry), + "SAI_OBJECT_TYPE_SYNCE_CLOCK" => Ok(Self::SynceClock), + "SAI_OBJECT_TYPE_MAX" => Ok(Self::Max), + "SAI_OBJECT_TYPE_CUSTOM_RANGE_BASE" => Ok(Self::CustomRangeBase), + "SAI_OBJECT_TYPE_EXTENSIONS_RANGE_BASE" => Ok(Self::ExtensionsRangeBase), + _ => Err(()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_int_conversion() { + assert_eq!(SaiObjectType::Null.to_u32(), 0); + assert_eq!(SaiObjectType::Port.to_u32(), 1); + assert_eq!(SaiObjectType::from_u32(0), Some(SaiObjectType::Null)); + assert_eq!(SaiObjectType::from_u32(1), Some(SaiObjectType::Port)); + assert_eq!(SaiObjectType::from_u32(u32::MAX), None); + } + + #[test] + fn test_string_conversion() { + assert_eq!(SaiObjectType::Null.to_c_name(), "SAI_OBJECT_TYPE_NULL"); + assert_eq!(SaiObjectType::Port.to_c_name(), "SAI_OBJECT_TYPE_PORT"); + assert_eq!( + "SAI_OBJECT_TYPE_NULL".parse::(), + Ok(SaiObjectType::Null) + ); + assert_eq!( + "SAI_OBJECT_TYPE_PORT".parse::(), + Ok(SaiObjectType::Port) + ); + assert!("INVALID".parse::().is_err()); + } + + #[test] + fn test_display() { + assert_eq!(format!("{}", SaiObjectType::Null), "SAI_OBJECT_TYPE_NULL"); + assert_eq!(format!("{}", SaiObjectType::Port), "SAI_OBJECT_TYPE_PORT"); + } +} diff --git a/crates/countersyncd/tests/data/constants.yml b/crates/countersyncd/tests/data/constants.yml new file mode 100644 index 00000000000..78fba94ac7d --- /dev/null +++ b/crates/countersyncd/tests/data/constants.yml @@ -0,0 +1,4 @@ +constants: + high_frequency_telemetry: + genl_family: "sonic_stel" + genl_multicast_group: "ipfix" diff --git a/crates/countersyncd/tests/integration_test.rs b/crates/countersyncd/tests/integration_test.rs new file mode 100644 index 00000000000..59042bd80ed --- /dev/null +++ b/crates/countersyncd/tests/integration_test.rs @@ -0,0 +1,295 @@ +#[cfg(test)] +mod end_to_end_tests { + use serial_test::serial; + use std::sync::Arc; + use std::time::Duration; + use tokio::{ + spawn, + sync::mpsc::{channel, Sender}, + }; + + use countersyncd::actor::{ + ipfix::IpfixActor, + stats_reporter::{StatsReporterActor, StatsReporterConfig}, + }; + + /// Mock writer for capturing stats output during testing + #[derive(Debug)] + pub struct TestWriter { + pub messages: Arc>>, + } + + impl TestWriter { + pub fn new() -> Self { + Self { + messages: Arc::new(std::sync::Mutex::new(Vec::new())), + } + } + + pub fn get_messages(&self) -> Vec { + self.messages.lock().unwrap().clone() + } + } + + impl Clone for TestWriter { + fn clone(&self) -> Self { + Self { + messages: Arc::clone(&self.messages), + } + } + } + + impl countersyncd::actor::stats_reporter::OutputWriter for TestWriter { + fn write_line(&mut self, line: &str) { + // Use std::sync::Mutex instead of tokio::sync::Mutex to avoid async issues + if let Ok(mut guard) = self.messages.lock() { + guard.push(line.to_string()); + } + } + } + + /// Creates a mock IPFIX template for testing (copied from working test in ipfix.rs) + fn create_test_ipfix_template() -> Vec { + vec![ + 0x00, 0x0A, 0x00, 0x2C, // line 0 Packet 1 - Version 10, Length 44 + 0x00, 0x00, 0x00, 0x00, // line 1 - Export time + 0x00, 0x00, 0x00, 0x01, // line 2 - Sequence number + 0x00, 0x00, 0x00, 0x00, // line 3 - Observation domain ID + 0x00, 0x02, 0x00, 0x1C, // line 4 - Set Header: Set ID=2, Length=28 + 0x01, 0x00, 0x00, 0x03, // line 5 - Template ID 256, 3 fields + 0x01, 0x45, 0x00, 0x08, // line 6 - Field ID 325, 8 bytes + 0x80, 0x01, 0x00, 0x08, // line 7 - Field ID 128, 8 bytes + 0x00, 0x01, 0x00, 0x02, // line 8 - Enterprise Number 1, Field ID 1, 2 bytes + 0x80, 0x02, 0x00, 0x08, // line 9 - Field ID 129, 8 bytes + 0x80, 0x03, 0x80, 0x04, // line 10 - Enterprise Number 128, Field ID 2 + ] + } + + /// Creates test IPFIX data records (matching the template) + fn create_test_ipfix_data() -> Vec { + vec![ + 0x00, 0x0A, 0x00, 0x2C, // line 0 - Version 10, Length 44 + 0x00, 0x00, 0x00, 0x00, // line 1 - Export time + 0x00, 0x00, 0x00, 0x02, // line 2 - Sequence number + 0x00, 0x00, 0x00, 0x00, // line 3 - Observation domain ID + 0x01, 0x00, 0x00, 0x1C, // line 4 - Data Set Header: Set ID=256, Length=28 + // Data Record (26 bytes total) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE8, // Field 1 (8 bytes) = 1000 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xD0, // Field 2 (8 bytes) = 2000 + 0x00, 0x01, // Field 3 (2 bytes) = 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xA0, // Field 4 (8 bytes) = 4000 + ] + } + + /// Helper function to create enhanced netlink actor for testing + async fn create_test_netlink_with_data( + socket_sender: Sender>>, + test_data: Vec>, + ) -> tokio::task::JoinHandle<()> { + spawn(async move { + // Simulate netlink receiving IPFIX data + for data in test_data { + if let Err(e) = socket_sender.send(Arc::new(data)).await { + println!("Failed to send test netlink data: {}", e); + break; + } + tokio::time::sleep(Duration::from_millis(50)).await; + } + }) + } + + /// End-to-end system test that validates the IPFIX processing pipeline: + /// 1. Send IPFIX templates to IpfixActor + /// 2. Send IPFIX data through simulated netlink + /// 3. Verify that SAI statistics are generated and reported + #[tokio::test] + #[serial] // Ensure this test runs in isolation + async fn test_end_to_end_ipfix_processing() { + // Setup logging for the test + let _ = env_logger::builder().is_test(true).try_init(); + + // Create communication channels + let (ipfix_template_sender, ipfix_template_receiver) = channel(10); + let (socket_sender, socket_receiver) = channel(10); + let (saistats_sender, saistats_receiver) = channel(100); + + // Create test writer to capture output + let test_writer = TestWriter::new(); + let test_writer_clone = test_writer.clone(); + + // Initialize actors + let mut ipfix = IpfixActor::new(ipfix_template_receiver, socket_receiver); + ipfix.add_recipient(saistats_sender); + + let reporter_config = StatsReporterConfig { + interval: Duration::from_millis(100), // Fast reporting for test + detailed: true, + max_stats_per_report: Some(10), + }; + let stats_reporter = + StatsReporterActor::new(saistats_receiver, reporter_config, test_writer_clone); + + // Spawn actor tasks + let _ipfix_handle = tokio::task::spawn_blocking(move || { + // Create a new runtime for the IPFIX actor to ensure thread-local variables work correctly + let rt = + tokio::runtime::Runtime::new().expect("Failed to create runtime for IPFIX actor"); + rt.block_on(async move { + IpfixActor::run(ipfix).await; + }); + }); + + let _stats_handle = spawn(async move { + StatsReporterActor::run(stats_reporter).await; + }); + + // Give actors time to start up + tokio::time::sleep(Duration::from_millis(100)).await; + + // Step 1: Send IPFIX template (simulating SwssActor -> IpfixActor) + let template_data = create_test_ipfix_template(); + let template_message = countersyncd::message::ipfix::IPFixTemplatesMessage::new( + "test_session|PORT".to_string(), + Arc::new(template_data), + Some(vec!["Ethernet0".to_string(), "Ethernet1".to_string()]), + ); + + ipfix_template_sender + .send(template_message) + .await + .expect("Failed to send template message"); + + println!("Sent IPFIX template to IpfixActor"); + + // Give time for template processing + tokio::time::sleep(Duration::from_millis(200)).await; + + // Step 2: Send IPFIX data (simulating NetlinkActor -> IpfixActor) + let ipfix_data_packets = vec![ + create_test_ipfix_data(), + create_test_ipfix_data(), // Send multiple packets to see more stats + ]; + + // Start simulated netlink data sender + let _netlink_handle = + create_test_netlink_with_data(socket_sender, ipfix_data_packets).await; + + // Give time for data processing and stats reporting + tokio::time::sleep(Duration::from_millis(500)).await; + + // Step 3: Check that stats were generated and reported + let messages = test_writer.get_messages(); + + // Validate the test results + println!("Captured {} messages from stats reporter", messages.len()); + for (i, msg) in messages.iter().enumerate() { + println!("Message {}: {}", i, msg); + } + + // Step 4: Test session deletion + let delete_message = countersyncd::message::ipfix::IPFixTemplatesMessage::delete( + "test_session|PORT".to_string(), + ); + // Note: This might fail if actors have already shut down, which is expected in tests + let _ = ipfix_template_sender.send(delete_message).await; + + // Give time for deletion processing + tokio::time::sleep(Duration::from_millis(200)).await; + + // Verify that deletion was processed by checking messages again + let final_messages = test_writer.get_messages(); + println!("Final message count: {}", final_messages.len()); + + // For a complete test, we should see: + // 1. Template processing messages + // 2. Data processing messages + // 3. SAI stats generation + assert!( + final_messages.len() > 0, + "Should have received some stats messages" + ); + + println!("End-to-end test completed successfully"); + } + + /// Test helper to create a mock IPFIX data stream for direct injection + #[tokio::test] + async fn test_direct_ipfix_data_injection() { + // This test focuses on the IPFIX -> SAI stats portion of the pipeline + let (ipfix_template_sender, ipfix_template_receiver) = channel(10); + let (socket_sender, socket_receiver) = channel(10); + let (saistats_sender, saistats_receiver) = channel(100); + + let test_writer = TestWriter::new(); + let test_writer_clone = test_writer.clone(); + + // Setup IPFIX actor + let mut ipfix = IpfixActor::new(ipfix_template_receiver, socket_receiver); + ipfix.add_recipient(saistats_sender); + + // Setup stats reporter + let reporter_config = StatsReporterConfig { + interval: Duration::from_millis(50), + detailed: true, + max_stats_per_report: Some(5), + }; + let stats_reporter = + StatsReporterActor::new(saistats_receiver, reporter_config, test_writer_clone); + + // Spawn actors + let _ipfix_handle = tokio::task::spawn_blocking(move || { + // Create a new runtime for the IPFIX actor to ensure thread-local variables work correctly + let rt = + tokio::runtime::Runtime::new().expect("Failed to create runtime for IPFIX actor"); + rt.block_on(async move { + IpfixActor::run(ipfix).await; + }); + }); + + let _stats_handle = spawn(async move { + StatsReporterActor::run(stats_reporter).await; + }); + + // Give actors time to start + tokio::time::sleep(Duration::from_millis(50)).await; + + // Step 1: Send IPFIX template + let template_data = create_test_ipfix_template(); + let template_message = countersyncd::message::ipfix::IPFixTemplatesMessage::new( + "direct_test".to_string(), + Arc::new(template_data), + Some(vec!["Ethernet0".to_string(), "Ethernet1".to_string()]), + ); + + ipfix_template_sender + .send(template_message) + .await + .expect("Failed to send template message"); + + // Give time for template processing + tokio::time::sleep(Duration::from_millis(100)).await; + + // Step 2: Send IPFIX data + let data = create_test_ipfix_data(); + // Note: This might fail if actors have already shut down, which is expected in tests + let _ = socket_sender.send(Arc::new(data)).await; + + // Give time for data processing and stats reporting + tokio::time::sleep(Duration::from_millis(200)).await; + + // Step 3: Verify results + let messages = test_writer.get_messages(); + println!("Direct injection test captured {} messages", messages.len()); + for (i, msg) in messages.iter().enumerate() { + println!("Message {}: {}", i, msg); + } + + // We should have received some stats output + assert!( + messages.len() > 0, + "Should have received stats messages from direct injection" + ); + + println!("Direct injection test completed successfully"); + } +} diff --git a/crates/countersyncd/tests/test_common.rs b/crates/countersyncd/tests/test_common.rs new file mode 100644 index 00000000000..c605912140b --- /dev/null +++ b/crates/countersyncd/tests/test_common.rs @@ -0,0 +1,65 @@ +use log::LevelFilter::Debug; +use std::io::Write; +use std::sync::{Arc, Mutex, Once, OnceLock}; + +static INIT_ENV_LOGGER: Once = Once::new(); + +static LOG_BUFFER: OnceLock>>> = OnceLock::new(); + +fn get_log_buffer() -> &'static Arc>> { + LOG_BUFFER.get_or_init(|| Arc::new(Mutex::new(Vec::new()))) +} + +pub fn capture_logs() -> String { + INIT_ENV_LOGGER.call_once(|| { + env_logger::builder() + .is_test(true) + .filter_level(Debug) + .format({ + let buffer = get_log_buffer().clone(); + move |_, record| { + let mut buffer = buffer.lock().unwrap(); + writeln!(buffer, "[{}] {}", record.level(), record.args()).unwrap(); + Ok(()) + } + }) + .init(); + }); + + let buffer = get_log_buffer().lock().unwrap(); + String::from_utf8(buffer.clone()).expect("Log buffer should be valid UTF-8") +} + +pub fn clear_logs() { + let mut buffer = get_log_buffer().lock().unwrap(); + buffer.clear(); +} + +pub fn assert_logs(expected: Vec<&str>) { + let logs_string = capture_logs(); + let mut logs = logs_string.lines().collect::>(); + let mut reverse_expected = expected.clone(); + reverse_expected.reverse(); + logs.reverse(); + + let mut match_count = 0; + for line in logs { + if reverse_expected.is_empty() { + break; + } + if line.contains(reverse_expected[match_count]) { + match_count += 1; + } + + if match_count == reverse_expected.len() { + break; + } + } + assert_eq!( + match_count, + expected.len(), + "\nexpected logs \n{}\n, got logs \n{}\n", + expected.join("\n"), + logs_string + ); +} diff --git a/debian/rules b/debian/rules index 42e82b2f302..8412a6f1253 100755 --- a/debian/rules +++ b/debian/rules @@ -33,19 +33,34 @@ ifeq ($(ENABLE_ASAN), y) endif ifeq ($(ENABLE_GCOV), y) - configure_opts += --enable-gcov CFLAGS="-g -O0" CXXFLAGS="-g -O0" + configure_opts += --enable-gcov --enable-code-coverage CFLAGS="-g -O0" CXXFLAGS="-g -O0" endif override_dh_auto_configure: dh_auto_configure -- $(configure_opts) + # Configure Rust build for countersyncd + cargo fetch + cargo update -p swss-common + +override_dh_auto_build: + dh_auto_build + # Build and test countersyncd Rust project + cargo build --release override_dh_auto_install: dh_auto_install --destdir=debian/swss ifeq ($(ENABLE_GCOV), y) mkdir -p debian/swss/tmp/gcov - sh ./tests/gcov_support.sh collect swss + lcov -c --directory . --no-external --exclude "$(shell pwd)/tests/*" --exclude "$(shell pwd)/**/tests/*" --ignore-errors gcov --output-file coverage.info + lcov --add-tracefile coverage.info -o coverage.info + lcov_cobertura coverage.info -o coverage.xml + find ./ -type f -regex '.*\.\(h\|cpp\|gcno\|info\)' | tar -cf debian/swss/tmp/gcov/gcov-source.tar -T - endif +override_dh_auto_clean: + dh_auto_clean + # Clean Rust build artifacts + cargo clean || true + override_dh_strip: dh_strip --dbg-package=swss-dbg - diff --git a/debian/swss.install b/debian/swss.install index dc5ff8ea90e..bda357cd4d5 100644 --- a/debian/swss.install +++ b/debian/swss.install @@ -1,3 +1,4 @@ swssconfig/sample/netbouncer.json etc/swss/config.d neighsyncd/restore_neighbors.py usr/bin fpmsyncd/bgp_eoiu_marker.py usr/bin +target/release/countersyncd usr/bin diff --git a/dev/Dockerfile.yml b/dev/Dockerfile.yml new file mode 100644 index 00000000000..acb0d9054b2 --- /dev/null +++ b/dev/Dockerfile.yml @@ -0,0 +1,92 @@ +ARG DEBIAN_VERSION="bookworm" +FROM sonicdev-microsoft.azurecr.io:443/sonic-slave-${DEBIAN_VERSION}:latest + +ARG UID=1000 +ARG GID=1000 + +RUN groupadd -g ${GID} sonicdev && \ + useradd -u ${UID} -g ${GID} -ms /bin/bash sonicdev + +RUN mkdir -p /workspace && \ + mkdir -p /workspace/debs && \ + mkdir -p /workspace/tools && \ + chown -R sonicdev:sonicdev /workspace + +ENV PATH="${PATH}:/workspace/tools" + +RUN apt-get update && \ + sudo apt-get install -y \ + libhiredis-dev \ + libzmq3-dev \ + swig4.0 \ + libdbus-1-dev \ + libteam-dev \ + protobuf-compiler \ + libprotobuf-dev && \ + sudo pip3 install lcov_cobertura + +COPY dev/download_artifact.sh /workspace/tools/download_artifact.sh + +WORKDIR /workspace/debs + +ARG BRANCH_NAME="master" +ARG PLATFORM="amd64" +ARG DEBIAN_VERSION + +# SWSS COMMON + +ARG SWSS_COMMON_PROJECT_NAME="Azure.sonic-swss-common" +ARG SWSS_COMMON_ARTIFACT_NAME="sonic-swss-common" +ARG SWSS_COMMON_FILE_PATHS="/libswsscommon_1.0.0_${PLATFORM}.deb /libswsscommon-dev_1.0.0_${PLATFORM}.deb" + +RUN download_artifact.sh "${SWSS_COMMON_PROJECT_NAME}" "${BRANCH_NAME}" "${SWSS_COMMON_ARTIFACT_NAME}" "${SWSS_COMMON_FILE_PATHS}" + +# SAIREDIS + +ARG SAIREDIS_PROJECT_NAME="Azure.sonic-sairedis" +ARG SAIREDIS_ARTIFACT_NAME="sonic-sairedis" +ARG SAIREDIS_FILE_PATHS="\ + /libsaivs_1.0.0_${PLATFORM}.deb \ + /libsaivs-dev_1.0.0_${PLATFORM}.deb \ + /libsairedis_1.0.0_${PLATFORM}.deb \ + /libsairedis-dev_1.0.0_${PLATFORM}.deb \ + /libsaimetadata_1.0.0_${PLATFORM}.deb \ + /libsaimetadata-dev_1.0.0_${PLATFORM}.deb \ + /syncd-vs_1.0.0_${PLATFORM}.deb \ + " + +RUN download_artifact.sh "${SAIREDIS_PROJECT_NAME}" "${BRANCH_NAME}" "${SAIREDIS_ARTIFACT_NAME}" "${SAIREDIS_FILE_PATHS}" + +# COMMON LIB + +ARG COMMON_LIB_PROJECT_NAME="Azure.sonic-buildimage.common_libs" +ARG COMMON_LIB_ARTIFACT_NAME="common-lib" +ARG COMMON_LIB_FILE_PATHS="\ + /target/debs/${DEBIAN_VERSION}/libnl-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-genl-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-genl-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-route-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-route-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-nf-3-200_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libnl-nf-3-dev_3.7.0-0.2%2Bb1sonic1_${PLATFORM}.deb \ + /target/debs/${DEBIAN_VERSION}/libyang_1.0.73_${PLATFORM}.deb \ + " + +RUN download_artifact.sh "${COMMON_LIB_PROJECT_NAME}" "${BRANCH_NAME}" "${COMMON_LIB_ARTIFACT_NAME}" "${COMMON_LIB_FILE_PATHS}" + +# DASH API + +ARG DASH_API_PROJECT_NAME="sonic-net.sonic-dash-api" +ARG DASH_API_ARTIFACT_NAME="sonic-dash-api" +ARG DASH_API_FILE_PATHS="/libdashapi_1.0.0_${PLATFORM}.deb" + +RUN download_artifact.sh "${DASH_API_PROJECT_NAME}" "${BRANCH_NAME}" "${DASH_API_ARTIFACT_NAME}" "${DASH_API_FILE_PATHS}" + +RUN dpkg -i *.deb + +WORKDIR /workspace + +USER sonicdev + +ENTRYPOINT [ "bash" ] diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml new file mode 100644 index 00000000000..ce51eb6781a --- /dev/null +++ b/dev/docker-compose.yml @@ -0,0 +1,18 @@ +services: + sonicdev: + container_name: sonicdev + build: + context: .. + dockerfile: dev/Dockerfile.yml + args: + - DEBIAN_VERSION + - UID + - GID + - BRANCH_NAME + - PLATFORM + volumes: + - ..:/workspace/sonic-swss + init: true + privileged: true + working_dir: /workspace/sonic-swss + diff --git a/dev/download_artifact.sh b/dev/download_artifact.sh new file mode 100755 index 00000000000..282ca504753 --- /dev/null +++ b/dev/download_artifact.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# download_artifact.sh +# +# targetPaths: space separated list of target paths to download from the artifact +# e.g. +# ./download_artifact.sh "Azure.sonic-swss-common" "master" "sonic-swss-common" "/libswsscommon-dev_1.0.0_amd64.deb /libswsscommon_1.0.0_amd64.deb" + +set -x -e + +pipelineName=${1} +branchName=${2} +artifactName=${3} +targetPaths=${4} + +queryPipelinesUrl="https://dev.azure.com/mssonic/build/_apis/pipelines" + +definitions=$(curl -s "${queryPipelinesUrl}" | jq -r ".value[] | select (.name == \"${pipelineName}\").id") + +queryBuildsUrl="https://dev.azure.com/mssonic/build/_apis/build/builds?definitions=${definitions}&branchName=refs/heads/${branchName}&resultFilter=succeeded&statusFilter=completed&api-version=6.0" + +buildId=$(curl -s ${queryBuildsUrl} | jq -r '.value[0].id') + +queryArtifactUrl="https://dev.azure.com/mssonic/build/_apis/build/builds/${buildId}/artifacts?artifactName=${artifactName}&api-version=6.0" + +function download_artifact { + + target_path=${1} + output_file=$(sed 's/.*\///' <<< ${target_path}) + + download_artifact_url=$(curl -s ${queryArtifactUrl} | jq -r '.resource.downloadUrl') + download_artifact_url=$(sed 's/zip$/file/' <<< ${download_artifact_url}) + download_artifact_url="$download_artifact_url&subPath=${target_path}" + + wget -O ${output_file} ${download_artifact_url} +} + +function download_artifacts { + target_paths_array=(${targetPaths}) + for target_path in "${target_paths_array[@]}" + do + download_artifact ${target_path} + done +} + +download_artifacts diff --git a/doc/swss-schema.md b/doc/swss-schema.md index 74bfd687b82..631594b345b 100644 --- a/doc/swss-schema.md +++ b/doc/swss-schema.md @@ -27,6 +27,8 @@ Stores information for physical switch ports managed by the switch chip. Ports t preemphasis = 1*8HEXDIG *( "," 1*8HEXDIG) ; list of hex values, one per lane idriver = 1*8HEXDIG *( "," 1*8HEXDIG) ; list of hex values, one per lane ipredriver = 1*8HEXDIG *( "," 1*8HEXDIG) ; list of hex values, one per lane + pt_interface_id = 1*4DIGIT ; Path Tracing Interface ID (1-4095) + pt_timestamp_template = "template1" / "template2" / "template3" / "template4" ; Path Tracing Timestamp Template ;QOS Mappings map_dscp_to_tc = ref_hash_key_reference @@ -1021,6 +1023,8 @@ Stores information for physical switch ports managed by the switch chip. Ports t mtu = 1*4DIGIT ; port MTU fec = 1*64VCHAR ; port fec mode autoneg = BIT ; auto-negotiation mode + pt_interface_id = 1*4DIGIT ; Path Tracing Interface ID (1-4095) + pt_timestamp_template = "template1" / "template2" / "template3" / "template4" ; Path Tracing Timestamp Template ### MGMT_PORT_TABLE ;Configuration for management port, including at least one key diff --git a/fdbsyncd/Makefile.am b/fdbsyncd/Makefile.am index b35ee5f3097..93271f4e788 100644 --- a/fdbsyncd/Makefile.am +++ b/fdbsyncd/Makefile.am @@ -15,7 +15,7 @@ fdbsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) $(CF fdbsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon $(COV_LDFLAGS) if GCOV_ENABLED -fdbsyncd_LDADD += -lgcovpreload +fdbsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/fdbsyncd/fdbsync.cpp b/fdbsyncd/fdbsync.cpp index 0d71f721dce..3c1fae145a9 100644 --- a/fdbsyncd/fdbsync.cpp +++ b/fdbsyncd/fdbsync.cpp @@ -324,7 +324,7 @@ void FdbSync::updateLocalMac (struct m_fdb_info *info) if (fdb_type == FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { @@ -384,7 +384,7 @@ void FdbSync::addLocalMac(string key, string op) if (m_fdb_mac[key].type == FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { @@ -432,7 +432,7 @@ void FdbSync::updateMclagRemoteMac (struct m_fdb_info *info) if (fdb_type == FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { @@ -511,7 +511,7 @@ void FdbSync::macRefreshStateDB(int vlan, string kmac) if (m_fdb_mac[key].type == FDB_TYPE_DYNAMIC) { - type = "dynamic"; + type = "dynamic extern_learn"; } else { diff --git a/fdbsyncd/fdbsyncd.cpp b/fdbsyncd/fdbsyncd.cpp index 4f9405cbfdd..70bff7b79f4 100644 --- a/fdbsyncd/fdbsyncd.cpp +++ b/fdbsyncd/fdbsyncd.cpp @@ -16,10 +16,10 @@ int main(int argc, char **argv) { Logger::linkToDbNative("fdbsyncd"); - DBConnector appDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector appDb("APPL_DB", 0); RedisPipeline pipelineAppDB(&appDb); - DBConnector stateDb(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); - DBConnector config_db(CONFIG_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector stateDb("STATE_DB", 0); + DBConnector config_db("CONFIG_DB", 0); FdbSync sync(&pipelineAppDB, &stateDb, &config_db); diff --git a/fpmsyncd/Makefile.am b/fpmsyncd/Makefile.am index 29b81d73814..b61ebdfa930 100644 --- a/fpmsyncd/Makefile.am +++ b/fpmsyncd/Makefile.am @@ -1,4 +1,4 @@ -INCLUDES = -I $(top_srcdir) -I $(top_srcdir)/warmrestart -I $(FPM_PATH) +INCLUDES = -I $(top_srcdir) -I $(top_srcdir)/warmrestart -I $(FPM_PATH) -I $(top_srcdir)/lib bin_PROGRAMS = fpmsyncd @@ -8,14 +8,15 @@ else DBGFLAGS = -g endif -fpmsyncd_SOURCES = fpmsyncd.cpp fpmlink.cpp routesync.cpp $(top_srcdir)/warmrestart/warmRestartHelper.cpp +fpmsyncd_SOURCES = fpmsyncd.cpp fpmlink.cpp routesync.cpp $(top_srcdir)/warmrestart/warmRestartHelper.cpp \ + $(top_srcdir)/lib/orch_zmq_config.cpp fpmsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) fpmsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -fpmsyncd_LDADD += -lgcovpreload +fpmsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/fpmsyncd/fpm/fpm.h b/fpmsyncd/fpm/fpm.h index 8af9b30ae9e..11f13b168a3 100644 --- a/fpmsyncd/fpm/fpm.h +++ b/fpmsyncd/fpm/fpm.h @@ -92,7 +92,7 @@ /* * Largest message that can be sent to or received from the FPM. */ -#define FPM_MAX_MSG_LEN 4096 +#define FPM_MAX_MSG_LEN 16384 /* * Header that precedes each fpm message to/from the FPM. diff --git a/fpmsyncd/fpmlink.cpp b/fpmsyncd/fpmlink.cpp index 13d170a805c..0a7f1021ac8 100644 --- a/fpmsyncd/fpmlink.cpp +++ b/fpmsyncd/fpmlink.cpp @@ -43,6 +43,11 @@ bool FpmLink::isRawProcessing(struct nlmsghdr *h) rtm = (struct rtmsg *)NLMSG_DATA(h); + if (h->nlmsg_type == RTM_NEWSRV6LOCALSID || h->nlmsg_type == RTM_DELSRV6LOCALSID) + { + return true; + } + if (h->nlmsg_type != RTM_NEWROUTE && h->nlmsg_type != RTM_DELROUTE) { return false; @@ -276,6 +281,21 @@ void FpmLink::processFpmMessage(fpm_msg_hdr_t* hdr) /* EVPN Type5 Add route processing */ processRawMsg(nl_hdr); } + else if(nl_hdr->nlmsg_type == RTM_NEWSRV6VPNROUTE || nl_hdr->nlmsg_type == RTM_DELSRV6VPNROUTE) + { + /* rtnl api dont support RTM_NEWSRV6VPNROUTE/RTM_DELSRV6VPNROUTE yet. Processing as raw message*/ + processRawMsg(nl_hdr); + } + else if(nl_hdr->nlmsg_type == RTM_NEWNEXTHOP || nl_hdr->nlmsg_type == RTM_DELNEXTHOP) + { + /* rtnl api dont support RTM_NEWNEXTHOP/RTM_DELNEXTHOP yet. Processing as raw message*/ + processRawMsg(nl_hdr); + } + else if(nl_hdr->nlmsg_type == RTM_NEWPICCONTEXT || nl_hdr->nlmsg_type == RTM_DELPICCONTEXT) + { + /* rtnl api dont support RTM_NEWPICCONTEXT/RTM_DELPICCONTEXT yet. Processing as raw message*/ + processRawMsg(nl_hdr); + } else { NetDispatcher::getInstance().onNetlinkMessage(msg); @@ -315,4 +335,4 @@ bool FpmLink::send(nlmsghdr* nl_hdr) } return true; -} +} \ No newline at end of file diff --git a/fpmsyncd/fpmlink.h b/fpmsyncd/fpmlink.h index c025750edfb..cedeaa967c8 100644 --- a/fpmsyncd/fpmlink.h +++ b/fpmsyncd/fpmlink.h @@ -15,6 +15,13 @@ #include "fpmsyncd/fpminterface.h" #include "fpmsyncd/routesync.h" +#define RTM_NEWSRV6LOCALSID 1000 +#define RTM_DELSRV6LOCALSID 1001 +#define RTM_NEWPICCONTEXT 2000 +#define RTM_DELPICCONTEXT 2001 +#define RTM_NEWSRV6VPNROUTE 3000 +#define RTM_DELSRV6VPNROUTE 3001 + namespace swss { class FpmLink : public FpmInterface { diff --git a/fpmsyncd/fpmsyncd.cpp b/fpmsyncd/fpmsyncd.cpp index 5e16a6a6ca4..5300119101c 100644 --- a/fpmsyncd/fpmsyncd.cpp +++ b/fpmsyncd/fpmsyncd.cpp @@ -9,6 +9,7 @@ #include "subscriberstatetable.h" #include "warmRestartHelper.h" #include "fpmsyncd/fpmlink.h" +#include "fpmsyncd/fpmsyncd.h" #include "fpmsyncd/routesync.h" #include @@ -16,6 +17,26 @@ using namespace std; using namespace swss; +// gSelectTimeout specifies the maximum wait time in milliseconds (-1 == infinite) +static int gSelectTimeout; +#define INFINITE -1 +#define FLUSH_TIMEOUT 500 // 500 milliseconds +static int gFlushTimeout = FLUSH_TIMEOUT; +// consider the traffic is small if pipeline contains < 500 entries +#define SMALL_TRAFFIC 500 + +/** + * @brief fpmsyncd invokes redispipeline's flush with a timer + * + * redispipeline would automatically flush itself when full, + * but fpmsyncd can invoke pipeline's flush even if it's not full yet. + * + * By setting gSelectTimeout, fpmsyncd controls the flush interval. + * + * @param pipeline reference to the pipeline to be flushed + */ +void flushPipeline(RedisPipeline& pipeline); + /* * Default warm-restart timer interval for routing-stack app. To be used only if * no explicit value has been defined in configuration. @@ -61,7 +82,7 @@ int main(int argc, char **argv) DBConnector applStateDb("APPL_STATE_DB", 0); std::unique_ptr routeResponseChannel; - RedisPipeline pipeline(&db); + RedisPipeline pipeline(&db, ROUTE_SYNC_PPL_SIZE); RouteSync sync(&pipeline); DBConnector stateDb("STATE_DB", 0); @@ -77,6 +98,7 @@ int main(int argc, char **argv) NetDispatcher::getInstance().registerMessageHandler(RTM_DELLINK, &sync); rtnl_route_read_protocol_names(DefaultRtProtoPath); + nlmsg_set_default_size(FPM_MAX_MSG_LEN); std::string suppressionEnabledStr; deviceMetadataTable.hget("localhost", "suppress-fib-pending", suppressionEnabledStr); @@ -119,11 +141,11 @@ int main(int argc, char **argv) } /* If warm-restart feature is enabled, execute 'restoration' logic */ - bool warmStartEnabled = sync.m_warmStartHelper.checkAndStart(); + bool warmStartEnabled = sync.getWarmStartHelper().checkAndStart(); if (warmStartEnabled) { /* Obtain warm-restart timer defined for routing application */ - time_t warmRestartIval = sync.m_warmStartHelper.getRestartTimer(); + time_t warmRestartIval = sync.getWarmStartHelper().getRestartTimer(); if (!warmRestartIval) { warmStartTimer.setInterval(timespec{DEFAULT_ROUTING_RESTART_INTERVAL, 0}); @@ -134,7 +156,7 @@ int main(int argc, char **argv) } /* Execute restoration instruction and kick off warm-restart timer */ - if (sync.m_warmStartHelper.runRestoration()) + if (sync.getWarmStartHelper().runRestoration()) { warmStartTimer.start(); s.addSelectable(&warmStartTimer); @@ -149,15 +171,17 @@ int main(int argc, char **argv) } else { - sync.m_warmStartHelper.setState(WarmStart::WSDISABLED); + sync.getWarmStartHelper().setState(WarmStart::WSDISABLED); } + gSelectTimeout = INFINITE; + while (true) { Selectable *temps; /* Reading FPM messages forever (and calling "readMe" to read them) */ - s.select(&temps); + s.select(&temps, gSelectTimeout); /* * Upon expiration of the warm-restart timer or eoiu Hold Timer, proceed to run the @@ -185,7 +209,7 @@ int main(int argc, char **argv) } else if (temps == &eoiuCheckTimer) { - if (sync.m_warmStartHelper.inProgress()) + if (sync.getWarmStartHelper().inProgress()) { if (eoiuFlagsSet(bgpStateTable)) { @@ -284,10 +308,9 @@ int main(int argc, char **argv) sync.onRouteResponse(key, fieldValues); } } - else if (!warmStartEnabled || sync.m_warmStartHelper.isReconciled()) + else if (!warmStartEnabled || sync.getWarmStartHelper().isReconciled()) { - pipeline.flush(); - SWSS_LOG_DEBUG("Pipeline flushed"); + flushPipeline(pipeline); } } } @@ -295,12 +318,39 @@ int main(int argc, char **argv) { cout << "Connection lost, reconnecting..." << endl; } - catch (const exception& e) - { - cout << "Exception \"" << e.what() << "\" had been thrown in daemon" << endl; - return 0; - } } return 1; } + +void flushPipeline(RedisPipeline& pipeline) { + + size_t remaining = pipeline.size(); + + if (remaining == 0) { + gSelectTimeout = INFINITE; + return; + } + + int idle = pipeline.getIdleTime(); + + // flush the pipeline if + // 1. traffic is not scaled (only prevent fpmsyncd from flushing ppl too frequently in the scaled case) + // 2. the idle time since last flush has exceeded gFlushTimeout + // 3. idle <= 0, due to system clock drift, should not happen since we already use steady_clock for timing + if (remaining < SMALL_TRAFFIC || idle >= gFlushTimeout || idle <= 0) { + + pipeline.flush(); + + gSelectTimeout = INFINITE; + + SWSS_LOG_DEBUG("Pipeline flushed"); + } + else + { + // skip flushing ppl and set the timeout of fpmsyncd select function to be (gFlushTimeout - idle) + // so that fpmsyncd select function would block at most for (gFlushTimeout - idle) + // by doing this, we make sure every entry eventually gets flushed + gSelectTimeout = gFlushTimeout - idle; + } +} diff --git a/fpmsyncd/fpmsyncd.h b/fpmsyncd/fpmsyncd.h new file mode 100644 index 00000000000..82ed2749375 --- /dev/null +++ b/fpmsyncd/fpmsyncd.h @@ -0,0 +1,8 @@ +#ifndef __FPMSYNCD__ +#define __FPMSYNCD__ + + +// redispipeline has a maximum capacity of 50000 entries +#define ROUTE_SYNC_PPL_SIZE 50000 + +#endif \ No newline at end of file diff --git a/fpmsyncd/routesync.cpp b/fpmsyncd/routesync.cpp index caf62100846..0b4503262ae 100644 --- a/fpmsyncd/routesync.cpp +++ b/fpmsyncd/routesync.cpp @@ -6,6 +6,7 @@ #include "netmsg.h" #include "ipprefix.h" #include "dbconnector.h" +#include "lib/orch_zmq_config.h" #include "producerstatetable.h" #include "fpmsyncd/fpmlink.h" #include "fpmsyncd/routesync.h" @@ -13,6 +14,9 @@ #include "converter.h" #include #include +#include +#include +#include using namespace std; using namespace swss; @@ -23,6 +27,7 @@ using namespace swss; #define MGMT_VRF_PREFIX "mgmt" #define NHG_DELIMITER ',' +#define MY_SID_KEY_DELIMITER ':' #ifndef ETH_ALEN #define ETH_ALEN 6 @@ -33,10 +38,16 @@ using namespace swss; ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg)))) #endif +#ifndef NHA__RTA +#define NHA_RTA(r) \ + ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct nhmsg)))) +#endif + #define VXLAN_VNI 0 #define VXLAN_RMAC 1 #define NH_ENCAP_VXLAN 100 +#define NH_ENCAP_SRV6_ROUTE 101 #define IPV4_MAX_BYTE 4 #define IPV6_MAX_BYTE 16 @@ -45,6 +56,70 @@ using namespace swss; #define ETHER_ADDR_STRLEN (3*ETH_ALEN) +#define DEFAULT_SRV6_MY_SID_BLOCK_LEN "32" +#define DEFAULT_SRV6_MY_SID_NODE_LEN "16" +#define DEFAULT_SRV6_MY_SID_FUNC_LEN "16" +#define DEFAULT_SRV6_MY_SID_ARG_LEN "0" + +enum srv6_localsid_action { + SRV6_LOCALSID_ACTION_UNSPEC = 0, + SRV6_LOCALSID_ACTION_END = 1, + SRV6_LOCALSID_ACTION_END_X = 2, + SRV6_LOCALSID_ACTION_END_T = 3, + SRV6_LOCALSID_ACTION_END_DX2 = 4, + SRV6_LOCALSID_ACTION_END_DX6 = 5, + SRV6_LOCALSID_ACTION_END_DX4 = 6, + SRV6_LOCALSID_ACTION_END_DT6 = 7, + SRV6_LOCALSID_ACTION_END_DT4 = 8, + SRV6_LOCALSID_ACTION_END_DT46 = 9, + SRV6_LOCALSID_ACTION_B6_ENCAPS = 10, + SRV6_LOCALSID_ACTION_B6_ENCAPS_RED = 11, + SRV6_LOCALSID_ACTION_B6_INSERT = 12, + SRV6_LOCALSID_ACTION_B6_INSERT_RED = 13, + SRV6_LOCALSID_ACTION_UN = 14, + SRV6_LOCALSID_ACTION_UA = 15, + SRV6_LOCALSID_ACTION_UDX2 = 16, + SRV6_LOCALSID_ACTION_UDX6 = 17, + SRV6_LOCALSID_ACTION_UDX4 = 18, + SRV6_LOCALSID_ACTION_UDT6 = 19, + SRV6_LOCALSID_ACTION_UDT4 = 20, + SRV6_LOCALSID_ACTION_UDT46 = 21, +}; + +enum { + SRV6_LOCALSID_UNSPEC = 0, + SRV6_LOCALSID_SID_VALUE = 1, + SRV6_LOCALSID_FORMAT = 2, + SRV6_LOCALSID_ACTION = 3, + SRV6_LOCALSID_VRFNAME = 4, + SRV6_LOCALSID_NH6 = 5, + SRV6_LOCALSID_NH4 = 6, + SRV6_LOCALSID_IIF = 7, + SRV6_LOCALSID_OIF = 8, + SRV6_LOCALSID_BPF = 9, + SRV6_LOCALSID_SIDLIST = 10, + SRV6_LOCALSID_ENCAP_SRC_ADDR = 11, + SRV6_LOCALSID_IFNAME = 12, +}; + +enum { + SRV6_LOCALSID_FORMAT_UNSPEC = 0, + SRV6_LOCALSID_FORMAT_BLOCK_LEN = 1, + SRV6_LOCALSID_FORMAT_NODE_LEN = 2, + SRV6_LOCALSID_FORMAT_FUNC_LEN = 3, + SRV6_LOCALSID_FORMAT_ARG_LEN = 4, +}; + +enum { + ROUTE_ENCAP_SRV6_UNSPEC = 0, + ROUTE_ENCAP_SRV6_VPN_SID = 1, + ROUTE_ENCAP_SRV6_ENCAP_SRC_ADDR = 2, + ROUTE_ENCAP_SRV6_PIC_ID = 3, + ROUTE_ENCAP_SRV6_NH_ID = 4, +}; + +#define MAX_MULTIPATH_NUM 514 + /* Returns name of the protocol passed number represents */ static string getProtocolString(int proto) { @@ -76,11 +151,17 @@ static decltype(auto) makeNlAddr(const T& ip) RouteSync::RouteSync(RedisPipeline *pipeline) : - m_routeTable(pipeline, APP_ROUTE_TABLE_NAME, true), - m_label_routeTable(pipeline, APP_LABEL_ROUTE_TABLE_NAME, true), + // When the feature ORCH_NORTHBOND_ROUTE_ZMQ_ENABLED is enabled, route events must be sent to orchagent via the ZMQ channel. + m_zmqClient(create_local_zmq_client(ORCH_NORTHBOND_ROUTE_ZMQ_ENABLED, false)), + m_routeTable(createProducerStateTable(pipeline, APP_ROUTE_TABLE_NAME, true, m_zmqClient)), + m_nexthop_groupTable(pipeline, APP_NEXTHOP_GROUP_TABLE_NAME, true), + m_label_routeTable(createProducerStateTable(pipeline, APP_LABEL_ROUTE_TABLE_NAME, true, m_zmqClient)), + m_pic_context_groupTable(pipeline, APP_PIC_CONTEXT_TABLE_NAME, true), m_vnet_routeTable(pipeline, APP_VNET_RT_TABLE_NAME, true), m_vnet_tunnelTable(pipeline, APP_VNET_RT_TUNNEL_TABLE_NAME, true), - m_warmStartHelper(pipeline, &m_routeTable, APP_ROUTE_TABLE_NAME, "bgp", "bgp"), + m_warmStartHelper(pipeline, m_routeTable.get(), APP_ROUTE_TABLE_NAME, "bgp", "bgp"), + m_srv6MySidTable(pipeline, APP_SRV6_MY_SID_TABLE_NAME, true), + m_srv6SidListTable(pipeline, APP_SRV6_SID_LIST_TABLE_NAME, true), m_nl_sock(NULL), m_link_cache(NULL) { m_nl_sock = nl_socket_alloc(); @@ -88,6 +169,38 @@ RouteSync::RouteSync(RedisPipeline *pipeline) : rtnl_link_alloc_cache(m_nl_sock, AF_UNSPEC, &m_link_cache); } +void RouteSync::setRouteWithWarmRestart(FieldValueTupleWrapperBase & fvw, + ProducerStateTable & table ) +{ + bool warmRestartInProgress = m_warmStartHelper.inProgress(); + + if (!warmRestartInProgress) + { + table.set(fvw.KeyOpFieldsValuesTupleVector()); + } + else + { + m_warmStartHelper.insertRefreshMap(fvw.KeyOpFieldsValuesTupleVector()[0]); + } +} + +void RouteSync::setTable(FieldValueTupleWrapperBase & fvw, + ProducerStateTable & table ) +{ + // Note: VNET tables don't use warm restart helper, so we directly set + table.set(fvw.KeyOpFieldsValuesTupleVector()); +} + +void RouteSync::delWithWarmRestart(FieldValueTupleWrapperBase && fvw, + ProducerStateTable & table) { + bool warmRestartInProgress = m_warmStartHelper.inProgress(); + if (!warmRestartInProgress) { + table.del(fvw.key); + } else { + m_warmStartHelper.insertRefreshMap(fvw.KeyOpFieldsValuesTupleVectorForDel()); + } +} + char *RouteSync::prefixMac2Str(char *mac, char *buf, int size) { char *ptr = buf; @@ -144,7 +257,270 @@ void RouteSync::parseEncap(struct rtattr *tb, uint32_t &encap_value, string &rma return; } -void RouteSync::getEvpnNextHopSep(string& nexthops, string& vni_list, +/** + * @parseEncapSrv6SteerRoute() - Parses encapsulated SRv6 attributes + * @tb: Pointer to rtattr to look for nested items in. + * @vpn_sid: (output) VPN SID. + * @src_addr: (output) source address for SRv6 encapsulation + * + * Return: void. + */ +void RouteSync::parseEncapSrv6SteerRoute(struct rtattr *tb, string &vpn_sid, + string &src_addr) +{ + struct rtattr *tb_encap[256] = {}; + char vpn_sid_buf[MAX_ADDR_SIZE + 1] = {0}; + char src_addr_buf[MAX_ADDR_SIZE + 1] = {0}; + + parseRtAttrNested(tb_encap, 256, tb); + + if (tb_encap[ROUTE_ENCAP_SRV6_VPN_SID]) + { + vpn_sid += inet_ntop(AF_INET6, RTA_DATA(tb_encap[ROUTE_ENCAP_SRV6_VPN_SID]), + vpn_sid_buf, MAX_ADDR_SIZE); + } + + if (tb_encap[ROUTE_ENCAP_SRV6_ENCAP_SRC_ADDR]) + { + src_addr += + inet_ntop(AF_INET6, RTA_DATA(tb_encap[ROUTE_ENCAP_SRV6_ENCAP_SRC_ADDR]), + src_addr_buf, MAX_ADDR_SIZE); + } + + SWSS_LOG_INFO("Rx vpn_sid:%s src_addr:%s ", vpn_sid.c_str(), + src_addr.c_str()); + + return; +} + +const char *RouteSync::mySidAction2Str(uint32_t action) +{ + switch (action) + { + case SRV6_LOCALSID_ACTION_UNSPEC: + return "unspec"; + case SRV6_LOCALSID_ACTION_END: + return "end"; + case SRV6_LOCALSID_ACTION_END_X: + return "end.x"; + case SRV6_LOCALSID_ACTION_END_T: + return "end.t"; + case SRV6_LOCALSID_ACTION_END_DX6: + return "end.dx6"; + case SRV6_LOCALSID_ACTION_END_DX4: + return "end.dx4"; + case SRV6_LOCALSID_ACTION_END_DT6: + return "end.dt6"; + case SRV6_LOCALSID_ACTION_END_DT4: + return "end.dt4"; + case SRV6_LOCALSID_ACTION_END_DT46: + return "end.dt46"; + case SRV6_LOCALSID_ACTION_UN: + return "un"; + case SRV6_LOCALSID_ACTION_UA: + return "ua"; + case SRV6_LOCALSID_ACTION_UDX6: + return "udx6"; + case SRV6_LOCALSID_ACTION_UDX4: + return "udx4"; + case SRV6_LOCALSID_ACTION_UDT6: + return "udt6"; + case SRV6_LOCALSID_ACTION_UDT4: + return "udt4"; + case SRV6_LOCALSID_ACTION_UDT46: + return "udt46"; + default: + return "unknown"; + } +} + +bool RouteSync::parseEncapSrv6VpnRoute(struct rtattr *tb, uint32_t &pic_id, + uint32_t &nhg_id) +{ + struct rtattr *tb_encap[256] = {}; + + parseRtAttrNested(tb_encap, 256, tb); + + if (tb_encap[ROUTE_ENCAP_SRV6_PIC_ID]) + pic_id = *((uint32_t *)RTA_DATA(tb_encap[ROUTE_ENCAP_SRV6_PIC_ID])); + else { + SWSS_LOG_ERROR("Failed to find rtattr ROUTE_ENCAP_SRV6_PIC_ID"); + return false; + } + + if (tb_encap[ROUTE_ENCAP_SRV6_NH_ID]) + nhg_id = *((uint32_t *)RTA_DATA(tb_encap[ROUTE_ENCAP_SRV6_NH_ID])); + else { + SWSS_LOG_ERROR("Failed to find rtattr ROUTE_ENCAP_SRV6_NH_ID"); + return false; + } + + SWSS_LOG_INFO("pic_id:%d nhg_id:%d ", pic_id, nhg_id); + + return true; +} + +/** + * @parseSrv6MySidFormat() - Parses srv6 MySid format + * @tb: Pointer to rtattr to look for nested items in. + * @block_len: (output) locator block length + * @node_len: (output) locator node length + * @func_len: (output) function length + * @arg_len: (output) argument length + * + * Return: true on success, false otherwise. + */ +bool RouteSync::parseSrv6MySidFormat(struct rtattr *tb, + string &block_len, + string &node_len, string &func_len, + string &arg_len) +{ + struct rtattr *tb_my_sid_format[256] = {}; + uint8_t block_len_buf, node_len_buf, func_len_buf, arg_len_buf; + + parseRtAttrNested(tb_my_sid_format, 4, tb); + + if (tb_my_sid_format[SRV6_LOCALSID_FORMAT_BLOCK_LEN]) + { + block_len_buf = *(uint8_t *)RTA_DATA( + tb_my_sid_format[SRV6_LOCALSID_FORMAT_BLOCK_LEN]); + block_len += to_string(block_len_buf); + } + else + { + block_len += DEFAULT_SRV6_MY_SID_BLOCK_LEN; + } + + if (tb_my_sid_format[SRV6_LOCALSID_FORMAT_NODE_LEN]) + { + node_len_buf = *(uint8_t *)RTA_DATA( + tb_my_sid_format[SRV6_LOCALSID_FORMAT_NODE_LEN]); + node_len += to_string(node_len_buf); + } + else + { + node_len += DEFAULT_SRV6_MY_SID_NODE_LEN; + } + + if (tb_my_sid_format[SRV6_LOCALSID_FORMAT_FUNC_LEN]) + { + func_len_buf = *(uint8_t *)RTA_DATA( + tb_my_sid_format[SRV6_LOCALSID_FORMAT_FUNC_LEN]); + func_len += to_string(func_len_buf); + } + else + { + func_len += DEFAULT_SRV6_MY_SID_FUNC_LEN; + } + + if (tb_my_sid_format[SRV6_LOCALSID_FORMAT_ARG_LEN]) + { + arg_len_buf = *(uint8_t *)RTA_DATA( + tb_my_sid_format[SRV6_LOCALSID_FORMAT_ARG_LEN]); + arg_len += to_string(arg_len_buf); + } + else + { + /* arg_len is optional, by default arg_len is 0 */ + arg_len += DEFAULT_SRV6_MY_SID_ARG_LEN; + } + + SWSS_LOG_INFO("Rx Srv6 MySid block_len:%s node_len:%s func_len:%s arg_len:%s", + block_len.c_str(), node_len.c_str(), func_len.c_str(), + arg_len.c_str()); + + return true; +} + +/** + * @parseSrv6MySid() - Parses sRv6 MySid attributes + * @tb: Pointer to rtattr to look for nested items in. + * @block_len: (output) locator block length + * @node_len: (output) locator node length + * @func_len: (output) function length + * @arg_len: (output) argument length + * @action: (output) behavior defined for the MySID. + * @vrf: (output) VRF name. + * @adj: (output) adjacency. + * + * Return: true on success, false otherwise. + */ +bool RouteSync::parseSrv6MySid(struct rtattr *tb[], string &block_len, + string &node_len, string &func_len, + string &arg_len, string &action, + string &vrf, string &adj, string &intf) +{ + uint32_t action_buf = SRV6_LOCALSID_ACTION_UNSPEC; + char vrf_buf[IFNAMSIZ + 1] = {0}; + char adj_buf[MAX_ADDR_SIZE + 1] = {0}; + char intf_buf[IFNAMSIZ + 1] = {0}; + + if (tb[SRV6_LOCALSID_FORMAT]) + { + if (!parseSrv6MySidFormat(tb[SRV6_LOCALSID_FORMAT], block_len, + node_len, func_len, arg_len)) + { + SWSS_LOG_ERROR("Invalid Srv6 MySid format: block_len=%s, " + "node_len=%s, func_len=%s, arg_len=%s", + block_len.c_str(), node_len.c_str(), func_len.c_str(), arg_len.c_str()); + + return false; + } + } + + if (tb[SRV6_LOCALSID_ACTION]) + { + action_buf = *(uint32_t *)RTA_DATA(tb[SRV6_LOCALSID_ACTION]); + } + + if (tb[SRV6_LOCALSID_NH6]) + { + struct in6_addr *nh6 = + (struct in6_addr *)RTA_DATA(tb[SRV6_LOCALSID_NH6]); + + inet_ntop(AF_INET6, nh6, adj_buf, MAX_ADDR_SIZE); + } + + if (tb[SRV6_LOCALSID_NH4]) + { + struct in_addr *nh4 = + (struct in_addr *)RTA_DATA(tb[SRV6_LOCALSID_NH4]); + + inet_ntop(AF_INET, nh4, adj_buf, MAX_ADDR_SIZE); + } + + if (tb[SRV6_LOCALSID_VRFNAME]) + { + memcpy(vrf_buf, (char *)RTA_DATA(tb[SRV6_LOCALSID_VRFNAME]), + strlen((char *)RTA_DATA(tb[SRV6_LOCALSID_VRFNAME]))); + } + + if (tb[SRV6_LOCALSID_IFNAME]) + { + memcpy(intf_buf, (char *)RTA_DATA(tb[SRV6_LOCALSID_IFNAME]), + strlen((char *)RTA_DATA(tb[SRV6_LOCALSID_IFNAME]))); + } + + action = mySidAction2Str(action_buf); + vrf = vrf_buf; + adj = adj_buf; + intf = intf_buf; + + if (action == "unknown") + { + SWSS_LOG_ERROR("Invalid Srv6 MySid: action=%s", action.c_str()); + return false; + } + + SWSS_LOG_INFO("Rx block_len:%s node_len:%s func_len:%s arg_len:%s " + "action:%s vrf:%s adj:%s intf:%s", + block_len.c_str(), node_len.c_str(), func_len.c_str(), + arg_len.c_str(), action.c_str(), vrf.c_str(), adj.c_str(), intf.c_str()); + + return true; +} + +void RouteSync::getEvpnNextHopSep(string& nexthops, string& vni_list, string& mac_list, string& intf_list) { nexthops += NHG_DELIMITER; @@ -465,6 +841,7 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) inet_ntop(rtm->rtm_family, dstaddr, buf, MAX_ADDR_SIZE), dst_len); } + auto proto_str = getProtocolString(rtm->rtm_protocol); SWSS_LOG_INFO("Receive route message dest ip prefix: %s Op:%s", destipprefix, nlmsg_type == RTM_NEWROUTE ? "add":"del"); @@ -473,27 +850,13 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) * Upon arrival of a delete msg we could either push the change right away, * or we could opt to defer it if we are going through a warm-reboot cycle. */ - bool warmRestartInProgress = m_warmStartHelper.inProgress(); - if (nlmsg_type == RTM_DELROUTE) { - if (!warmRestartInProgress) - { - m_routeTable.del(destipprefix); - return; - } - else - { - SWSS_LOG_INFO("Warm-Restart mode: Receiving delete msg: %s", - destipprefix); - - vector fvVector; - const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, - DEL_COMMAND, - fvVector); - m_warmStartHelper.insertRefreshMap(kfv); - return; - } + SWSS_LOG_INFO("RouteTable del msg: %s", destipprefix); + delWithWarmRestart( + RouteTableFieldValueTupleWrapper{std::move(destipprefix), std::string()}, + *m_routeTable); + return; } else if (nlmsg_type != RTM_NEWROUTE) { @@ -545,145 +908,1105 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) return; } - vector fvVector; - FieldValueTuple nh("nexthop", nexthops); - FieldValueTuple intf("ifname", intf_list); - FieldValueTuple vni("vni_label", vni_list); - FieldValueTuple mac("router_mac", mac_list); - - fvVector.push_back(nh); - fvVector.push_back(intf); - fvVector.push_back(vni); - fvVector.push_back(mac); - - if (!warmRestartInProgress) - { - m_routeTable.set(destipprefix, fvVector); - SWSS_LOG_DEBUG("RouteTable set msg: %s vtep:%s vni:%s mac:%s intf:%s", - destipprefix, nexthops.c_str(), vni_list.c_str(), mac_list.c_str(), intf_list.c_str()); - } - - /* - * During routing-stack restarting scenarios route-updates will be temporarily - * put on hold by warm-reboot logic. - */ - else - { - SWSS_LOG_INFO("Warm-Restart mode: RouteTable set msg: %s vtep:%s vni:%s mac:%s", - destipprefix, nexthops.c_str(), vni_list.c_str(), mac_list.c_str()); + SWSS_LOG_INFO("RouteTable set EVPN msg: %s vtep:%s vni:%s mac:%s intf:%s protocol:%s", + destipprefix, nexthops.c_str(), vni_list.c_str(), mac_list.c_str(), intf_list.c_str(), + proto_str.c_str()); + RouteTableFieldValueTupleWrapper fvw{std::move(destipprefix), std::move(proto_str)}; + fvw.nexthop = std::move(nexthops); + fvw.ifname = std::move(intf_list); + fvw.vni_label = std::move(vni_list); + fvw.router_mac = std::move(mac_list); - const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, - SET_COMMAND, - fvVector); - m_warmStartHelper.insertRefreshMap(kfv); - } + setRouteWithWarmRestart(fvw, *m_routeTable); return; } -void RouteSync::onMsgRaw(struct nlmsghdr *h) +bool RouteSync::getSrv6SteerRouteNextHop(struct nlmsghdr *h, int received_bytes, + struct rtattr *tb[], string &vpn_sid, + string &src_addr) { - int len; - - if ((h->nlmsg_type != RTM_NEWROUTE) - && (h->nlmsg_type != RTM_DELROUTE)) - return; - /* Length validity. */ - len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct ndmsg))); - if (len < 0) - { - SWSS_LOG_ERROR("%s: Message received from netlink is of a broken size %d %zu", - __PRETTY_FUNCTION__, h->nlmsg_len, - (size_t)NLMSG_LENGTH(sizeof(struct ndmsg))); - return; - } - onEvpnRouteMsg(h, len); - return; -} + uint16_t encap = 0; -void RouteSync::onMsg(int nlmsg_type, struct nl_object *obj) -{ - if (nlmsg_type == RTM_NEWLINK || nlmsg_type == RTM_DELLINK) + if (!tb[RTA_MULTIPATH]) { - nl_cache_refill(m_nl_sock, m_link_cache); - return; - } + if (tb[RTA_ENCAP_TYPE]) + { + encap = *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE]); + } - struct rtnl_route *route_obj = (struct rtnl_route *)obj; + if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE] && + *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE]) == + NH_ENCAP_SRV6_ROUTE) + { + parseEncapSrv6SteerRoute(tb[RTA_ENCAP], vpn_sid, src_addr); + } + SWSS_LOG_DEBUG("Rx MsgType:%d encap:%d vpn_sid:%s src_addr:%s", + h->nlmsg_type, encap, vpn_sid.c_str(), + src_addr.c_str()); - /* Supports IPv4 or IPv6 address, otherwise return immediately */ - auto family = rtnl_route_get_family(route_obj); - /* Check for Label route. */ - if (family == AF_MPLS) - { - onLabelRouteMsg(nlmsg_type, obj); - return; + if (vpn_sid.empty()) + { + SWSS_LOG_ERROR("Received an invalid SRv6 route: vpn_sid is empty"); + return false; + } } - if (family != AF_INET && family != AF_INET6) + else { - SWSS_LOG_INFO("Unknown route family support (object: %s)", nl_object_get_type(obj)); - return; + /* This is a multipath route */ + SWSS_LOG_NOTICE("Multipath SRv6 routes aren't supported"); + return false; } - /* Get the index of the master device */ - unsigned int master_index = rtnl_route_get_table(route_obj); - char master_name[IFNAMSIZ] = {0}; + return true; +} +bool RouteSync::getSrv6VpnRouteNextHop(struct nlmsghdr *h, int received_bytes, + struct rtattr *tb[], uint32_t &pic_id, + uint32_t &nhg_id) +{ + uint16_t encap = 0; - /* if the table_id is not set in the route obj then route is for default vrf. */ - if (master_index) + if (!tb[RTA_MULTIPATH]) { - /* Get the name of the master device */ - getIfName(master_index, master_name, IFNAMSIZ); - - /* If the master device name starts with VNET_PREFIX, it is a VNET route. - The VNET name is exactly the name of the associated master device. */ - if (string(master_name).find(VNET_PREFIX) == 0) + if (tb[RTA_ENCAP_TYPE]) { - onVnetRouteMsg(nlmsg_type, obj, string(master_name)); + encap = *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE]); } - /* Otherwise, it is a regular route (include VRF route). */ - else + + if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE] && + *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE]) == + NH_ENCAP_SRV6_ROUTE) { - onRouteMsg(nlmsg_type, obj, master_name); + return parseEncapSrv6VpnRoute(tb[RTA_ENCAP], pic_id, nhg_id); } + + SWSS_LOG_DEBUG("Rx MsgType:%d encap:%d pic_id:%d nhg_id:%d", + h->nlmsg_type, encap, pic_id, + nhg_id); } else { - onRouteMsg(nlmsg_type, obj, NULL); + /* This is a multipath route */ + SWSS_LOG_NOTICE("Multipath SRv6 routes aren't supported"); + return false; } + + return false; } -/* - * Handle regular route (include VRF route) - * @arg nlmsg_type Netlink message type - * @arg obj Netlink object - * @arg vrf Vrf name - */ -void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) -{ - struct rtnl_route *route_obj = (struct rtnl_route *)obj; - struct nl_addr *dip; - char destipprefix[IFNAMSIZ + MAX_ADDR_SIZE + 2] = {0}; +vector +RouteTableFieldValueTupleWrapper::fieldValueTupleVector() { + vector fvVector; + fvVector.push_back(FieldValueTuple("protocol", protocol.c_str())); + fvVector.push_back(FieldValueTuple("blackhole", blackhole.c_str())); + fvVector.push_back(FieldValueTuple("nexthop", nexthop.c_str())); + fvVector.push_back(FieldValueTuple("ifname", ifname.c_str())); + fvVector.push_back(FieldValueTuple("nexthop_group", nexthop_group.c_str())); + fvVector.push_back(FieldValueTuple("mpls_nh", mpls_nh.c_str())); + fvVector.push_back(FieldValueTuple("weight", weight.c_str())); + fvVector.push_back(FieldValueTuple("vni_label", vni_label.c_str())); + fvVector.push_back(FieldValueTuple("router_mac", router_mac.c_str())); + fvVector.push_back(FieldValueTuple("segment", segment.c_str())); + fvVector.push_back(FieldValueTuple("seg_src", seg_src.c_str())); + // Return value optimization will avoid copy of the following vector + return fvVector; +} - if (vrf) - { - /* - * Now vrf device name is required to start with VRF_PREFIX, - * it is difficult to split vrf_name:ipv6_addr. - */ - if (memcmp(vrf, VRF_PREFIX, strlen(VRF_PREFIX))) - { - if(memcmp(vrf, MGMT_VRF_PREFIX, strlen(MGMT_VRF_PREFIX))) - { - SWSS_LOG_ERROR("Invalid VRF name %s (ifindex %u)", vrf, rtnl_route_get_table(route_obj)); - } - else - { - dip = rtnl_route_get_dst(route_obj); - nl_addr2str(dip, destipprefix, MAX_ADDR_SIZE); - SWSS_LOG_INFO("Skip routes for Mgmt VRF name %s (ifindex %u) prefix: %s", vrf, - rtnl_route_get_table(route_obj), destipprefix); - } - return; + + +vector +LabelRouteTableFieldValueTupleWrapper::fieldValueTupleVector() { + vector fvVector; + fvVector.push_back(FieldValueTuple("protocol", protocol.c_str())); + fvVector.push_back(FieldValueTuple("blackhole", blackhole.c_str())); + fvVector.push_back(FieldValueTuple("nexthop", nexthop.c_str())); + fvVector.push_back(FieldValueTuple("ifname", ifname.c_str())); + fvVector.push_back(FieldValueTuple("mpls_nh", mpls_nh.c_str())); + fvVector.push_back(FieldValueTuple("mpls_pop", mpls_pop.c_str())); + return fvVector; +} + + + +vector +VnetRouteTableFieldValueTupleWrapper::fieldValueTupleVector() { + vector fvVector; + fvVector.push_back(FieldValueTuple("nexthop", nexthop.c_str())); + fvVector.push_back(FieldValueTuple("ifname", ifname.c_str())); + return fvVector; +} + + + +vector +VnetTunnelTableFieldValueTupleWrapper::fieldValueTupleVector() { + vector fvVector; + fvVector.push_back(FieldValueTuple("endpoint", endpoint.c_str())); + return fvVector; +} + + + +vector +NextHopGroupTableFieldValueTupleWrapper::fieldValueTupleVector() { + vector fvVector; + fvVector.push_back(FieldValueTuple("nexthop", nexthop.c_str())); + fvVector.push_back(FieldValueTuple("ifname", ifname.c_str())); + fvVector.push_back(FieldValueTuple("weight", weight.c_str())); + return fvVector; +} + + + +vector +Srv6MySidTableFieldValueTupleWrapper::fieldValueTupleVector() { + vector fvVector; + fvVector.push_back(FieldValueTuple("action", action.c_str())); + fvVector.push_back(FieldValueTuple("vrf", vrf.c_str())); + fvVector.push_back(FieldValueTuple("adj", adj.c_str())); + return fvVector; +} + + + +vector +Srv6SidListTableFieldValueTupleWrapper::fieldValueTupleVector() { + vector fvVector; + fvVector.push_back(FieldValueTuple("path", path.c_str())); + return fvVector; +} + + + +void RouteSync::onSrv6SteerRouteMsg(struct nlmsghdr *h, int len) +{ + struct rtmsg *rtm; + struct rtattr *tb[RTA_MAX + 1]; + void *dest = NULL; + char dstaddr[IPV6_MAX_BYTE] = {0}; + int dst_len = 0; + char destipprefix[MAX_ADDR_SIZE + 1] = {0}; + char routeTableKey[IFNAMSIZ + MAX_ADDR_SIZE + 2] = {0}; + int nlmsg_type = h->nlmsg_type; + unsigned int vrf_index; + + rtm = (struct rtmsg *)NLMSG_DATA(h); + + /* Parse attributes and extract fields of interest. */ + memset(tb, 0, sizeof(tb)); + netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); + + if (!tb[RTA_DST]) + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: missing RTA_DST attribute"); + return; + } + + dest = RTA_DATA(tb[RTA_DST]); + + if (rtm->rtm_family == AF_INET) + { + if (rtm->rtm_dst_len > IPV4_MAX_BITLEN) + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: prefix len %d is out of range", + rtm->rtm_dst_len); + return; + } + memcpy(dstaddr, dest, IPV4_MAX_BYTE); + dst_len = rtm->rtm_dst_len; + } + else if (rtm->rtm_family == AF_INET6) + { + if (rtm->rtm_dst_len > IPV6_MAX_BITLEN) + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: prefix len %d is out of range", + rtm->rtm_dst_len); + return; + } + memcpy(dstaddr, dest, IPV6_MAX_BYTE); + dst_len = rtm->rtm_dst_len; + } + else + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: invalid address family %d", + rtm->rtm_family); + return; + } + + inet_ntop(rtm->rtm_family, dstaddr, destipprefix, MAX_ADDR_SIZE); + + SWSS_LOG_DEBUG("Rx MsgType:%d Family:%d Prefix:%s/%d", nlmsg_type, + rtm->rtm_family, destipprefix, dst_len); + + /* Table corresponding to route. */ + if (tb[RTA_TABLE]) + { + vrf_index = *(int *)RTA_DATA(tb[RTA_TABLE]); + } + else + { + vrf_index = rtm->rtm_table; + } + + if (vrf_index) + { + if (!getIfName(vrf_index, routeTableKey, IFNAMSIZ)) + { + SWSS_LOG_ERROR("Fail to get the VRF name (ifindex %u)", vrf_index); + return; + } + /* + * Now vrf device name is required to start with VRF_PREFIX + */ + if (memcmp(routeTableKey, VRF_PREFIX, strlen(VRF_PREFIX))) + { + SWSS_LOG_ERROR("Invalid VRF name %s (ifindex %u)", routeTableKey, + vrf_index); + return; + } + routeTableKey[strlen(routeTableKey)] = ':'; + } + + if ((rtm->rtm_family == AF_INET && dst_len == IPV4_MAX_BITLEN) || + (rtm->rtm_family == AF_INET6 && dst_len == IPV6_MAX_BITLEN)) + { + snprintf(routeTableKey + strlen(routeTableKey), + sizeof(routeTableKey) - strlen(routeTableKey), "%s", + destipprefix); + } + else + { + snprintf(routeTableKey + strlen(routeTableKey), + sizeof(routeTableKey) - strlen(routeTableKey), "%s/%u", + destipprefix, dst_len); + } + + SWSS_LOG_INFO("Received route message dest ip prefix: %s Op:%s", + destipprefix, nlmsg_type == RTM_NEWROUTE ? "add" : "del"); + + if (nlmsg_type != RTM_NEWROUTE && nlmsg_type != RTM_DELROUTE) + { + SWSS_LOG_ERROR("Unknown message-type: %d for %s", nlmsg_type, + destipprefix); + return; + } + + switch (rtm->rtm_type) + { + case RTN_BLACKHOLE: + case RTN_UNREACHABLE: + case RTN_PROHIBIT: + SWSS_LOG_ERROR( + "RTN_BLACKHOLE route not expected (%s)", destipprefix); + return; + case RTN_UNICAST: + break; + + case RTN_MULTICAST: + case RTN_BROADCAST: + case RTN_LOCAL: + SWSS_LOG_NOTICE( + "BUM routes aren't supported yet (%s)", destipprefix); + return; + + default: + return; + } + + /* Get nexthop lists */ + string vpn_sid_str; + string src_addr_str; + bool ret; + + ret = getSrv6SteerRouteNextHop(h, len, tb, vpn_sid_str, src_addr_str); + if (ret == false) + { + SWSS_LOG_NOTICE( + "SRv6 Route issue with RouteTable msg: %s vpn_sid:%s src_addr:%s", + destipprefix, vpn_sid_str.c_str(), src_addr_str.c_str()); + return; + } + + if (vpn_sid_str.empty()) + { + SWSS_LOG_NOTICE("SRv6 IP Prefix: %s vpn_sid is empty", destipprefix); + return; + } + + if (nlmsg_type == RTM_DELROUTE) + { + string routeTableKeyStr = string(routeTableKey); + string srv6SidListTableKey = vpn_sid_str; + + SWSS_LOG_INFO("SRV6 RouteTable del msg: %s", routeTableKeyStr.c_str()); + delWithWarmRestart( + RouteTableFieldValueTupleWrapper{std::move(routeTableKeyStr), std::string()}, + *m_routeTable); + + auto it = m_srv6_sidlist_refcnt.find(srv6SidListTableKey); + if (it != m_srv6_sidlist_refcnt.end()) + { + assert (it->second > 0); + + /* Decrement the refcount for this SID list */ + (it->second)--; + SWSS_LOG_INFO("Refcount for SID list '%s' decreased to %u", + srv6SidListTableKey.c_str(), it->second); + + /* If the refcount drops to zero, remove the SID list from ApplDB */ + if (it->second == 0) + { + m_srv6SidListTable.del(srv6SidListTableKey); + SWSS_LOG_INFO("Refcount for SID list '%s' is zero. SID list removed from ApplDB", + srv6SidListTableKey.c_str()); + + m_srv6_sidlist_refcnt.erase(srv6SidListTableKey); + } + } + else + { + SWSS_LOG_WARN("SID list '%s' not found in the map.", srv6SidListTableKey.c_str()); + } + + return; + } + else if (nlmsg_type == RTM_NEWROUTE) + { + string routeTableKeyStr = string(routeTableKey); + /* Write SID list to SRV6_SID_LIST_TABLE */ + + string srv6SidListTableKey = vpn_sid_str; + + auto it = m_srv6_sidlist_refcnt.find(srv6SidListTableKey); + if (it != m_srv6_sidlist_refcnt.end()) + { + /* SID list already exists: just bump the refcount */ + (it->second)++; + SWSS_LOG_INFO("Refcount for SID list'%s' increased to %u", + srv6SidListTableKey.c_str(), it->second); + } + else + { + /* First time we see this SID list: program it into ApplDB and initialize the refcount to 1 */ + Srv6SidListTableFieldValueTupleWrapper fvw{srv6SidListTableKey}; + fvw.path = vpn_sid_str; + + setTable(fvw, m_srv6SidListTable); + SWSS_LOG_DEBUG("Srv6SidListTable set msg: %s path: %s", + srv6SidListTableKey.c_str(), vpn_sid_str.c_str()); + + m_srv6_sidlist_refcnt[srv6SidListTableKey] = 1; + SWSS_LOG_INFO("SID list '%s' created and refcount initialized to 1", + srv6SidListTableKey.c_str()); + } + + /* Write route to ROUTE_TABLE */ + + SWSS_LOG_INFO("SRV6 RouteTable set msg: %s vpn_sid:%s src_addr:%s", + routeTableKeyStr.c_str(), vpn_sid_str.c_str(), + src_addr_str.empty() ? "NONE" : src_addr_str.c_str()); + RouteTableFieldValueTupleWrapper rfvw{std::move(routeTableKeyStr), ""}; + rfvw.segment = std::move(srv6SidListTableKey); + + if (!src_addr_str.empty()) + { + rfvw.seg_src = std::move(src_addr_str); + } + setRouteWithWarmRestart(rfvw, *m_routeTable); + } + + return; +} + +void RouteSync::onSrv6MySidMsg(struct nlmsghdr *h, int len) +{ + struct rtmsg *rtm; + struct rtattr *tb[RTA_MAX + 1]; + void *sid_value_tmp = NULL; + char sid_value[IPV6_MAX_BYTE] = {0}; + char sid_value_str[MAX_ADDR_SIZE]; + int nlmsg_type = h->nlmsg_type; + + rtm = (struct rtmsg *)NLMSG_DATA(h); + + /* Parse attributes and extract fields of interest. */ + memset(tb, 0, sizeof(tb)); + netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); + + if (!tb[SRV6_LOCALSID_SID_VALUE]) + { + SWSS_LOG_ERROR( + "Received an invalid MySid route: missing SRV6_MY_SID_SID_VALUE attribute"); + return; + } + + sid_value_tmp = RTA_DATA(tb[SRV6_LOCALSID_SID_VALUE]); + + /* + * Only AF_INET6 is allowed for MySid routes + */ + if (rtm->rtm_family == AF_INET) + { + SWSS_LOG_ERROR( + "AF_INET address family is not allowed for MySid"); + return; + } + else if (rtm->rtm_family == AF_INET6) + { + if (rtm->rtm_dst_len > IPV6_MAX_BITLEN) + { + SWSS_LOG_ERROR("Received an invalid MySid: prefix len %d " + "is out of range", + rtm->rtm_dst_len); + return; + } + memcpy(sid_value, sid_value_tmp, IPV6_MAX_BYTE); + } + else + { + SWSS_LOG_ERROR( + "Received an invalid MySid route: invalid address family %d", + rtm->rtm_family); + return; + } + + inet_ntop(AF_INET6, sid_value, sid_value_str, MAX_ADDR_SIZE); + + SWSS_LOG_INFO("Rx MsgType:%d SidValue:%s", nlmsg_type, + sid_value_str); + + if (nlmsg_type != RTM_NEWSRV6LOCALSID && nlmsg_type != RTM_DELSRV6LOCALSID) + { + SWSS_LOG_ERROR("Unknown message-type: %d for %s", nlmsg_type, + sid_value_str); + return; + } + + /* Get nexthop lists */ + string block_len_str; + string node_len_str; + string func_len_str; + string arg_len_str; + string action_str; + string vrf_str; + string adj_str; + string intf_str; + string my_sid_table_key; + + if (!parseSrv6MySid(tb, block_len_str, node_len_str, + func_len_str, arg_len_str, action_str, vrf_str, + adj_str, intf_str)) + { + SWSS_LOG_ERROR("Invalid Srv6 MySid"); + return; + } + + if (block_len_str.empty()) + { + block_len_str = DEFAULT_SRV6_MY_SID_BLOCK_LEN; + } + + if (node_len_str.empty()) + { + node_len_str = DEFAULT_SRV6_MY_SID_NODE_LEN; + } + + if (func_len_str.empty()) + { + func_len_str = DEFAULT_SRV6_MY_SID_FUNC_LEN; + } + + if (arg_len_str.empty()) + { + arg_len_str = DEFAULT_SRV6_MY_SID_ARG_LEN; + } + + my_sid_table_key += block_len_str + MY_SID_KEY_DELIMITER; + my_sid_table_key += node_len_str + MY_SID_KEY_DELIMITER; + my_sid_table_key += func_len_str + MY_SID_KEY_DELIMITER; + my_sid_table_key += arg_len_str + MY_SID_KEY_DELIMITER; + my_sid_table_key += sid_value_str; + + if (nlmsg_type == RTM_DELSRV6LOCALSID) + { + m_srv6MySidTable.del(my_sid_table_key); + return; + } + + if (action_str.empty() || !(action_str.compare("unspec")) || + !(action_str.compare("unknown"))) + { + SWSS_LOG_NOTICE("Mysid IP Prefix: %s act is empty or invalid", + sid_value_str); + return; + } + + if (!(action_str.compare("end.dt6")) && vrf_str.empty()) + { + SWSS_LOG_NOTICE("Mysid End.DT6 IP Prefix: %s vrf is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("end.dt4")) && vrf_str.empty()) + { + SWSS_LOG_NOTICE("Mysid End.DT4 IP Prefix: %s vrf is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("end.dt46")) && vrf_str.empty()) + { + SWSS_LOG_NOTICE("Mysid End.DT46 IP Prefix: %s vrf is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("udt6")) && vrf_str.empty()) + { + SWSS_LOG_NOTICE("Mysid uDT6 IP Prefix: %s vrf is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("udt4")) && vrf_str.empty()) + { + SWSS_LOG_NOTICE("Mysid uDT4 IP Prefix: %s vrf is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("udt46")) && vrf_str.empty()) + { + SWSS_LOG_NOTICE("Mysid uDT46 IP Prefix: %s vrf is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("end.t")) && vrf_str.empty()) + { + SWSS_LOG_NOTICE("Mysid End.T IP Prefix: %s vrf is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("end.x")) && adj_str.empty()) + { + SWSS_LOG_NOTICE("MySid End.X IP Prefix: %s adj is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("ua")) && adj_str.empty()) + { + SWSS_LOG_NOTICE("MySid uA IP Prefix: %s adj is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("end.dx6")) && adj_str.empty()) + { + SWSS_LOG_NOTICE("MySid End.DX6 IP Prefix: %s adj is empty", + sid_value_str); + return; + } + + if (!(action_str.compare("end.dx4")) && adj_str.empty()) + { + SWSS_LOG_NOTICE("MySid End.DX4 IP Prefix: %s adj is empty", + sid_value_str); + return; + } + + Srv6MySidTableFieldValueTupleWrapper fvw{my_sid_table_key}; + fvw.action = std::move(action_str); + if (!vrf_str.empty()) + { + fvw.vrf = std::move(vrf_str); + } + if (!adj_str.empty()) + { + fvw.adj = std::move(adj_str); + // Append the interface name to the adjacency if one is provided + if (!intf_str.empty()) + { + fvw.adj += "@" + intf_str; + } + } + + setTable(fvw, m_srv6MySidTable); + + return; +} + + + +void RouteSync::onSrv6VpnRouteMsg(struct nlmsghdr *h, int len) +{ + struct rtmsg *rtm; + struct rtattr *tb[RTA_MAX + 1]; + void *dest = NULL; + char dstaddr[IPV6_MAX_BYTE] = {0}; + int dst_len = 0; + char destipprefix[MAX_ADDR_SIZE + 1] = {0}; + char routeTableKey[IFNAMSIZ + MAX_ADDR_SIZE + 2] = {0}; + int nlmsg_type = h->nlmsg_type; + unsigned int vrf_index; + + rtm = (struct rtmsg *)NLMSG_DATA(h); + + /* Parse attributes and extract fields of interest. */ + memset(tb, 0, sizeof(tb)); + netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); + + if (!tb[RTA_DST]) + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: missing RTA_DST attribute"); + return; + } + + dest = RTA_DATA(tb[RTA_DST]); + + if (rtm->rtm_family == AF_INET) + { + if (rtm->rtm_dst_len > IPV4_MAX_BITLEN) + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: prefix len %d is out of range", + rtm->rtm_dst_len); + return; + } + memcpy(dstaddr, dest, IPV4_MAX_BYTE); + dst_len = rtm->rtm_dst_len; + } + else if (rtm->rtm_family == AF_INET6) + { + if (rtm->rtm_dst_len > IPV6_MAX_BITLEN) + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: prefix len %d is out of range", + rtm->rtm_dst_len); + return; + } + memcpy(dstaddr, dest, IPV6_MAX_BYTE); + dst_len = rtm->rtm_dst_len; + } + else + { + SWSS_LOG_ERROR( + "Received an invalid SRv6 route: invalid address family %d", + rtm->rtm_family); + return; + } + + inet_ntop(rtm->rtm_family, dstaddr, destipprefix, MAX_ADDR_SIZE); + + SWSS_LOG_DEBUG("Rx MsgType:%d Family:%d Prefix:%s/%d", nlmsg_type, + rtm->rtm_family, destipprefix, dst_len); + + /* Table corresponding to route. */ + if (tb[RTA_TABLE]) + { + vrf_index = *(int *)RTA_DATA(tb[RTA_TABLE]); + } + else + { + vrf_index = rtm->rtm_table; + } + + if (vrf_index) + { + if (!getIfName(vrf_index, routeTableKey, IFNAMSIZ)) + { + SWSS_LOG_ERROR("Fail to get the VRF name (ifindex %u)", vrf_index); + return; + } + /* + * Now vrf device name is required to start with VRF_PREFIX + */ + if (memcmp(routeTableKey, VRF_PREFIX, strlen(VRF_PREFIX))) + { + SWSS_LOG_ERROR("Invalid VRF name %s (ifindex %u)", routeTableKey, + vrf_index); + return; + } + routeTableKey[strlen(routeTableKey)] = ':'; + } + + if ((rtm->rtm_family == AF_INET && dst_len == IPV4_MAX_BITLEN) || + (rtm->rtm_family == AF_INET6 && dst_len == IPV6_MAX_BITLEN)) + { + snprintf(routeTableKey + strlen(routeTableKey), + sizeof(routeTableKey) - strlen(routeTableKey), "%s", + destipprefix); + } + else + { + snprintf(routeTableKey + strlen(routeTableKey), + sizeof(routeTableKey) - strlen(routeTableKey), "%s/%u", + destipprefix, dst_len); + } + + SWSS_LOG_INFO("Received route message dest ip prefix: %s Op:%s", + destipprefix, nlmsg_type == RTM_NEWSRV6VPNROUTE ? "add" : "del"); + + if (nlmsg_type != RTM_NEWSRV6VPNROUTE && nlmsg_type != RTM_DELSRV6VPNROUTE) + { + SWSS_LOG_ERROR("Unknown message-type: %d for %s", nlmsg_type, + destipprefix); + return; + } + + switch (rtm->rtm_type) + { + case RTN_BLACKHOLE: + case RTN_UNREACHABLE: + case RTN_PROHIBIT: + SWSS_LOG_ERROR( + "RTN_BLACKHOLE route not expected (%s)", destipprefix); + return; + case RTN_UNICAST: + break; + + case RTN_MULTICAST: + case RTN_BROADCAST: + case RTN_LOCAL: + SWSS_LOG_NOTICE( + "BUM routes aren't supported yet (%s)", destipprefix); + return; + + default: + return; + } + + uint32_t pic_id; + uint32_t nhg_id; + bool ret; + + ret = getSrv6VpnRouteNextHop(h, len, tb, pic_id, nhg_id); + if(!ret){ + return ; + } + + if (nlmsg_type == RTM_DELSRV6VPNROUTE) + { + SWSS_LOG_INFO("RouteTable del msg: %s", routeTableKey); + delWithWarmRestart( + RouteTableFieldValueTupleWrapper{std::move(routeTableKey), std::string()}, + *m_routeTable); + return; + } + else if (nlmsg_type == RTM_NEWSRV6VPNROUTE) + { + auto nhg_it = m_nh_groups.find(nhg_id); + auto pic_it = m_nh_groups.find(pic_id); + if(nhg_it == m_nh_groups.end() || pic_it == m_nh_groups.end()) + { + SWSS_LOG_ERROR("Can not find pic or nexthop for vpn route :%s", routeTableKey); + return ; + } + + NextHopGroup &nhg = nhg_it->second; + NextHopGroup &pic = pic_it->second; + if(nhg.group.size() == 0) + { + vector fvVector; + struct NextHopField nhField; + getPicContextGroupFields(pic, nhField); + FieldValueTuple nh("nexthop", nhField.nexthops.c_str()); + FieldValueTuple vpn_sid("vpn_sid", nhField.vpn_sid.c_str()); + FieldValueTuple seg_srcs("seg_src", nhField.seg_srcs.c_str()); + FieldValueTuple pic_context_id("pic_context_id", ""); + FieldValueTuple nexthop_group("nexthop_group", ""); + fvVector.push_back(nh); + fvVector.push_back(vpn_sid); + fvVector.push_back(seg_srcs); + fvVector.push_back(pic_context_id); + fvVector.push_back(nexthop_group); + //Using route-table only for single next-hop + string nexthops, ifnames, weights; + getNextHopGroupFields(nhg, nexthops, ifnames, weights); + FieldValueTuple intf("ifname", ifnames.c_str()); + fvVector.push_back(intf); + if(!weights.empty()) + { + FieldValueTuple wg("weight", weights.c_str()); + fvVector.push_back(wg); + } + m_routeTable->set(routeTableKey, fvVector); + + SWSS_LOG_DEBUG("NextHop group id %d is a single nexthop address. Filling the route table %s with nexthop and ifname", nhg_id, destipprefix); + } + else{ + vector fvVectorVpnRoute; + FieldValueTuple pic_context_id("pic_context_id", getNextHopGroupKeyAsString(pic_id)); + fvVectorVpnRoute.push_back(pic_context_id); + + vector fvVector; + struct NextHopField nhField; + string key = getNextHopGroupKeyAsString(nhg_id); + getPicContextGroupFields(pic, nhField); + FieldValueTuple seg_srcs("seg_src", nhField.seg_srcs.c_str()); + fvVector.push_back(seg_srcs); + m_nexthop_groupTable.set(key.c_str(), fvVector); + + FieldValueTuple nexthop_group("nexthop_group", getNextHopGroupKeyAsString(nhg_id)); + fvVectorVpnRoute.push_back(nexthop_group); + + FieldValueTuple nh("nexthop", ""); + FieldValueTuple vpn_sid("vpn_sid", ""); + FieldValueTuple seg_srcs_route("seg_src", ""); + FieldValueTuple intf("ifname", ""); + fvVectorVpnRoute.push_back(nh); + fvVectorVpnRoute.push_back(vpn_sid); + fvVectorVpnRoute.push_back(seg_srcs_route); + fvVectorVpnRoute.push_back(intf); + m_routeTable->set(routeTableKey, fvVectorVpnRoute); + } + } + + return; +} +uint16_t RouteSync::getEncapType(struct nlmsghdr *h) +{ + int len; + uint16_t encap_type = 0; + struct rtmsg *rtm; + struct rtattr *tb[RTA_MAX + 1]; + + rtm = (struct rtmsg *)NLMSG_DATA(h); + + if (h->nlmsg_type != RTM_NEWROUTE && h->nlmsg_type != RTM_DELROUTE) + { + return 0; + } + + len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct rtmsg))); + if (len < 0) + { + return 0; + } + + memset(tb, 0, sizeof(tb)); + netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); + + if (!tb[RTA_MULTIPATH]) + { + if (tb[RTA_ENCAP_TYPE]) + { + encap_type = *(short *)RTA_DATA(tb[RTA_ENCAP_TYPE]); + } + } + else + { + /* This is a multipath route */ + int len; + struct rtnexthop *rtnh = + (struct rtnexthop *)RTA_DATA(tb[RTA_MULTIPATH]); + len = (int)RTA_PAYLOAD(tb[RTA_MULTIPATH]); + struct rtattr *subtb[RTA_MAX + 1]; + + for (;;) + { + if (len < (int)sizeof(*rtnh) || rtnh->rtnh_len > len) + { + break; + } + + if (rtnh->rtnh_len > sizeof(*rtnh)) + { + memset(subtb, 0, sizeof(subtb)); + netlink_parse_rtattr(subtb, RTA_MAX, RTNH_DATA(rtnh), + (int)(rtnh->rtnh_len - sizeof(*rtnh))); + if (subtb[RTA_ENCAP_TYPE]) + { + encap_type = *(uint16_t *)RTA_DATA(subtb[RTA_ENCAP_TYPE]); + break; + } + } + + if (rtnh->rtnh_len == 0) + { + break; + } + + len -= NLMSG_ALIGN(rtnh->rtnh_len); + rtnh = RTNH_NEXT(rtnh); + } + } + + SWSS_LOG_INFO("Rx MsgType:%d Encap:%d", h->nlmsg_type, encap_type); + + return encap_type; +} + +void RouteSync::onMsgRaw(struct nlmsghdr *h) +{ + int len; + + if ((h->nlmsg_type != RTM_NEWROUTE) + && (h->nlmsg_type != RTM_DELROUTE) + && (h->nlmsg_type != RTM_NEWNEXTHOP) + && (h->nlmsg_type != RTM_DELNEXTHOP) + && (h->nlmsg_type != RTM_NEWPICCONTEXT) + && (h->nlmsg_type != RTM_DELPICCONTEXT) + && (h->nlmsg_type != RTM_NEWSRV6VPNROUTE) + && (h->nlmsg_type != RTM_DELSRV6VPNROUTE) + && (h->nlmsg_type != RTM_NEWSRV6LOCALSID) + && (h->nlmsg_type != RTM_DELSRV6LOCALSID)) + return; + + if(h->nlmsg_type == RTM_NEWNEXTHOP || h->nlmsg_type == RTM_DELNEXTHOP) + { + len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + } + else if(h->nlmsg_type == RTM_NEWPICCONTEXT || h->nlmsg_type == RTM_DELPICCONTEXT) + { + len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + } + else + { + len = (int)(h->nlmsg_len - NLMSG_LENGTH(sizeof(struct ndmsg))); + } + /* Length validity. */ + if (len < 0) + { + SWSS_LOG_ERROR("%s: Message received from netlink is of a broken size %d %zu", + __PRETTY_FUNCTION__, h->nlmsg_len, + (size_t)NLMSG_LENGTH(sizeof(struct ndmsg))); + return; + } + + if(h->nlmsg_type == RTM_NEWNEXTHOP || h->nlmsg_type == RTM_DELNEXTHOP) + { + onNextHopMsg(h, len); + return; + } + if(h->nlmsg_type == RTM_NEWPICCONTEXT || h->nlmsg_type == RTM_DELPICCONTEXT) + { + onPicContextMsg(h, len); + return; + } + if ((h->nlmsg_type == RTM_NEWSRV6VPNROUTE) + || (h->nlmsg_type == RTM_DELSRV6VPNROUTE)) + { + onSrv6VpnRouteMsg(h, len); + return; + } + if ((h->nlmsg_type == RTM_NEWSRV6LOCALSID) + || (h->nlmsg_type == RTM_DELSRV6LOCALSID)) + { + onSrv6MySidMsg(h, len); + return; + } + + switch (getEncapType(h)) + { + case NH_ENCAP_SRV6_ROUTE: + onSrv6SteerRouteMsg(h, len); + break; + default: + /* + * Currently only SRv6 route, SRv6 My SID, and EVPN + * encapsulation types are supported. If the encapsulation + * type is not SRv6 route or SRv6 My SID, we fall back + * to EVPN. The onEvpnRouteMsg() handler will verify that the + * route is actually an EVPN route. If it is not, this handler + * will reject the route. + */ + onEvpnRouteMsg(h, len); + break; + } +} + +void RouteSync::onMsg(int nlmsg_type, struct nl_object *obj) +{ + if (nlmsg_type == RTM_NEWLINK || nlmsg_type == RTM_DELLINK) + { + nl_cache_refill(m_nl_sock, m_link_cache); + return; + } + + struct rtnl_route *route_obj = (struct rtnl_route *)obj; + + /* Supports IPv4 or IPv6 address, otherwise return immediately */ + auto family = rtnl_route_get_family(route_obj); + /* Check for Label route. */ + if (family == AF_MPLS) + { + onLabelRouteMsg(nlmsg_type, obj); + return; + } + if (family != AF_INET && family != AF_INET6) + { + SWSS_LOG_INFO("Unknown route family support (object: %s)", nl_object_get_type(obj)); + return; + } + + /* Get the index of the master device */ + unsigned int master_index = rtnl_route_get_table(route_obj); + char master_name[IFNAMSIZ] = {0}; + + /* if the table_id is not set in the route obj then route is for default vrf. */ + if (master_index) + { + /* Get the name of the master device */ + getIfName(master_index, master_name, IFNAMSIZ); + + /* If the master device name starts with VNET_PREFIX, it is a VNET route. + The VNET name is exactly the name of the associated master device. */ + if (string(master_name).find(VNET_PREFIX) == 0) + { + onVnetRouteMsg(nlmsg_type, obj, string(master_name)); + } + /* Otherwise, it is a regular route (include VRF route). */ + else + { + onRouteMsg(nlmsg_type, obj, master_name); + } + } + else + { + onRouteMsg(nlmsg_type, obj, NULL); + } +} + +/* + * Handle regular route (include VRF route) + * @arg nlmsg_type Netlink message type + * @arg obj Netlink object + * @arg vrf Vrf name + */ +void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) +{ + struct rtnl_route *route_obj = (struct rtnl_route *)obj; + struct nl_addr *dip; + char destipprefix[IFNAMSIZ + MAX_ADDR_SIZE + 2] = {0}; + + if (vrf) + { + /* + * Now vrf device name is required to start with VRF_PREFIX, + * it is difficult to split vrf_name:ipv6_addr. + */ + if (memcmp(vrf, VRF_PREFIX, strlen(VRF_PREFIX))) + { + if(memcmp(vrf, MGMT_VRF_PREFIX, strlen(MGMT_VRF_PREFIX))) + { + SWSS_LOG_ERROR("Invalid VRF name %s (ifindex %u)", vrf, rtnl_route_get_table(route_obj)); + } + else + { + dip = rtnl_route_get_dst(route_obj); + nl_addr2str(dip, destipprefix, MAX_ADDR_SIZE); + SWSS_LOG_INFO("Skip routes for Mgmt VRF name %s (ifindex %u) prefix: %s", vrf, + rtnl_route_get_table(route_obj), destipprefix); + } + return; } memcpy(destipprefix, vrf, strlen(vrf)); destipprefix[strlen(vrf)] = ':'; @@ -696,163 +2019,499 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) * Upon arrival of a delete msg we could either push the change right away, * or we could opt to defer it if we are going through a warm-reboot cycle. */ - bool warmRestartInProgress = m_warmStartHelper.inProgress(); - if (nlmsg_type == RTM_DELROUTE) { - if (!warmRestartInProgress) + SWSS_LOG_INFO("RouteTable del msg: %s", destipprefix); + delWithWarmRestart(RouteTableFieldValueTupleWrapper{std::move(destipprefix), ""}, + *m_routeTable); + return; + + } + else if (nlmsg_type != RTM_NEWROUTE) + { + SWSS_LOG_INFO("Unknown message-type: %d for %s", nlmsg_type, destipprefix); + return; + } + + if (!isSuppressionEnabled()) + { + sendOffloadReply(route_obj); + } + auto proto_num = rtnl_route_get_protocol(route_obj); + auto proto_str = getProtocolString(proto_num); + FieldValueTuple proto("protocol", proto_str); + + switch (rtnl_route_get_type(route_obj)) + { + case RTN_BLACKHOLE: + { + SWSS_LOG_INFO("RouteTable set blackhole msg: %s", destipprefix); + RouteTableFieldValueTupleWrapper fvw {std::move(destipprefix), std::move(proto_str)}; + fvw.blackhole = "true"; + setRouteWithWarmRestart(fvw, *m_routeTable); + return; + } + case RTN_UNICAST: + break; + + case RTN_MULTICAST: + case RTN_BROADCAST: + case RTN_LOCAL: + SWSS_LOG_INFO("BUM routes aren't supported yet (%s)", destipprefix); + return; + + default: + return; + } + + RouteTableFieldValueTupleWrapper fvw {destipprefix, std::move(proto_str)}; + string gw_list; + string intf_list; + string mpls_list; + string weights; + + string nhg_id_key; + uint32_t nhg_id = rtnl_route_get_nh_id(route_obj); + if(nhg_id) + { + const auto itg = m_nh_groups.find(nhg_id); + if(itg == m_nh_groups.end()) + { + SWSS_LOG_ERROR("NextHop group id %d not found. Dropping the route %s", nhg_id, destipprefix); + return; + } + NextHopGroup& nhg = itg->second; + if(nhg.group.size() == 0) + { + // Using route-table only for single next-hop + string nexthops = nhg.nexthop.empty() ? (rtnl_route_get_family(route_obj) == AF_INET ? "0.0.0.0" : "::") : nhg.nexthop; + string ifnames, weights; + + getNextHopGroupFields(nhg, nexthops, ifnames, weights, rtnl_route_get_family(route_obj)); + fvw.nexthop = std::move(nexthops); + fvw.ifname = std::move(ifnames); + if (!weights.empty()) + fvw.weight = std::move(weights); + + SWSS_LOG_DEBUG("NextHop group id %d is a single nexthop address. Filling the route table %s with nexthop and ifname", nhg_id, destipprefix); + } + else + { + nhg_id_key = getNextHopGroupKeyAsString(nhg_id); + fvw.nexthop_group = std::move(nhg_id_key); + installNextHopGroup(nhg_id); + } + } + else + { + struct nl_list_head *nhs = rtnl_route_get_nexthops(route_obj); + if (!nhs) { - m_routeTable.del(destipprefix); + SWSS_LOG_INFO("Nexthop list is empty for %s", destipprefix); return; } + + /* Get nexthop lists */ + + getNextHopList(route_obj, gw_list, mpls_list, intf_list); + weights = getNextHopWt(route_obj); + + vector alsv = tokenize(intf_list, NHG_DELIMITER); + + if (alsv.size() == 1) + { + if (alsv[0] == "eth0" || alsv[0] == "docker0" || alsv[0] == "eth1-midplane") + { + SWSS_LOG_DEBUG("Skip routes to eth0 or docker0 or eth1-midplane: %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str()); + SWSS_LOG_INFO("RouteTable del msg for eth0/docker0/eth1-midplane route: %s", destipprefix); + delWithWarmRestart(RouteTableFieldValueTupleWrapper{std::move(destipprefix), ""}, + *m_routeTable); + return; + } + } + else + { + for (auto alias : alsv) + { + /* + * A change in FRR behavior from version 7.2 to 7.5 causes the default route to be updated to eth0 + * during interface up/down events. This skips routes to eth0 or docker0 to avoid such behavior. + */ + if (alias == "eth0" || alias == "docker0" || alias == "eth1-midplane") + { + SWSS_LOG_DEBUG("Skip routes to eth0 or docker0 or eth1-midplane: %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str()); + continue; + } + } + } + + + fvw.nexthop = std::move(gw_list); + fvw.ifname = std::move(intf_list); + + if (!mpls_list.empty()) + { + fvw.mpls_nh = std::move(mpls_list); + } + if (!weights.empty()) + { + fvw.weight = std::move(weights); + } + } + + setRouteWithWarmRestart(fvw, *m_routeTable); + if (nhg_id) + { + SWSS_LOG_INFO("RouteTable set msg with NHG: %s nhg_id:%d", destipprefix, nhg_id); + } + else + { + SWSS_LOG_INFO("RouteTable set msg: %s nexthop:%s ifname:%s mpls:%s weight:%s", + destipprefix, gw_list.c_str(), intf_list.c_str(), + mpls_list.empty() ? "na" : mpls_list.c_str(), + weights.empty() ? "na" : weights.c_str()); + } +} + +/* + * Handle Nexthop msg + * @arg nlmsghdr Netlink messaged + */ +void RouteSync::onNextHopMsg(struct nlmsghdr *h, int len) +{ + int nlmsg_type = h->nlmsg_type; + uint32_t id = 0; + unsigned char addr_family; + int32_t ifindex = -1, grp_count = 0; + string ifname; + struct nhmsg *nhm = NULL; + struct rtattr *tb[NHA_MAX + 1] = {}; + struct in_addr ipv4 = {0}; + struct in6_addr ipv6 = {0}; + char gateway[INET6_ADDRSTRLEN] = {0}; + char ifname_unknown[IFNAMSIZ] = "unknown"; + + nhm = (struct nhmsg *)NLMSG_DATA(h); + + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wcast-align" + struct rtattr* rta = NHA_RTA(nhm); + #pragma GCC diagnostic pop + + netlink_parse_rtattr(tb, NHA_MAX, rta, len); + + if (!tb[NHA_ID]) { + SWSS_LOG_ERROR( + "Nexthop group without an ID received from the zebra"); + return; + } + + /* We use the ID key'd nhg table for kernel updates */ + id = *((uint32_t *)RTA_DATA(tb[NHA_ID])); + + addr_family = nhm->nh_family; + + if (nlmsg_type == RTM_NEWNEXTHOP) + { + if (tb[NHA_GROUP]) + { + SWSS_LOG_INFO("New nexthop group message!"); + + struct nexthop_grp *nha_grp = (struct nexthop_grp *)RTA_DATA(tb[NHA_GROUP]); + grp_count = (int)(RTA_PAYLOAD(tb[NHA_GROUP]) / sizeof(*nha_grp)); + + if (grp_count > MAX_MULTIPATH_NUM) + { + SWSS_LOG_ERROR("Nexthop group count (%d) exceeds the maximum allowed (%d). Clamping to maximum.", grp_count, MAX_MULTIPATH_NUM); + grp_count = MAX_MULTIPATH_NUM; + } + + vector> group(grp_count); + for (int i = 0; i < grp_count; i++) + { + group[i] = std::make_pair(nha_grp[i].id, nha_grp[i].weight + 1); + } + + auto it = m_nh_groups.find(id); + if (it != m_nh_groups.end()) + { + NextHopGroup &nhg = it->second; + nhg.group = group; + updateNextHopGroupDb(nhg); + } + else + { + NextHopGroup nhg = NextHopGroup(id, group); + m_nh_groups.insert({id, NextHopGroup(id, group)}); + updateNextHopGroupDb(nhg); + } + } else { - SWSS_LOG_INFO("Warm-Restart mode: Receiving delete msg: %s", - destipprefix); + if (tb[NHA_GATEWAY]) + { + if (addr_family == AF_INET) + { + memcpy(&ipv4, (void *)RTA_DATA(tb[NHA_GATEWAY]), 4); + inet_ntop(AF_INET, &ipv4, gateway, INET_ADDRSTRLEN); + } + else if (addr_family == AF_INET6) + { + memcpy(&ipv6, (void *)RTA_DATA(tb[NHA_GATEWAY]), 16); + inet_ntop(AF_INET6, &ipv6, gateway, INET6_ADDRSTRLEN); + } + else + { + SWSS_LOG_ERROR("Unexpected nexthop address family"); + return; + } + } + + if (tb[NHA_OIF]) + { + ifindex = *((int32_t *)RTA_DATA(tb[NHA_OIF])); + char if_name[IFNAMSIZ] = {0}; + if (!getIfName(ifindex, if_name, IFNAMSIZ)) + { + strcpy(if_name, ifname_unknown); + } + ifname = string(if_name); + if (ifname == "eth0" || ifname == "docker0" || ifname =="eth1-midplane") + { + SWSS_LOG_DEBUG("Skip routes to interface: %s id[%d]", ifname.c_str(), id); + return; + } + } - vector fvVector; - const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, - DEL_COMMAND, - fvVector); - m_warmStartHelper.insertRefreshMap(kfv); - return; + SWSS_LOG_DEBUG("Received: id[%d], if[%d/%s] address[%s]", id, ifindex, ifname.c_str(), gateway); + m_nh_groups.insert({id, NextHopGroup(id, string(gateway), ifname)}); } } - else if (nlmsg_type != RTM_NEWROUTE) + else if (nlmsg_type == RTM_DELNEXTHOP) { - SWSS_LOG_INFO("Unknown message-type: %d for %s", nlmsg_type, destipprefix); - return; + SWSS_LOG_DEBUG("NextHopGroup del event: %d", id); + deleteNextHopGroup(id); } - if (!isSuppressionEnabled()) - { - sendOffloadReply(route_obj); - } + return; +} - switch (rtnl_route_get_type(route_obj)) - { - case RTN_BLACKHOLE: - { - vector fvVector; - FieldValueTuple fv("blackhole", "true"); - fvVector.push_back(fv); - m_routeTable.set(destipprefix, fvVector); - return; - } - case RTN_UNICAST: - break; +void netlink_parse_rtattr_nested(struct rtattr **tb, int max, + const struct rtattr *rta) +{ + netlink_parse_rtattr(tb, max, (struct rtattr *)RTA_DATA(rta), (int)RTA_PAYLOAD(rta)); +} - case RTN_MULTICAST: - case RTN_BROADCAST: - case RTN_LOCAL: - SWSS_LOG_INFO("BUM routes aren't supported yet (%s)", destipprefix); - return; +int RouteSync::parse_encap_seg6(const struct rtattr *tb, struct in6_addr *segs, + struct in6_addr *src) +{ + struct rtattr *tb_encap[256] = {}; + struct seg6_iptunnel_encap_pri *ipt = NULL; + struct in6_addr *segments = NULL; - default: - return; + netlink_parse_rtattr_nested(tb_encap, 256, tb); + + if (tb_encap[SEG6_IPTUNNEL_SRH]) { + ipt = (struct seg6_iptunnel_encap_pri *) + RTA_DATA(tb_encap[SEG6_IPTUNNEL_SRH]); + segments = ipt->srh[0].segments; + *segs = segments[0]; + *src = ipt->src; + return 1; } - struct nl_list_head *nhs = rtnl_route_get_nexthops(route_obj); - if (!nhs) + return 0; +} + +void RouteSync::onPicContextMsg(struct nlmsghdr *h, int len) +{ + int nlmsg_type = h->nlmsg_type; + uint32_t id = 0; + unsigned char addr_family; + int32_t ifindex = -1, grp_count = 0; + string ifname; + struct nhmsg *nhm = NULL; + struct rtattr *tb[NHA_MAX + 1] = {}; + struct nexthop_grp grp[MAX_MULTIPATH_NUM]; + struct in_addr ipv4 = {0}; + struct in6_addr ipv6 = {0}; + char gateway[INET6_ADDRSTRLEN] = {0}; + char seg6[INET6_ADDRSTRLEN] = {0}; + char seg6_srcs[INET6_ADDRSTRLEN] = {0}; + char ifname_unknown[IFNAMSIZ] = "unknown"; + uint16_t encap_type; + vector fvVector; + + SWSS_LOG_INFO("type %d len %d", nlmsg_type, len); + if ((nlmsg_type != RTM_NEWPICCONTEXT) + && (nlmsg_type != RTM_DELPICCONTEXT)) { - SWSS_LOG_INFO("Nexthop list is empty for %s", destipprefix); return; } - /* Get nexthop lists */ - string gw_list; - string intf_list; - string mpls_list; - getNextHopList(route_obj, gw_list, mpls_list, intf_list); - string weights = getNextHopWt(route_obj); + nhm = (struct nhmsg *)NLMSG_DATA(h); + + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wcast-align" + struct rtattr* rta = NHA_RTA(nhm); + #pragma GCC diagnostic pop + + netlink_parse_rtattr(tb, NHA_MAX, rta, len); + + if (!tb[NHA_ID]) { + SWSS_LOG_ERROR( + "Nexthop group without an ID received from the zebra"); + return; + } - vector alsv = tokenize(intf_list, NHG_DELIMITER); - for (auto alias : alsv) + /* We use the ID key'd nhg table for kernel updates */ + id = *((uint32_t *)RTA_DATA(tb[NHA_ID])); + + addr_family = nhm->nh_family; + + if (nlmsg_type == RTM_NEWPICCONTEXT) { - /* - * An FRR behavior change from 7.2 to 7.5 makes FRR update default route to eth0 in interface - * up/down events. Skipping routes to eth0 or docker0 to avoid such behavior - */ - if (alias == "eth0" || alias == "docker0") - { - SWSS_LOG_DEBUG("Skip routes to eth0 or docker0: %s %s %s", - destipprefix, gw_list.c_str(), intf_list.c_str()); - // If intf_list has only this interface, that means all of the next hops of this route - // have been removed and the next hop on the eth0/docker0 has become the only next hop. - // In this case since we do not want the route with next hop on eth0/docker0, we return. - // But still we need to clear the route from the APPL_DB. Otherwise the APPL_DB and data - // path will be left with stale route entry - if(alsv.size() == 1) + if(tb[NHA_GROUP]) + { + SWSS_LOG_INFO("New nexthop group message!"); + fvVector.emplace_back("nexthop_type", "pic_context_group"); + struct nexthop_grp *nha_grp = (struct nexthop_grp *)RTA_DATA(tb[NHA_GROUP]); + grp_count = (int)(RTA_PAYLOAD(tb[NHA_GROUP]) / sizeof(*nha_grp)); + + if(grp_count > MAX_MULTIPATH_NUM) + grp_count = MAX_MULTIPATH_NUM; + + fvVector.emplace_back("nexthop_count", to_string(grp_count)); + string nhid_list; + string weight_list; + for (int i = 0; i < grp_count; i++) { + nhid_list += to_string(nha_grp[i].id); + grp[i].id = nha_grp[i].id; + /* + The minimum weight value is 1, but kernel store it as zero (https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/ip/iproute.c?h=v5.19.0#n1028). + Adding one to weight to write the right value to the database. + */ + weight_list += to_string(nha_grp[i].weight + 1); + grp[i].weight = nha_grp[i].weight + 1; + if (i + 1 < grp_count) + { + nhid_list += NHG_DELIMITER; + weight_list += NHG_DELIMITER; + } + } + fvVector.emplace_back("nh_id", nhid_list); + fvVector.emplace_back("weight", weight_list); + } + else + { + if (tb[NHA_GATEWAY]) { - if (!warmRestartInProgress) + if (addr_family == AF_INET) { - SWSS_LOG_NOTICE("RouteTable del msg for route with only one nh on eth0/docker0: %s %s %s %s", - destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); - - m_routeTable.del(destipprefix); + memcpy(&ipv4, (void *)RTA_DATA(tb[NHA_GATEWAY]), 4); + inet_ntop(AF_INET, &ipv4, gateway, INET_ADDRSTRLEN); + } + else if (addr_family == AF_INET6) + { + memcpy(&ipv6, (void *)RTA_DATA(tb[NHA_GATEWAY]), 16); + inet_ntop(AF_INET6, &ipv6, gateway, INET6_ADDRSTRLEN); } else { - SWSS_LOG_NOTICE("Warm-Restart mode: Receiving delete msg for route with only nh on eth0/docker0: %s %s %s %s", - destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); - - vector fvVector; - const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, - DEL_COMMAND, - fvVector); - m_warmStartHelper.insertRefreshMap(kfv); + SWSS_LOG_ERROR( + "Unexpected nexthop address family"); + return; } + fvVector.emplace_back("nexthop", string(gateway)); } - return; - } - } - auto proto_num = rtnl_route_get_protocol(route_obj); - auto proto_str = getProtocolString(proto_num); + if(tb[NHA_OIF]) + { + ifindex = *((int32_t *)RTA_DATA(tb[NHA_OIF])); + char if_name[IFNAMSIZ] = {0}; + if (!getIfName(ifindex, if_name, IFNAMSIZ)) + { + strcpy(if_name, ifname_unknown); + } + ifname = string(if_name); + if (ifname == "eth0" || ifname == "docker0") + { + SWSS_LOG_DEBUG("Skip routes to inteface: %s id[%d]", ifname.c_str(), id); + return; + } + fvVector.emplace_back("ifname", ifname); + } + if(tb[NHA_ENCAP] && tb[NHA_ENCAP_TYPE]) + { + struct in6_addr seg6_segs = {0}; + struct in6_addr seg6_src = {0}; + encap_type = *((uint16_t *)RTA_DATA(tb[NHA_ENCAP_TYPE])); + switch (encap_type) + { + case LWTUNNEL_ENCAP_SEG6: + fvVector.emplace_back("nexthop_type", "srv6"); + parse_encap_seg6(tb[NHA_ENCAP], &seg6_segs, &seg6_src); + inet_ntop(AF_INET6, &seg6_segs, seg6, INET6_ADDRSTRLEN); + inet_ntop(AF_INET6, &seg6_src, seg6_srcs, INET6_ADDRSTRLEN); + fvVector.emplace_back("vpn_sid", seg6); + fvVector.emplace_back("seg_src", seg6_srcs); + + break; + default: + SWSS_LOG_ERROR("unknown encap type: %d id[%d]", encap_type, id); + } - vector fvVector; - FieldValueTuple proto("protocol", proto_str); - FieldValueTuple gw("nexthop", gw_list); - FieldValueTuple intf("ifname", intf_list); + SWSS_LOG_INFO("seg6:%s seg6_srcs:%s", seg6, seg6_srcs); + } + else + { + fvVector.emplace_back("nexthop_type", "nh"); + fvVector.emplace_back("vpn_sid", ""); + fvVector.emplace_back("seg_src", ""); + } - fvVector.push_back(proto); - fvVector.push_back(gw); - fvVector.push_back(intf); - if (!mpls_list.empty()) - { - FieldValueTuple mpls_nh("mpls_nh", mpls_list); - fvVector.push_back(mpls_nh); - } - if (!weights.empty()) - { - FieldValueTuple wt("weight", weights); - fvVector.push_back(wt); - } + } - if (!warmRestartInProgress) - { - m_routeTable.set(destipprefix, fvVector); - SWSS_LOG_DEBUG("RouteTable set msg: %s %s %s %s", destipprefix, - gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + if(grp_count) + { + vector> group; + for(int i = 0; i < grp_count; i++) + { + group.push_back(std::make_pair(grp[i].id, grp[i].weight)); + } + auto it = m_nh_groups.find(id); + if(it != m_nh_groups.end()) + { + NextHopGroup &nhg = it->second; + nhg.group = group; + updatePicContextGroupDb(nhg); + } + else + { + NextHopGroup nhg = NextHopGroup(id, group); + m_nh_groups.insert({id, nhg}); + updatePicContextGroupDb(nhg); + } + } + else + { + SWSS_LOG_DEBUG("Received: id[%d], if[%d/%s] address[%s]", id, ifindex, ifname.c_str(), gateway); + NextHopGroup nhg = NextHopGroup(id, string(gateway), ifname, seg6, seg6_srcs); + m_nh_groups.insert({id, nhg}); + } } - - /* - * During routing-stack restarting scenarios route-updates will be temporarily - * put on hold by warm-reboot logic. - */ - else + else if (nlmsg_type == RTM_DELPICCONTEXT) { - SWSS_LOG_INFO("Warm-Restart mode: RouteTable set msg: %s %s %s %s", destipprefix, - gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); - - const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, - SET_COMMAND, - fvVector); - m_warmStartHelper.insertRefreshMap(kfv); + SWSS_LOG_DEBUG("NextHopGroup del event: %d", id); + deletePicContextGroup(id); } + + return; } -/* +/* * Handle label route * @arg nlmsg_type Netlink message type * @arg obj Netlink object @@ -870,7 +2529,10 @@ void RouteSync::onLabelRouteMsg(int nlmsg_type, struct nl_object *obj) if (nlmsg_type == RTM_DELROUTE) { - m_label_routeTable.del(destaddr); + SWSS_LOG_INFO("LabelRouteTable del msg: %s", destaddr); + delWithWarmRestart( + LabelRouteTableFieldValueTupleWrapper{std::move(destaddr), std::string()}, + *m_label_routeTable); return; } else if (nlmsg_type != RTM_NEWROUTE) @@ -891,14 +2553,18 @@ void RouteSync::onLabelRouteMsg(int nlmsg_type, struct nl_object *obj) return; } + auto proto_num = rtnl_route_get_protocol(route_obj); + auto proto_str = getProtocolString(proto_num); + FieldValueTuple proto("protocol", proto_str); + switch (rtnl_route_get_type(route_obj)) { case RTN_BLACKHOLE: { - vector fvVector; - FieldValueTuple fv("blackhole", "true"); - fvVector.push_back(fv); - m_label_routeTable.set(destaddr, fvVector); + SWSS_LOG_INFO("LabelRouteTable set blackhole msg: %s", destaddr); + LabelRouteTableFieldValueTupleWrapper fvw{std::move(destaddr), std::move(proto_str)}; + fvw.blackhole = "true"; + setRouteWithWarmRestart(fvw, *m_label_routeTable); return; } case RTN_UNICAST: @@ -927,23 +2593,18 @@ void RouteSync::onLabelRouteMsg(int nlmsg_type, struct nl_object *obj) string mpls_list; getNextHopList(route_obj, gw_list, mpls_list, intf_list); - vector fvVector; - FieldValueTuple gw("nexthop", gw_list); - FieldValueTuple intf("ifname", intf_list); - FieldValueTuple mpls_pop("mpls_pop", "1"); - - fvVector.push_back(gw); - fvVector.push_back(intf); + SWSS_LOG_INFO("LabelRouteTable set msg: %s %s %s %s", destaddr, + gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + LabelRouteTableFieldValueTupleWrapper fvw{std::move(destaddr), std::move(proto_str)}; + fvw.nexthop = std::move(gw_list); + fvw.ifname = std::move(intf_list); + fvw.mpls_pop = "1"; if (!mpls_list.empty()) { - FieldValueTuple mpls_nh("mpls_nh", mpls_list); - fvVector.push_back(mpls_nh); + fvw.mpls_nh = std::move(mpls_list); } - fvVector.push_back(mpls_pop); - m_label_routeTable.set(destaddr, fvVector); - SWSS_LOG_INFO("LabelRouteTable set msg: %s %s %s %s", destaddr, - gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + setRouteWithWarmRestart(fvw, *m_label_routeTable); } /* @@ -978,6 +2639,7 @@ void RouteSync::onVnetRouteMsg(int nlmsg_type, struct nl_object *obj, string vne /* Duplicated delete as we do not know if it is a VXLAN tunnel route*/ m_vnet_routeTable.del(vnet_dip); m_vnet_tunnelTable.del(vnet_dip); + return; } else if (nlmsg_type != RTM_NEWROUTE) @@ -1023,29 +2685,25 @@ void RouteSync::onVnetRouteMsg(int nlmsg_type, struct nl_object *obj, string vne the route is a VXLAN tunnel route. */ if (ifnames.find(VXLAN_IF_NAME_PREFIX) == 0) { - vector fvVector; - FieldValueTuple ep("endpoint", nexthops); - fvVector.push_back(ep); - - m_vnet_tunnelTable.set(vnet_dip, fvVector); SWSS_LOG_DEBUG("%s set msg: %s %s", APP_VNET_RT_TUNNEL_TABLE_NAME, vnet_dip.c_str(), nexthops.c_str()); + VnetTunnelTableFieldValueTupleWrapper fvw{std::move(vnet_dip)}; + fvw.endpoint = std::move(nexthops); + setTable(fvw, m_vnet_tunnelTable); return; } /* Regular VNET route */ else { - vector fvVector; - FieldValueTuple idx("ifname", ifnames); - fvVector.push_back(idx); + VnetRouteTableFieldValueTupleWrapper fvw{vnet_dip}; + fvw.ifname = ifnames; /* If the route has at least one next hop gateway, e.g., nexthops does not only have ',' */ if (nexthops.length() + 1 > (unsigned int)rtnl_route_get_nnexthops(route_obj)) { - FieldValueTuple nh("nexthop", nexthops); - fvVector.push_back(nh); SWSS_LOG_DEBUG("%s set msg: %s %s %s", APP_VNET_RT_TABLE_NAME, vnet_dip.c_str(), ifnames.c_str(), nexthops.c_str()); + fvw.nexthop = std::move(nexthops); } else { @@ -1053,7 +2711,7 @@ void RouteSync::onVnetRouteMsg(int nlmsg_type, struct nl_object *obj, string vne APP_VNET_RT_TABLE_NAME, vnet_dip.c_str(), ifnames.c_str()); } - m_vnet_routeTable.set(vnet_dip, fvVector); + setTable(fvw, m_vnet_routeTable); } } @@ -1296,14 +2954,12 @@ string RouteSync::getNextHopWt(struct rtnl_route *route_obj) struct rtnl_nexthop *nexthop = rtnl_route_nexthop_n(route_obj, i); /* Get the weight of next hop */ uint8_t weight = rtnl_route_nh_get_weight(nexthop); - if (weight) - { - result += to_string(weight); - } - else + if (weight == 0) { - return ""; + SWSS_LOG_INFO("Using default weight of 1 for nexthop"); + weight = 1; // default weight is 1 } + result += to_string(weight); if (i + 1 < rtnl_route_get_nnexthops(route_obj)) { @@ -1350,12 +3006,23 @@ bool RouteSync::sendOffloadReply(struct nlmsghdr* hdr) bool RouteSync::sendOffloadReply(struct rtnl_route* route_obj) { SWSS_LOG_ENTER(); + int ret = 0; nl_msg* msg{}; - rtnl_route_build_add_request(route_obj, NLM_F_CREATE, &msg); + ret = rtnl_route_build_add_request(route_obj, NLM_F_CREATE, &msg); + if (ret !=0) + { + SWSS_LOG_ERROR("Route build add returned %d", ret); + return false; + } auto nlMsg = makeUniqueWithDestructor(msg, nlmsg_free); + if (nlMsg.get() == NULL) + { + SWSS_LOG_ERROR("Error in allocation for sending offload reply"); + return false; + } return sendOffloadReply(nlmsg_hdr(nlMsg.get())); } @@ -1426,6 +3093,14 @@ void RouteSync::onRouteResponse(const std::string& key, const std::vectorsecond; + + if(nhg.installed) + { + //Nexthop group already installed + return; + } + nhg.installed = true; + updateNextHopGroupDb(nhg); +} + +/* + * delete the nexthop group entry + * @arg nh_id nexthop group id + * + */ +void RouteSync::deleteNextHopGroup(uint32_t nh_id) +{ + auto git = m_nh_groups.find(nh_id); + if(git == m_nh_groups.end()) + { + SWSS_LOG_ERROR("Nexthop not found: %d", nh_id); + return; + } + + NextHopGroup& nhg = git->second; + + if(nhg.installed) + { + string key = getNextHopGroupKeyAsString(nh_id); + SWSS_LOG_DEBUG("NextHopGroup table del: key [%s]", key.c_str()); + m_nexthop_groupTable.del(key); + } + m_nh_groups.erase(git); +} + +void RouteSync::deletePicContextGroup(uint32_t nh_id) +{ + auto git = m_nh_groups.find(nh_id); + if(git == m_nh_groups.end()) + { + SWSS_LOG_INFO("Nexthop not found: %d", nh_id); + return; + } + + NextHopGroup& nhg = git->second; + + if(nhg.installed) + { + string key = getNextHopGroupKeyAsString(nh_id); + m_pic_context_groupTable.del(key.c_str()); + SWSS_LOG_DEBUG("NextHopGroup table del: key [%s]", key.c_str()); + } + m_nh_groups.erase(git); +} + +/* + * update the nexthop group table in database + * @arg nhg the nexthop group + * + */ +void RouteSync::updateNextHopGroupDb(const NextHopGroup& nhg) +{ + string nexthops; + string ifnames; + string weights; + string key = getNextHopGroupKeyAsString(nhg.id); + getNextHopGroupFields(nhg, nexthops, ifnames, weights); + + SWSS_LOG_INFO("NextHopGroup table set: key [%s] nexthop[%s] ifname[%s] weight[%s]", + key.c_str(), nexthops.c_str(), ifnames.c_str(), + weights.empty() ? "NONE": weights.c_str()); + + NextHopGroupTableFieldValueTupleWrapper fvw{std::move(key)}; + fvw.nexthop = std::move(nexthops); + fvw.ifname = std::move(ifnames); + if(!weights.empty()) + { + fvw.weight = std::move(weights); + } + setTable(fvw, m_nexthop_groupTable); +} + +void RouteSync::updatePicContextGroupDb(const NextHopGroup& nhg) +{ + vector fvVector; + struct NextHopField nhField; + string key = getNextHopGroupKeyAsString(nhg.id); + getPicContextGroupFields(nhg, nhField); + + FieldValueTuple nh("nexthop", nhField.nexthops.c_str()); + FieldValueTuple ifname("ifname", nhField.ifnames.c_str()); + FieldValueTuple vpn_sid("vpn_sid", nhField.vpn_sid.c_str()); + FieldValueTuple seg_srcs("seg_src", nhField.seg_srcs.c_str()); + FieldValueTuple wg("weight", nhField.weights.c_str()); + fvVector.push_back(nh); + fvVector.push_back(ifname); + fvVector.push_back(vpn_sid); + fvVector.push_back(seg_srcs); + fvVector.push_back(wg); + + //TODO: Take care of warm reboot + m_pic_context_groupTable.set(key.c_str(), fvVector); +} + + +/* + * generate the database fields. + * @arg nhg the nexthop group + * + */ +void RouteSync::getNextHopGroupFields(const NextHopGroup& nhg, string& nexthops, string& ifnames, string& weights, uint8_t af /*= AF_INET*/) +{ + if(nhg.group.size() == 0) + { + if(!nhg.nexthop.empty()) + { + nexthops = nhg.nexthop; + } + else + { + nexthops = af == AF_INET ? "0.0.0.0" : "::"; + } + ifnames = nhg.intf; + } + else + { + int i = 0; + for(const auto& nh : nhg.group) + { + uint32_t id = nh.first; + auto itr = m_nh_groups.find(id); + if(itr == m_nh_groups.end()) + { + SWSS_LOG_ERROR("NextHop group is incomplete: %d", nhg.id); + return; + } + + NextHopGroup& nhgr = itr->second; + string weight = to_string(nh.second); + if(i) + { + nexthops += NHG_DELIMITER; + ifnames += NHG_DELIMITER; + weights += NHG_DELIMITER; + } + nexthops += nhgr.nexthop.empty() ? (af == AF_INET ? "0.0.0.0" : "::") : nhgr.nexthop; + ifnames += nhgr.intf; + weights += weight; + ++i; + } + } +} + + +/* + * generate the database fields. + * @arg pic context + * + */ +void RouteSync::getPicContextGroupFields(const NextHopGroup& nhg, struct NextHopField& nhField, uint8_t af /*= AF_INET*/) +{ + if(nhg.group.size() == 0) + { + if(!nhg.nexthop.empty()) + { + nhField.nexthops = nhg.nexthop; + } + else + { + nhField.nexthops = af == AF_INET ? "0.0.0.0" : "::"; + } + nhField.ifnames = nhg.intf; + nhField.vni_label += nhg.vni_label.empty() ? ("") : nhg.vni_label; + nhField.vpn_sid += nhg.vpn_sid.empty() ? ("") : nhg.vpn_sid; + nhField.seg_srcs += nhg.seg_src.empty() ? ("") : nhg.seg_src; + } + else + { + int i = 0; + for(const auto &nh : nhg.group) + { + uint32_t id = nh.first; + auto itr = m_nh_groups.find(id); + if(itr == m_nh_groups.end()) + { + SWSS_LOG_INFO("NextHop group is incomplete: %d", nhg.id); + return; + } + + NextHopGroup& nhgr = itr->second; + string weight = to_string(nh.second); + if(i) + { + nhField.nexthops += NHG_DELIMITER; + nhField.ifnames += NHG_DELIMITER; + nhField.vni_label += NHG_DELIMITER; + nhField.vpn_sid += NHG_DELIMITER; + nhField.weights += NHG_DELIMITER; + nhField.seg_srcs += NHG_DELIMITER; + } + nhField.nexthops += nhgr.nexthop.empty() ? (af == AF_INET ? "0.0.0.0" : "::") : nhgr.nexthop; + nhField.ifnames += nhgr.intf.empty() ? ("") : nhgr.intf; + nhField.vni_label += nhgr.vni_label.empty() ? ("") : nhgr.vni_label; + nhField.vpn_sid += nhgr.vpn_sid.empty() ? ("") : nhgr.vpn_sid; + nhField.weights += weight; + nhField.seg_srcs += nhgr.seg_src.empty() ? ("") : nhgr.seg_src; + ++i; + } + } +} diff --git a/fpmsyncd/routesync.h b/fpmsyncd/routesync.h index fd18b9d25ac..3c6f6ef1185 100644 --- a/fpmsyncd/routesync.h +++ b/fpmsyncd/routesync.h @@ -3,12 +3,16 @@ #include "dbconnector.h" #include "producerstatetable.h" +#include "zmqclient.h" +#include "zmqproducerstatetable.h" #include "netmsg.h" #include "linkcache.h" #include "fpminterface.h" #include "warmRestartHelper.h" #include #include +#include +#include #include @@ -23,12 +27,167 @@ using namespace std; /* Parse the Raw netlink msg */ extern void netlink_parse_rtattr(struct rtattr **tb, int max, struct rtattr *rta, int len); +extern void netlink_parse_rtattr_nested(struct rtattr **tb, int max, const struct rtattr *rta); namespace swss { +struct NextHopGroup { + uint32_t id; + vector> group; + string nexthop; + string intf; + bool installed; + string vni_label; + string vpn_sid; + string seg_src; + NextHopGroup(uint32_t id, const string& nexthop, const string& interface) : installed(false), id(id), nexthop(nexthop), intf(interface) {}; + NextHopGroup(uint32_t id, const vector>& group) : installed(false), id(id), group(group) {}; + NextHopGroup(uint32_t id, const string& nexthop, const string& interface, + const string& vpnsid, const string& segsrc) : installed(false), id(id), nexthop(nexthop), intf(interface), vpn_sid(vpnsid), seg_src(segsrc) {}; +}; + + +struct seg6_iptunnel_encap_pri { + int mode; + char segment_name[64]; + struct in6_addr src; + struct ipv6_sr_hdr srh[0]; +}; + /* Path to protocol name database provided by iproute2 */ constexpr auto DefaultRtProtoPath = "/etc/iproute2/rt_protos"; +class FieldValueTupleWrapperBase { + public: + FieldValueTupleWrapperBase(const string & _key) : key(_key) {} + FieldValueTupleWrapperBase(const string && _key) : key(std::move(_key)) {} + virtual ~FieldValueTupleWrapperBase() = default; + + virtual vector fieldValueTupleVector() = 0; + + vector KeyOpFieldsValuesTupleVector() { + // The following code calls the batched version of set() for the table. + // The reason for the DEL followed by a SET is that redis only overwrites + // hashset fields that are explicitly set against a given key. It does leaves + // previously set fields as is. If a route changes in such a way that earlier + // fields are not valid any more (Ex: from using nexthop to nexthop-group), + // then we would like to atomically cleanup earlier fields and set the new + // fields in the hash-set in redis. + vector kfvVector; + auto fvVector = fieldValueTupleVector(); + kfvVector.push_back(KeyOpFieldsValuesTuple {key.c_str(), "SET", fvVector}); + return kfvVector; + } + + // For DEL-only operations with warm restart support + KeyOpFieldsValuesTuple KeyOpFieldsValuesTupleVectorForDel() { + return KeyOpFieldsValuesTuple {key.c_str(), "DEL", {}}; + } + + string key = string(); +}; + +class RouteTableFieldValueTupleWrapper : public FieldValueTupleWrapperBase { + public: + RouteTableFieldValueTupleWrapper(const string & _key, string && _protocol) : + FieldValueTupleWrapperBase(_key), protocol(std::move(_protocol)) {} + RouteTableFieldValueTupleWrapper(const string && _key, string && _protocol) : + FieldValueTupleWrapperBase(std::move(_key)), protocol(std::move(_protocol)) {} + + vector fieldValueTupleVector() override; + + string protocol = string(); + string blackhole = string("false"); + string nexthop = string(); + string ifname = string(); + string nexthop_group = string(); + string mpls_nh = string(); + string weight = string(); + string vni_label = string(); + string router_mac = string(); + string segment = string(); + string seg_src = string(); +}; + +class LabelRouteTableFieldValueTupleWrapper : public FieldValueTupleWrapperBase { + public: + LabelRouteTableFieldValueTupleWrapper(const string & _key, string && _protocol) : + FieldValueTupleWrapperBase(_key), + protocol(std::move(_protocol)) {} + LabelRouteTableFieldValueTupleWrapper(const string && _key, string && _protocol) : + FieldValueTupleWrapperBase(std::move(_key)), + protocol(std::move(_protocol)) {} + + vector fieldValueTupleVector() override; + + string protocol = string(); + string blackhole = string("false"); + string nexthop = string(); + string ifname = string(); + string mpls_nh = string(); + string mpls_pop = string(); +}; + +class VnetRouteTableFieldValueTupleWrapper : public FieldValueTupleWrapperBase { + public: + VnetRouteTableFieldValueTupleWrapper(const string & _key) : FieldValueTupleWrapperBase(_key) {} + VnetRouteTableFieldValueTupleWrapper(const string && _key) + : FieldValueTupleWrapperBase(std::move(_key)) {} + + vector fieldValueTupleVector() override; + + string nexthop = string(); + string ifname = string(); +}; + +class VnetTunnelTableFieldValueTupleWrapper : public FieldValueTupleWrapperBase { + public: + VnetTunnelTableFieldValueTupleWrapper(const string & _key) : FieldValueTupleWrapperBase(_key) {} + VnetTunnelTableFieldValueTupleWrapper(const string && _key) + : FieldValueTupleWrapperBase(std::move(_key)) {} + + vector fieldValueTupleVector() override; + + string endpoint = string(); +}; + +class NextHopGroupTableFieldValueTupleWrapper : public FieldValueTupleWrapperBase { + public: + NextHopGroupTableFieldValueTupleWrapper(const string & _key) : FieldValueTupleWrapperBase(_key) {} + NextHopGroupTableFieldValueTupleWrapper(const string && _key) + : FieldValueTupleWrapperBase(std::move(_key)) {} + + vector fieldValueTupleVector() override; + + string nexthop = string(); + string ifname = string(); + string weight = string(); +}; + +class Srv6MySidTableFieldValueTupleWrapper : public FieldValueTupleWrapperBase { + public: + Srv6MySidTableFieldValueTupleWrapper(const string & _key) : FieldValueTupleWrapperBase(_key) {} + Srv6MySidTableFieldValueTupleWrapper(const string && _key) + : FieldValueTupleWrapperBase(std::move(_key)) {} + + vector fieldValueTupleVector() override; + + string action = string(); + string vrf = string(); + string adj = string(); +}; + +class Srv6SidListTableFieldValueTupleWrapper : public FieldValueTupleWrapperBase { + public: + Srv6SidListTableFieldValueTupleWrapper(const string & _key) : FieldValueTupleWrapperBase(_key) {} + Srv6SidListTableFieldValueTupleWrapper(const string && _key) + : FieldValueTupleWrapperBase(std::move(_key)) {} + + vector fieldValueTupleVector() override; + + string path = string(); +}; + class RouteSync : public NetMsg { public: @@ -47,6 +206,20 @@ class RouteSync : public NetMsg return m_isSuppressionEnabled; } + /* Helper method to set route table with warm restart support */ + void setRouteWithWarmRestart( + FieldValueTupleWrapperBase & fvw, + ProducerStateTable & table); + + void setTable( + FieldValueTupleWrapperBase & fvw, + ProducerStateTable & table); + + // Generic method for DEL operations with warm restart support + void delWithWarmRestart( + FieldValueTupleWrapperBase && fvw, + ProducerStateTable & table); + void onRouteResponse(const std::string& key, const std::vector& fieldValues); void onWarmStartEnd(swss::DBConnector& applStateDb); @@ -64,19 +237,36 @@ class RouteSync : public NetMsg m_fpmInterface = nullptr; } - WarmStartHelper m_warmStartHelper; + WarmStartHelper& getWarmStartHelper() + { + return m_warmStartHelper; + } private: + /* ZMQ client */ + shared_ptr m_zmqClient; /* regular route table */ - ProducerStateTable m_routeTable; + shared_ptr m_routeTable; /* label route table */ - ProducerStateTable m_label_routeTable; + shared_ptr m_label_routeTable; /* vnet route table */ ProducerStateTable m_vnet_routeTable; /* vnet vxlan tunnel table */ - ProducerStateTable m_vnet_tunnelTable; + ProducerStateTable m_vnet_tunnelTable; + /* Warm start helper */ + WarmStartHelper m_warmStartHelper; + /* srv6 mySid table */ + ProducerStateTable m_srv6MySidTable; + /* srv6 sid list table */ + ProducerStateTable m_srv6SidListTable; struct nl_cache *m_link_cache; struct nl_sock *m_nl_sock; + /* nexthop group table */ + ProducerStateTable m_nexthop_groupTable; + ProducerStateTable m_pic_context_groupTable; + map m_nh_groups; + /* SID list to refcount */ + map m_srv6_sidlist_refcnt; bool m_isSuppressionEnabled{false}; FpmInterface* m_fpmInterface {nullptr}; @@ -89,6 +279,18 @@ class RouteSync : public NetMsg void parseEncap(struct rtattr *tb, uint32_t &encap_value, string &rmac); + void parseEncapSrv6SteerRoute(struct rtattr *tb, string &vpn_sid, string &src_addr); + bool parseEncapSrv6VpnRoute(struct rtattr *tb, uint32_t &pic_id, uint32_t &nhg_id); + + bool parseSrv6MySid(struct rtattr *tb[], string &block_len, + string &node_len, string &func_len, + string &arg_len, string &action, string &vrf, + string &adj, string &intf); + + bool parseSrv6MySidFormat(struct rtattr *tb, string &block_len, + string &node_len, string &func_len, + string &arg_len); + void parseRtAttrNested(struct rtattr **tb, int max, struct rtattr *rta); @@ -98,11 +300,20 @@ class RouteSync : public NetMsg /* Handle prefix route */ void onEvpnRouteMsg(struct nlmsghdr *h, int len); + /* Handle routes containing an SRv6 nexthop */ + void onSrv6SteerRouteMsg(struct nlmsghdr *h, int len); + + /* Handle SRv6 MySID */ + void onSrv6MySidMsg(struct nlmsghdr *h, int len); + + /* Handle vpn route */ + void onSrv6VpnRouteMsg(struct nlmsghdr *h, int len); + /* Handle vnet route */ void onVnetRouteMsg(int nlmsg_type, struct nl_object *obj, string vnet); /* Get interface name based on interface index */ - bool getIfName(int if_index, char *if_name, size_t name_len); + virtual bool getIfName(int if_index, char *if_name, size_t name_len); /* Get interface if_index based on interface name */ rtnl_link* getLinkByName(const char *name); @@ -115,10 +326,15 @@ class RouteSync : public NetMsg string& mac_list, string& intf_list, string rmac, string vlan_id); - bool getEvpnNextHop(struct nlmsghdr *h, int received_bytes, struct rtattr *tb[], + virtual bool getEvpnNextHop(struct nlmsghdr *h, int received_bytes, struct rtattr *tb[], string& nexthops, string& vni_list, string& mac_list, string& intf_list); + bool getSrv6SteerRouteNextHop(struct nlmsghdr *h, int received_bytes, + struct rtattr *tb[], string &vpn_sid, string &src_addr); + bool getSrv6VpnRouteNextHop(struct nlmsghdr *h, int received_bytes, + struct rtattr *tb[], uint32_t &pic_id,uint32_t &nhg_id); + /* Get next hop list */ void getNextHopList(struct rtnl_route *route_obj, string& gw_list, string& mpls_list, string& intf_list); @@ -140,6 +356,35 @@ class RouteSync : public NetMsg /* Sends FPM message with RTM_F_OFFLOAD flag set for all routes in the table */ void sendOffloadReply(swss::DBConnector& db, const std::string& table); + + /* Get encap type */ + uint16_t getEncapType(struct nlmsghdr *h); + + const char *mySidAction2Str(uint32_t action); + + /* Handle Nexthop message */ + void onNextHopMsg(struct nlmsghdr *h, int len); + void onPicContextMsg(struct nlmsghdr *h, int len); + int parse_encap_seg6(const struct rtattr *tb, struct in6_addr *segs, struct in6_addr *src); + /* Get next hop group key */ + const string getNextHopGroupKeyAsString(uint32_t id) const; + void installNextHopGroup(uint32_t nh_id); + void deleteNextHopGroup(uint32_t nh_id); + void deletePicContextGroup(uint32_t nh_id); + void updateNextHopGroupDb(const NextHopGroup& nhg); + void updatePicContextGroupDb(const NextHopGroup& nhg); + void getNextHopGroupFields(const NextHopGroup& nhg, string& nexthops, string& ifnames, string& weights, uint8_t af = AF_INET); + void getPicContextGroupFields(const NextHopGroup& nhg, struct NextHopField& nhField, uint8_t af = AF_INET); + +}; +struct NextHopField { + string nexthops; + string ifnames; + string vni_label; + string vpn_sid; + string mpls_nh; + string weights; + string seg_srcs; }; } diff --git a/gcovpreload/Makefile b/gcovpreload/Makefile index c4328c72b92..5039fe50561 100644 --- a/gcovpreload/Makefile +++ b/gcovpreload/Makefile @@ -6,7 +6,7 @@ CXX:=$(shell sh -c 'type $${CXX%% *} >/dev/null 2>/dev/null && echo $(CXX) || ec DYLIBSUFFIX=so DYLIBNAME=$(LIBNAME).$(DYLIBSUFFIX) -DYLIB_MAKE_CMD=$(CC) -shared -fpic gcovpreload.c -o ${DYLIBNAME} +DYLIB_MAKE_CMD=$(CC) -shared -fpic gcovpreload.cpp -o ${DYLIBNAME} all: $(DYLIB_MAKE_CMD) diff --git a/gcovpreload/gcovpreload.c b/gcovpreload/gcovpreload.cpp similarity index 83% rename from gcovpreload/gcovpreload.c rename to gcovpreload/gcovpreload.cpp index 2141e9ef395..a545c217ce1 100644 --- a/gcovpreload/gcovpreload.c +++ b/gcovpreload/gcovpreload.cpp @@ -2,15 +2,15 @@ #include #include #include -#define SIMPLE_WAY + +extern "C" void __gcov_dump(); void sighandler(int signo) { #ifdef SIMPLE_WAY exit(signo); #else - extern void __gcov_flush(); - __gcov_flush(); /* flush out gcov stats data */ + __gcov_dump(); raise(signo); /* raise the signal again to crash process */ #endif } @@ -33,9 +33,9 @@ void ctor() struct sigaction sa; sa.sa_handler = sighandler; sigemptyset(&sa.sa_mask); - sa.sa_flags = SA_RESETHAND; + sa.sa_flags = (int)SA_RESETHAND; - for(i = 0; i < sizeof(sigs)/sizeof(sigs[0]); ++i) { + for(i = 0; i < (int)(sizeof(sigs)/sizeof(sigs[0])); ++i) { if (sigaction(sigs[i], &sa, NULL) == -1) { perror("Could not set signal handler"); } diff --git a/gearsyncd/gearparserbase.cpp b/gearsyncd/gearparserbase.cpp index c6cae36253f..e86e34e10cc 100644 --- a/gearsyncd/gearparserbase.cpp +++ b/gearsyncd/gearparserbase.cpp @@ -24,7 +24,7 @@ GearParserBase::init() { m_writeToDb = false; m_rootInit = false; - m_applDb = std::unique_ptr{new swss::DBConnector(APPL_DB, swss::DBConnector::DEFAULT_UNIXSOCKET, 0)}; + m_applDb = std::unique_ptr{new swss::DBConnector("APPL_DB", 0)}; m_producerStateTable = std::unique_ptr{new swss::ProducerStateTable(m_applDb.get(), APP_GEARBOX_TABLE_NAME)}; } diff --git a/gearsyncd/gearsyncd.cpp b/gearsyncd/gearsyncd.cpp index f79b079d825..55b6eca0a36 100644 --- a/gearsyncd/gearsyncd.cpp +++ b/gearsyncd/gearsyncd.cpp @@ -74,8 +74,8 @@ int main(int argc, char **argv) } } - DBConnector cfgDb(CONFIG_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); - DBConnector applDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector applDb("APPL_DB", 0); ProducerStateTable producerStateTable(&applDb, APP_GEARBOX_TABLE_NAME); WarmStart::initialize("gearsyncd", "swss"); diff --git a/lib/orch_zmq_config.cpp b/lib/orch_zmq_config.cpp new file mode 100644 index 00000000000..09bc66e0b0c --- /dev/null +++ b/lib/orch_zmq_config.cpp @@ -0,0 +1,145 @@ +#include +#include +#include + +#include "dbconnector.h" +#include "logger.h" +#include "orch_zmq_config.h" +#include + +#define ZMQ_TABLE_CONFIGFILE "/etc/swss/orch_zmq_tables.conf" + +// ZMQ none IPV6 address with port, for example: tcp://127.0.0.1:5555 tcp://localhost:5555 +const std::regex ZMQ_NONE_IPV6_ADDRESS_WITH_PORT("\\w+:\\/\\/[^:]+:\\d+"); + +// ZMQ IPV6 address with port, for example: tcp://[fe80::fb7:c6df:9d3a:3d7b]:5555 +const std::regex ZMQ_IPV6_ADDRESS_WITH_PORT("\\w+:\\/\\/\\[.*\\]+:\\d+"); + +std::set swss::load_zmq_tables() +{ + std::set tables; + std::ifstream config_file(ZMQ_TABLE_CONFIGFILE); + if (config_file.is_open()) + { + std::string table; + while (std::getline(config_file, table)) + { + tables.emplace(table); + } + config_file.close(); + } + + return tables; +} + +int swss::get_zmq_port() +{ + auto zmq_port = ORCH_ZMQ_PORT; + const char* nsid = std::getenv("NAMESPACE_ID"); + std::string nsid_str = nsid ? std::string(nsid) : ""; + if (!nsid_str.empty()) + { + try + { + // namespace start from 0, using original ZMQ port for global namespace + zmq_port += std::stoi(nsid) + 1; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to convert %s to int, fallback to default port", nsid_str.c_str()); + } + } + return zmq_port; +} + +std::shared_ptr swss::create_zmq_client(std::string zmq_address, std::string vrf) +{ + // swssconfig running inside swss contianer, so need get ZMQ port according to namespace ID. + auto zmq_port = get_zmq_port(); + zmq_address = zmq_address + ":" + std::to_string(zmq_port); + SWSS_LOG_NOTICE("Create ZMQ server with address: %s, vrf: %s", zmq_address.c_str(), vrf.c_str()); + return std::make_shared(zmq_address, vrf); +} + +std::shared_ptr swss::create_zmq_server(std::string zmq_address, std::string vrf) +{ + // TODO: remove this check after orchagent.sh migrate to pass ZMQ address without port + if (!std::regex_search(zmq_address, ZMQ_NONE_IPV6_ADDRESS_WITH_PORT) + && !std::regex_search(zmq_address, ZMQ_IPV6_ADDRESS_WITH_PORT)) + { + auto zmq_port = get_zmq_port(); + zmq_address = zmq_address + ":" + std::to_string(zmq_port); + } + + SWSS_LOG_NOTICE("Create ZMQ server with address: %s, vrf: %s", zmq_address.c_str(), vrf.c_str()); + + // To prevent message loss between ZmqServer's bind operation and the creation of ZmqProducerStateTable, + // use lazy binding and call bind() only after the handler has been registered. + return std::make_shared(zmq_address, vrf, true); +} + +bool swss::get_feature_status(std::string feature, bool default_value) +{ + std::shared_ptr enabled = nullptr; + + try + { + swss::DBConnector config_db("CONFIG_DB", 0); + enabled = config_db.hget("DEVICE_METADATA|localhost", feature); + } + catch (const std::runtime_error &e) + { + SWSS_LOG_ERROR("Not found feature %s failed with exception: %s", feature.c_str(), e.what()); + return default_value; + } + + if (!enabled) + { + SWSS_LOG_NOTICE("Not found feature %s status, return default value.", feature.c_str()); + return default_value; + } + + SWSS_LOG_NOTICE("Get feature %s status: %s", feature.c_str(), enabled->c_str()); + return *enabled == "true"; +} + +std::shared_ptr swss::create_local_zmq_client(std::string feature, bool default_value) +{ + auto enable = get_feature_status(feature, default_value); + if (enable) { + SWSS_LOG_NOTICE("Feature %s enabled, Create ZMQ client : %s", feature.c_str(), ZMQ_LOCAL_ADDRESS); + return create_zmq_client(ZMQ_LOCAL_ADDRESS); + } + + return nullptr; +} + +std::shared_ptr swss::createProducerStateTable(DBConnector *db, const std::string &tableName, std::shared_ptr zmqClient) +{ + swss::ProducerStateTable *tablePtr = nullptr; + if (zmqClient != nullptr) { + SWSS_LOG_NOTICE("Create ZmqProducerStateTable : %s", tableName.c_str()); + tablePtr = new swss::ZmqProducerStateTable(db, tableName, *zmqClient); + } + else { + SWSS_LOG_NOTICE("Create ProducerStateTable : %s", tableName.c_str()); + tablePtr = new swss::ProducerStateTable(db, tableName); + } + + return std::shared_ptr(tablePtr); +} + +std::shared_ptr swss::createProducerStateTable(RedisPipeline *pipeline, const std::string& tableName, bool buffered, std::shared_ptr zmqClient) +{ + swss::ProducerStateTable *tablePtr = nullptr; + if (zmqClient != nullptr) { + SWSS_LOG_NOTICE("Create ZmqProducerStateTable : %s", tableName.c_str()); + tablePtr = new swss::ZmqProducerStateTable(pipeline, tableName, *zmqClient); + } + else { + SWSS_LOG_NOTICE("Create ProducerStateTable : %s", tableName.c_str()); + tablePtr = new swss::ProducerStateTable(pipeline, tableName, buffered); + } + + return std::shared_ptr(tablePtr); +} diff --git a/lib/orch_zmq_config.h b/lib/orch_zmq_config.h new file mode 100644 index 00000000000..68aff440dbd --- /dev/null +++ b/lib/orch_zmq_config.h @@ -0,0 +1,47 @@ +#ifndef SWSS_ORCH_ZMQ_CONFIG_H +#define SWSS_ORCH_ZMQ_CONFIG_H + +#include +#include +#include + +#include "dbconnector.h" +#include "zmqclient.h" +#include "zmqserver.h" +#include "zmqproducerstatetable.h" + +/* + * swssconfig will only connect to local orchagent ZMQ endpoint. + */ +#define ZMQ_LOCAL_ADDRESS "tcp://localhost" + +/* + * Feature flag to enable the gNMI service to send DASH events to orchagent via the ZMQ channel. + */ +#define ORCH_NORTHBOND_DASH_ZMQ_ENABLED "orch_northbond_dash_zmq_enabled" + +/* + * Feature flag to enable the fpmsyncd to send ROUTE events to orchagent via the ZMQ channel. + */ +#define ORCH_NORTHBOND_ROUTE_ZMQ_ENABLED "orch_northbond_route_zmq_enabled" + +namespace swss { + +std::set load_zmq_tables(); + +int get_zmq_port(); + +std::shared_ptr create_zmq_client(std::string zmq_address, std::string vrf=""); + +std::shared_ptr create_zmq_server(std::string zmq_address, std::string vrf=""); + +bool get_feature_status(std::string feature, bool default_value); + +std::shared_ptr create_local_zmq_client(std::string feature, bool default_value); + +std::shared_ptr createProducerStateTable(DBConnector *db, const std::string &tableName, std::shared_ptr zmqClient); + +std::shared_ptr createProducerStateTable(RedisPipeline *pipeline, const std::string &tableName, bool buffered, std::shared_ptr zmqClient); +} + +#endif /* SWSS_ORCH_ZMQ_CONFIG_H */ diff --git a/lib/recorder.cpp b/lib/recorder.cpp index 449039adff9..e9af745cdf9 100644 --- a/lib/recorder.cpp +++ b/lib/recorder.cpp @@ -93,12 +93,12 @@ void RecWriter::record(const std::string& val) { return ; } - record_ofs << swss::getTimestamp() << "|" << val << std::endl; if (isRotate()) { setRotate(false); logfileReopen(); } + record_ofs << swss::getTimestamp() << "|" << val << std::endl; } diff --git a/mclagsyncd/Makefile.am b/mclagsyncd/Makefile.am index d4b4b03c402..eb4fc20d0c4 100644 --- a/mclagsyncd/Makefile.am +++ b/mclagsyncd/Makefile.am @@ -15,7 +15,7 @@ mclagsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) mclagsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -mclagsyncd_LDADD += -lgcovpreload +mclagsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/mclagsyncd/mclaglink.cpp b/mclagsyncd/mclaglink.cpp index b8040c16466..4414dd031df 100644 --- a/mclagsyncd/mclaglink.cpp +++ b/mclagsyncd/mclaglink.cpp @@ -192,7 +192,10 @@ void MclagLink::setPortIsolate(char *msg) static const unordered_set supported { BRCM_PLATFORM_SUBSTRING, BFN_PLATFORM_SUBSTRING, - CTC_PLATFORM_SUBSTRING + CTC_PLATFORM_SUBSTRING, + CLX_PLATFORM_SUBSTRING, + MRVL_PRST_PLATFORM_SUBSTRING, + MRVL_TL_PLATFORM_SUBSTRING }; const char *platform = getenv("platform"); @@ -1843,7 +1846,7 @@ MclagLink::~MclagLink() void MclagLink::accept() { struct sockaddr_in client_addr; - socklen_t client_len; + socklen_t client_len = sizeof(struct sockaddr_in); m_connection_socket = ::accept(m_server_socket, (struct sockaddr *)&client_addr, &client_len); diff --git a/mclagsyncd/mclaglink.h b/mclagsyncd/mclaglink.h index 09129fd88f9..1657c8f1eeb 100644 --- a/mclagsyncd/mclaglink.h +++ b/mclagsyncd/mclaglink.h @@ -54,6 +54,9 @@ #define BRCM_PLATFORM_SUBSTRING "broadcom" #define BFN_PLATFORM_SUBSTRING "barefoot" #define CTC_PLATFORM_SUBSTRING "centec" +#define CLX_PLATFORM_SUBSTRING "clounix" +#define MRVL_PRST_PLATFORM_SUBSTRING "marvell-prestera" +#define MRVL_TL_PLATFORM_SUBSTRING "marvell-teralynx" using namespace std; diff --git a/natsyncd/Makefile.am b/natsyncd/Makefile.am index cdee9d52ae6..562d452c418 100644 --- a/natsyncd/Makefile.am +++ b/natsyncd/Makefile.am @@ -15,7 +15,7 @@ natsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) natsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lnl-nf-3 -lswsscommon if GCOV_ENABLED -natsyncd_LDADD += -lgcovpreload +natsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/neighsyncd/Makefile.am b/neighsyncd/Makefile.am index cb61a83bbca..1f34e9e92ff 100644 --- a/neighsyncd/Makefile.am +++ b/neighsyncd/Makefile.am @@ -15,7 +15,7 @@ neighsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) neighsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -neighsyncd_LDADD += -lgcovpreload +neighsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/neighsyncd/neighsync.cpp b/neighsyncd/neighsync.cpp index 46f51b9266e..8db632b12f3 100644 --- a/neighsyncd/neighsync.cpp +++ b/neighsyncd/neighsync.cpp @@ -14,6 +14,7 @@ #include "neighsync.h" #include "warm_restart.h" #include +#include using namespace std; using namespace swss; @@ -98,18 +99,28 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj) { if ((isLinkLocalEnabled(intfName) == false) && (nlmsg_type != RTM_DELNEIGH)) { + SWSS_LOG_INFO("LinkLocal address received, ignoring for %s", ipStr); return; } } /* Ignore IPv6 multicast link-local addresses as neighbors */ if (family == IPV6_NAME && IN6_IS_ADDR_MC_LINKLOCAL(nl_addr_get_binary_addr(rtnl_neigh_get_dst(neigh)))) + { + SWSS_LOG_INFO("Multicast LinkLocal address received, ignoring for %s", ipStr); return; + } key+= ipStr; int state = rtnl_neigh_get_state(neigh); if (state == NUD_NOARP) { - return; + /* For externally learned neighbors, e.g. VXLAN EVPN, we want to keep + * these neighbors. */ + if (!(rtnl_neigh_get_flags(neigh) & NTF_EXT_LEARNED)) + { + SWSS_LOG_INFO("NOARP address received, ignoring for %s", ipStr); + return; + } } bool delete_key = false; diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index 2af9d4aa0d7..755f6205b7b 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -8,26 +8,30 @@ INCLUDES = -I $(top_srcdir)/lib \ -I pbh \ -I nhg -if GCOV_ENABLED SUBDIRS = p4orch/tests -endif CFLAGS_SAI = -I /usr/include/sai swssdir = $(datadir)/swss dist_swss_DATA = \ + nvda_port_trim_drop.lua \ + eliminate_events.lua \ rif_rates.lua \ - pfc_detect_innovium.lua \ + pfc_detect_marvell_teralynx.lua \ pfc_detect_mellanox.lua \ pfc_detect_broadcom.lua \ + pfc_detect_marvell_prestera.lua \ pfc_detect_barefoot.lua \ pfc_detect_nephos.lua \ pfc_detect_cisco-8000.lua \ pfc_detect_vs.lua \ pfc_restore.lua \ pfc_restore_cisco-8000.lua \ + pfc_detect_clounix.lua \ port_rates.lua \ + port_flr.lua \ + drop_monitor.lua \ watermark_queue.lua \ watermark_pg.lua \ watermark_bufferpool.lua \ @@ -48,6 +52,7 @@ orchagent_SOURCES = \ $(top_srcdir)/lib/gearboxutils.cpp \ $(top_srcdir)/lib/subintf.cpp \ $(top_srcdir)/lib/recorder.cpp \ + $(top_srcdir)/lib/orch_zmq_config.cpp \ orchdaemon.cpp \ orch.cpp \ notifications.cpp \ @@ -67,6 +72,7 @@ orchagent_SOURCES = \ copporch.cpp \ tunneldecaporch.cpp \ qosorch.cpp \ + buffer/bufferhelper.cpp \ bufferorch.cpp \ mirrororch.cpp \ fdborch.cpp \ @@ -80,6 +86,8 @@ orchagent_SOURCES = \ saiattr.cpp \ switch/switch_capabilities.cpp \ switch/switch_helper.cpp \ + switch/trimming/capabilities.cpp \ + switch/trimming/helper.cpp \ switchorch.cpp \ pfcwdorch.cpp \ pfcactionhandler.cpp \ @@ -88,6 +96,7 @@ orchagent_SOURCES = \ vrforch.cpp \ countercheckorch.cpp \ vxlanorch.cpp \ + tunneltermhelper.cpp \ vnetorch.cpp \ dtelorch.cpp \ flexcounterorch.cpp \ @@ -103,17 +112,33 @@ orchagent_SOURCES = \ macsecorch.cpp \ lagid.cpp \ bfdorch.cpp \ + icmporch.cpp \ srv6orch.cpp \ response_publisher.cpp \ nvgreorch.cpp \ zmqorch.cpp \ + dash/dashenifwdorch.cpp \ + dash/dashenifwdinfo.cpp \ dash/dashorch.cpp \ dash/dashrouteorch.cpp \ dash/dashvnetorch.cpp \ dash/dashaclorch.cpp \ dash/dashaclgroupmgr.cpp \ + dash/dashmeterorch.cpp \ dash/dashtagmgr.cpp \ - dash/pbutils.cpp + dash/dashtunnelorch.cpp \ + dash/pbutils.cpp \ + dash/dashhaorch.cpp \ + dash/dashportmaporch.cpp \ + twamporch.cpp \ + stporch.cpp \ + nexthopkey.cpp \ + macsecpost.cpp \ + high_frequency_telemetry/hftelorch.cpp \ + high_frequency_telemetry/hftelprofile.cpp \ + high_frequency_telemetry/counternameupdater.cpp \ + high_frequency_telemetry/hftelutils.cpp \ + high_frequency_telemetry/hftelgroup.cpp orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp flex_counter/flow_counter_handler.cpp flex_counter/flowcounterrouteorch.cpp orchagent_SOURCES += debug_counter/debug_counter.cpp debug_counter/drop_counter.cpp @@ -136,9 +161,10 @@ orchagent_SOURCES += p4orch/p4orch.cpp \ orchagent_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) orchagent_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) -orchagent_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq -lprotobuf -ldashapi +orchagent_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq -lprotobuf -ldashapi -ljemalloc -routeresync_SOURCES = routeresync.cpp +routeresync_SOURCES = routeresync.cpp \ + $(top_srcdir)/lib/orch_zmq_config.cpp routeresync_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) routeresync_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) routeresync_LDADD = $(LDFLAGS_ASAN) -lswsscommon @@ -148,9 +174,9 @@ orchagent_restart_check_CPPFLAGS = $(DBGFLAGS) $(AM_CPPFLAGS) $(CFLAGS_COMMON) $ orchagent_restart_check_LDADD = $(LDFLAGS_ASAN) -lhiredis -lswsscommon -lpthread if GCOV_ENABLED -orchagent_LDADD += -lgcovpreload -routeresync_LDADD += -lgcovpreload -orchagent_restart_check_LDADD += -lgcovpreload +orchagent_SOURCES += ../gcovpreload/gcovpreload.cpp +routeresync_SOURCES += ../gcovpreload/gcovpreload.cpp +orchagent_restart_check_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp old mode 100644 new mode 100755 index 707462799f7..16ad7a95633 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include "aclorch.h" #include "logger.h" #include "schema.h" @@ -11,6 +12,7 @@ #include "timer.h" #include "crmorch.h" #include "sai_serialize.h" +#include "directory.h" using namespace std; using namespace swss; @@ -28,11 +30,14 @@ extern sai_switch_api_t* sai_switch_api; extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; extern CrmOrch *gCrmOrch; +extern SwitchOrch *gSwitchOrch; extern string gMySwitchType; +extern Directory gDirectory; #define MIN_VLAN_ID 1 // 0 is a reserved VLAN ID #define MAX_VLAN_ID 4095 // 4096 is a reserved VLAN ID + #define STATE_DB_ACL_ACTION_FIELD_IS_ACTION_LIST_MANDATORY "is_action_list_mandatory" #define STATE_DB_ACL_ACTION_FIELD_ACTION_LIST "action_list" #define STATE_DB_ACL_L3V4V6_SUPPORTED "supported_L3V4V6" @@ -41,11 +46,18 @@ extern string gMySwitchType; #define ACL_COUNTER_DEFAULT_POLLING_INTERVAL_MS 10000 // ms #define ACL_COUNTER_DEFAULT_ENABLED_STATE false + +#define EGR_SET_DSCP_TABLE_ID "EgressSetDSCP" +#define MAX_META_DATA_VALUE 4095 + const int TCP_PROTOCOL_NUM = 6; // TCP protocol number +#define MAC_EXACT_MATCH "ff:ff:ff:ff:ff:ff" + acl_rule_attr_lookup_t aclMatchLookup = { { MATCH_IN_PORTS, SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS }, + { MATCH_OUT_PORT, SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORT }, { MATCH_OUT_PORTS, SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS }, { MATCH_SRC_IP, SAI_ACL_ENTRY_ATTR_FIELD_SRC_IP }, { MATCH_DST_IP, SAI_ACL_ENTRY_ATTR_FIELD_DST_IP }, @@ -70,10 +82,15 @@ acl_rule_attr_lookup_t aclMatchLookup = { MATCH_TUNNEL_VNI, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI }, { MATCH_INNER_ETHER_TYPE, SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE }, { MATCH_INNER_IP_PROTOCOL, SAI_ACL_ENTRY_ATTR_FIELD_INNER_IP_PROTOCOL }, + { MATCH_INNER_SRC_MAC, SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_MAC }, + { MATCH_INNER_DST_MAC, SAI_ACL_ENTRY_ATTR_FIELD_INNER_DST_MAC }, + { MATCH_INNER_SRC_IP, SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP}, { MATCH_INNER_L4_SRC_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT }, { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT }, { MATCH_BTH_OPCODE, SAI_ACL_ENTRY_ATTR_FIELD_BTH_OPCODE}, - { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME} + { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME}, + { MATCH_TUNNEL_TERM, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_TERMINATED}, + { MATCH_METADATA, SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META} }; static acl_range_type_lookup_t aclRangeTypeLookup = @@ -93,6 +110,12 @@ static acl_rule_attr_lookup_t aclL3ActionLookup = { ACTION_PACKET_ACTION, SAI_ACL_ENTRY_ATTR_ACTION_PACKET_ACTION }, { ACTION_REDIRECT_ACTION, SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT }, { ACTION_DO_NOT_NAT_ACTION, SAI_ACL_ENTRY_ATTR_ACTION_NO_NAT }, + { ACTION_DISABLE_TRIM, SAI_ACL_ENTRY_ATTR_ACTION_PACKET_TRIM_DISABLE } +}; + +static acl_rule_attr_lookup_t aclInnerActionLookup = +{ + { ACTION_INNER_SRC_MAC_REWRITE_ACTION, SAI_ACL_ENTRY_ATTR_ACTION_SET_INNER_SRC_MAC}, }; static acl_rule_attr_lookup_t aclMirrorStageLookup = @@ -120,6 +143,13 @@ static acl_packet_action_lookup_t aclPacketActionLookup = { { PACKET_ACTION_FORWARD, SAI_PACKET_ACTION_FORWARD }, { PACKET_ACTION_DROP, SAI_PACKET_ACTION_DROP }, + { PACKET_ACTION_COPY, SAI_PACKET_ACTION_COPY }, +}; + +static acl_rule_attr_lookup_t aclMetadataDscpActionLookup = +{ + { ACTION_META_DATA, SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA}, + { ACTION_DSCP, SAI_ACL_ENTRY_ATTR_ACTION_SET_DSCP} }; static acl_dtel_flow_op_type_lookup_t aclDTelFlowOpTypeLookup = @@ -349,6 +379,42 @@ static acl_table_action_list_lookup_t defaultAclActionList = } } } + }, + { + // MARK_META + TABLE_TYPE_MARK_META, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_SET_ACL_META_DATA + } + } + } + }, + { + // MARK_METAV6 + TABLE_TYPE_MARK_META_V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_SET_ACL_META_DATA + } + } + } + }, + { + // EGR_SET_DSCP + TABLE_TYPE_EGR_SET_DSCP, + { + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_SET_DSCP + } + } + } } }; @@ -414,6 +480,18 @@ static acl_table_match_field_lookup_t stageMandatoryMatchFields = } } } + }, + { + // EGR_SET_DSCP + TABLE_TYPE_EGR_SET_DSCP, + { + { + ACL_STAGE_EGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META + } + } + } } }; @@ -553,6 +631,58 @@ bool AclTableRangeMatch::validateAclRuleMatch(const AclRule& rule) const return true; } +void AclRule::TunnelNH::load(const std::string& target) +{ + parse(target); + + VxlanTunnelOrch* vxlan_orch = gDirectory.get(); + /* Only the first call creates the SAI object, further calls just increment the ref count */ + oid = vxlan_orch->createNextHopTunnel(tunnel_name, endpoint_ip, mac, vni); +} + +void AclRule::TunnelNH::parse(const std::string& target) +{ + /* Expected Format: endpoint_ip@tunnel_name[,vni][,mac] */ + auto at_pos = target.find('@'); + if (at_pos == std::string::npos) + { + throw std::logic_error("Invalid format for Tunnel Next Hop"); + } + + endpoint_ip = swss::IpAddress(target.substr(0, at_pos)); + std::stringstream ss(target.substr(at_pos + 1)); + + vector components; + while (ss.good()) + { + std::string substr; + getline(ss, substr, ','); + components.push_back(substr); + } + if (components.empty()) + { + throw std::logic_error("Invalid format for Tunnel Next Hop"); + } + + tunnel_name = components[0]; + if (components.size() >= 2) + { + vni = static_cast(std::stoul(components[1])); + } + if (components.size() == 3) + { + mac = swss::MacAddress(components[2]); + } +} + +void AclRule::TunnelNH::clear() +{ + oid = SAI_NULL_OBJECT_ID; + VxlanTunnelOrch* vxlan_orch = gDirectory.get(); + vxlan_orch->removeNextHopTunnel(tunnel_name, endpoint_ip, mac, vni); +} + + string AclTableType::getName() const { return m_name; @@ -706,11 +836,16 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT auto mirrorAction = aclMirrorStageLookup.find(action); auto dtelAction = aclDTelActionLookup.find(action); auto otherAction = aclOtherActionLookup.find(action); - + auto metadataAction = aclMetadataDscpActionLookup.find(action); + auto innerAction = aclInnerActionLookup.find(action); if (l3Action != aclL3ActionLookup.end()) { saiActionAttr = l3Action->second; } + else if (innerAction != aclInnerActionLookup.end()) + { + saiActionAttr = innerAction->second; + } else if (mirrorAction != aclMirrorStageLookup.end()) { saiActionAttr = mirrorAction->second; @@ -723,6 +858,10 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT { saiActionAttr = otherAction->second; } + else if (metadataAction != aclMetadataDscpActionLookup.end()) + { + saiActionAttr = metadataAction->second; + } else { SWSS_LOG_ERROR("Unknown action %s", action.c_str()); @@ -805,6 +944,17 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) { return false; } + else if (attr_name == MATCH_TUNNEL_TERM) + { + matchData.data.booldata = (to_upper(attr_value) == "TRUE");; + } + else if (attr_name == MATCH_INNER_DST_MAC || attr_name == MATCH_INNER_SRC_MAC) + { + swss::MacAddress mac(attr_value); + swss::MacAddress mask(MAC_EXACT_MATCH); + memcpy(matchData.data.mac, mac.getMac(), sizeof(sai_mac_t)); + memcpy(matchData.mask.mac, mask.getMac(), sizeof(sai_mac_t)); + } else if (attr_name == MATCH_IN_PORTS) { auto ports = tokenize(attr_value, ','); @@ -865,6 +1015,23 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) matchData.data.objlist.count = static_cast(outPorts.size()); matchData.data.objlist.list = outPorts.data(); } + else if (attr_name == MATCH_OUT_PORT) + { + auto alias = attr_value; + Port port; + if (!gPortsOrch->getPort(alias, port)) + { + SWSS_LOG_ERROR("Failed to locate port %s", alias.c_str()); + return false; + } + if (port.m_type != Port::PHY) + { + SWSS_LOG_ERROR("Cannot bind rule to %s: OUT_PORT can only match physical interfaces", alias.c_str()); + return false; + } + + matchData.data.oid = port.m_port_id; + } else if (attr_name == MATCH_IP_TYPE) { if (!processIpType(attr_value, matchData.data.u32)) @@ -928,7 +1095,7 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) matchData.data.u8 = to_uint(attr_value); matchData.mask.u8 = 0xFF; } - else if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP) + else if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP || attr_name == MATCH_INNER_SRC_IP) { IpPrefix ip(attr_value); @@ -1032,6 +1199,18 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) return false; } } + else if (attr_name == MATCH_METADATA) + { + matchData.data.u32 = to_uint(attr_value); + matchData.mask.u32 = 0xFFFFFFFF; + + if (matchData.data.u32 < m_pAclOrch->getAclMetaDataMin() || matchData.data.u32 > m_pAclOrch->getAclMetaDataMax()) + { + SWSS_LOG_ERROR("Invalid MATCH_METADATA configuration: %s, expected value between %d - %d", attr_value.c_str(), + m_pAclOrch->getAclMetaDataMin(), m_pAclOrch->getAclMetaDataMax()); + return false; + } + } } catch (exception &e) { @@ -1207,7 +1386,17 @@ void AclRule::decreaseNextHopRefCount() } m_redirect_target_next_hop_group.clear(); } - + if (m_redirect_target_tun_nh.oid != SAI_NULL_OBJECT_ID) + { + try + { + m_redirect_target_tun_nh.clear(); + } + catch (const std::runtime_error& e) + { + SWSS_LOG_ERROR("Failed to remove tunnel nh reference %s, ACL Rule: %s", e.what(), m_id.c_str()); + } + } return; } @@ -1599,7 +1788,7 @@ bool AclRule::getCreateCounter() const return m_createCounter; } -shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data) +shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data, MetaDataMgr * m_metadataMgr) { shared_ptr aclRule; @@ -1615,6 +1804,14 @@ shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOr { return make_shared(acl, rule, table); } + else if (aclInnerActionLookup.find(action) != aclInnerActionLookup.cend()) + { + return make_shared(acl, rule, table); + } + else if (acl->isUsingEgrSetDscp(table) || table == EGR_SET_DSCP_TABLE_ID) + { + return make_shared(acl, rule, table, m_metadataMgr); + } else if (aclDTelActionLookup.find(action) != aclDTelActionLookup.cend()) { if (!dtel) @@ -1843,6 +2040,12 @@ bool AclRulePacket::validateAddAction(string attr_name, string _attr_value) actionData.parameter.booldata = true; action_str = ACTION_DO_NOT_NAT_ACTION; } + // handle PACKET_ACTION_DISABLE_TRIM in ACTION_PACKET_ACTION + else if (attr_value == PACKET_ACTION_DISABLE_TRIM) + { + actionData.parameter.booldata = true; + action_str = ACTION_DISABLE_TRIM; + } else { return false; @@ -1896,21 +2099,38 @@ sai_object_id_t AclRulePacket::getRedirectObjectId(const string& redirect_value) try { NextHopKey nh(target); - if (!m_pAclOrch->m_neighOrch->hasNextHop(nh)) + if (m_pAclOrch->m_neighOrch->hasNextHop(nh)) { - SWSS_LOG_ERROR("ACL Redirect action target next hop ip: '%s' doesn't exist on the switch", nh.to_string().c_str()); - return SAI_NULL_OBJECT_ID; + m_redirect_target_next_hop = target; + m_pAclOrch->m_neighOrch->increaseNextHopRefCount(nh); + return m_pAclOrch->m_neighOrch->getNextHopId(nh); } - - m_redirect_target_next_hop = target; - m_pAclOrch->m_neighOrch->increaseNextHopRefCount(nh); - return m_pAclOrch->m_neighOrch->getNextHopId(nh); } catch (...) { // no error, just try next variant } + // Try to parse if this is a tunnel nexthop. + try + { + m_redirect_target_tun_nh.load(target); + if (SAI_NULL_OBJECT_ID != m_redirect_target_tun_nh.oid) + { + SWSS_LOG_INFO("Tunnel Next Hop Found: oid:0x%" PRIx64 ", target: %s", m_redirect_target_tun_nh.oid, target.c_str()); + return m_redirect_target_tun_nh.oid; + } + } + catch (std::logic_error& e) + { + // no error, just try next variant + } + catch (const std::runtime_error& e) + { + SWSS_LOG_ERROR("Failed to create/fetch tunnel next hop, %s, err: %s", target.c_str(), e.what()); + return SAI_NULL_OBJECT_ID; + } + // try to parse nh group the set of try { @@ -1958,6 +2178,70 @@ void AclRulePacket::onUpdate(SubjectType, void *) // Do nothing } +AclRuleInnerSrcMacRewrite::AclRuleInnerSrcMacRewrite(AclOrch *aclOrch, string rule, string table, bool createCounter) : + AclRule(aclOrch, rule, table, createCounter) + { + } + + bool AclRuleInnerSrcMacRewrite::validateAddAction(string attr_name, string _attr_value) + { + SWSS_LOG_ENTER(); + + sai_acl_action_data_t actionData; + + auto action_str = attr_name; + + if (attr_name == ACTION_INNER_SRC_MAC_REWRITE_ACTION) + { + if (!_attr_value.empty()) + { + MacAddress inner_src_mac_addr; + try + { + inner_src_mac_addr = MacAddress(_attr_value); + } + catch (invalid_argument &e) + { + SWSS_LOG_ERROR("Invalid Mac Address %s", _attr_value.c_str()); + return false; + } + + memcpy(actionData.parameter.mac, inner_src_mac_addr.getMac(), sizeof(sai_mac_t)); + action_str = ACTION_INNER_SRC_MAC_REWRITE_ACTION; + SWSS_LOG_INFO("Converting the Mac address %s to SAI acl action parameter", _attr_value.c_str()); + } + + else + { + return false; + } + } + else + { + return false; + } + + actionData.enable = true; + return setAction(aclInnerActionLookup[action_str], actionData); + } + + bool AclRuleInnerSrcMacRewrite::validate() + { + SWSS_LOG_ENTER(); + + if ((m_rangeConfig.empty() && m_matches.empty()) || m_actions.size() != 1 ) + { + return false; + } + + return true; + } + + void AclRuleInnerSrcMacRewrite::onUpdate(SubjectType type, void *cntx) + { + //do nothing + } + AclRuleMirror::AclRuleMirror(AclOrch *aclOrch, MirrorOrch *mirror, string rule, string table) : AclRule(aclOrch, rule, table), m_state(false), @@ -2004,6 +2288,23 @@ bool AclRuleMirror::validate() return true; } +bool AclRuleMirror::createCounter() +{ + SWSS_LOG_ENTER(); + + bool state = false; + + m_pMirrorOrch->getSessionStatus(m_sessionName, state); + + // If the mirror session is active, create the ACL counter + if(state) + { + return AclRule::createCounter(); + } + + return true; +} + bool AclRuleMirror::createRule() { SWSS_LOG_ENTER(); @@ -2133,7 +2434,11 @@ void AclRuleMirror::onUpdate(SubjectType type, void *cntx) if (update->active) { SWSS_LOG_INFO("Activating mirroring ACL %s for session %s", m_id.c_str(), m_sessionName.c_str()); - activate(); + // During mirror session activation, the newly created counter needs to be registered to the FC. + if(activate() && hasCounter()) + { + m_pAclOrch->registerFlexCounter(*this); + } } else { @@ -2142,6 +2447,105 @@ void AclRuleMirror::onUpdate(SubjectType type, void *cntx) } } +AclRuleUnderlaySetDscp::AclRuleUnderlaySetDscp(AclOrch *aclOrch, string rule, string table, MetaDataMgr* m_metaDataMgr, bool createCounter): + AclRule(aclOrch, rule, table, createCounter), + table_id(table), + m_metaDataMgr(m_metaDataMgr) +{ +} + +uint32_t AclRuleUnderlaySetDscp::getDscpValue() const +{ + return cachedDscpValue; +} + +uint32_t AclRuleUnderlaySetDscp::getMetadata() const +{ + return cachedMetadata; +} + +bool AclRuleUnderlaySetDscp::validateAddAction(string attr_name, string _attr_value) +{ + SWSS_LOG_ENTER(); + + string attr_value = to_upper(_attr_value); + + sai_object_id_t table_oid = m_pAclOrch->getTableById(table_id); + auto aclTable = m_pAclOrch->getTableByOid(table_oid); + string type = aclTable->type.getName(); + string key = table_id + ":" + m_id; + // we handle the allocation of metadata for here. based on SET_DSCP action, we check if a metadata is already allocated then we reuse it + // otherwise we allocate a new metadata. This metadata is then set an the action for the Rule of this table. We also cache the SET_DSCP + // value and the allocated metadata in a the rule structure itself so that when we go to addRule we can use these to add the + // egr_set_dscp rule + if (attr_name == ACTION_DSCP && (type == TABLE_TYPE_MARK_META || type == TABLE_TYPE_MARK_META_V6)) + { + if (!m_pAclOrch->isUsingEgrSetDscp(table_id)) + { + + SWSS_LOG_ERROR("Unexpected Error. Table %s not asssociated with EGR_SET_DSCP table", table_id.c_str()); + return false; + } + if (m_pAclOrch->hasMetaDataRefCount(key) > 0) + { + SWSS_LOG_ERROR("Metadata already allocated for Rule %s in table %s. Remove and Re-add the rule.", m_id.c_str(), table_id.c_str()); + return false; + } + u_int8_t actionDscpValue = uint8_t(std::stoi(attr_value)); + cachedDscpValue = actionDscpValue; + auto metadata = m_metaDataMgr->getFreeMetaData(actionDscpValue); + + if (!m_metaDataMgr->isValidMetaData(metadata)) + { + SWSS_LOG_ERROR("Failed to get free metadata for DSCP value %d", actionDscpValue); + return false; + } + SWSS_LOG_ERROR("Allocated metadata %d for DSCP value %d", metadata, actionDscpValue); + cachedMetadata = metadata; + attr_name = ACTION_META_DATA; + attr_value = std::to_string(metadata); + m_pAclOrch->addMetaDataRef(key, metadata); + } + + + sai_acl_action_data_t actionData; + actionData.parameter.u32 = 0; + + SWSS_LOG_INFO("attr_name: %s, attr_value: %s int val %d", attr_name.c_str(), attr_value.c_str(), to_uint(attr_value)); + // we only handle DSCP and META_DATA actions for now. + if (attr_name == ACTION_DSCP || attr_name == ACTION_META_DATA) + { + actionData.parameter.u32 = to_uint(attr_value); + if (attr_name == ACTION_META_DATA && (actionData.parameter.u32 < m_pAclOrch->getAclMetaDataMin() || actionData.parameter.u32 > m_pAclOrch->getAclMetaDataMax())) + { + return false; + } + } + else + { + return false; + } + + actionData.enable = true; + return setAction(aclMetadataDscpActionLookup[attr_name], actionData); +} + +bool AclRuleUnderlaySetDscp::validate() +{ + SWSS_LOG_ENTER(); + if ( m_actions.size() != 1) + { + return false; + } + + return true; +} + +void AclRuleUnderlaySetDscp::onUpdate(SubjectType, void *) +{ + // Do nothing +} + AclTable::AclTable(AclOrch *pAclOrch, string id) noexcept : m_pAclOrch(pAclOrch), id(id) { @@ -2202,9 +2606,10 @@ bool AclTable::addStageMandatoryRangeFields() SWSS_LOG_ENTER(); string platform = getenv("platform") ? getenv("platform") : ""; + string sub_platform = getenv("sub_platform") ? getenv("sub_platform") : ""; auto match = SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE; - if ((platform == BRCM_PLATFORM_SUBSTRING) && + if ((platform == BRCM_PLATFORM_SUBSTRING) && (sub_platform != BRCM_DNX_PLATFORM_SUBSTRING) && (stage == ACL_STAGE_EGRESS)) { return false; @@ -2585,6 +2990,12 @@ bool AclTable::add(shared_ptr newRule) if (ruleIter != rules.end()) { // If ACL rule already exists, delete it first + if (ruleIter->second->hasCounter()) + { + // Deregister the flex counter before deleting the rule + // A new flex counter will be created when the new rule is added + m_pAclOrch->deregisterFlexCounter(*(ruleIter->second)); + } if (ruleIter->second->remove()) { rules.erase(ruleIter); @@ -2953,9 +3364,10 @@ AclRange *AclRange::create(sai_acl_range_type_t type, int min, int max) // work around to avoid syncd termination on SAI error due to max count of ranges reached // can be removed when syncd start passing errors to the SAI callers char *platform = getenv("platform"); - if (platform && strstr(platform, MLNX_PLATFORM_SUBSTRING)) + if (platform) { - if (m_ranges.size() >= MLNX_MAX_RANGES_COUNT) + if ((strstr(platform, MLNX_PLATFORM_SUBSTRING) && m_ranges.size() >= MLNX_MAX_RANGES_COUNT) || + (strstr(platform, CLX_PLATFORM_SUBSTRING) && m_ranges.size() >= CLNX_MAX_RANGES_COUNT)) { SWSS_LOG_ERROR("Maximum numbers of ACL ranges reached"); return NULL; @@ -3074,10 +3486,11 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr platform == CISCO_8000_PLATFORM_SUBSTRING || platform == MLNX_PLATFORM_SUBSTRING || platform == BFN_PLATFORM_SUBSTRING || - platform == MRVL_PLATFORM_SUBSTRING || - platform == INVM_PLATFORM_SUBSTRING || + platform == MRVL_PRST_PLATFORM_SUBSTRING || + platform == MRVL_TL_PLATFORM_SUBSTRING || platform == NPS_PLATFORM_SUBSTRING || platform == XS_PLATFORM_SUBSTRING || + platform == CLX_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) { m_mirrorTableCapabilities = @@ -3095,8 +3508,8 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr }; } - if ( platform == MRVL_PLATFORM_SUBSTRING || - platform == INVM_PLATFORM_SUBSTRING || + if ( platform == MRVL_PRST_PLATFORM_SUBSTRING || + platform == MRVL_TL_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) { m_L3V4V6Capability = @@ -3130,8 +3543,9 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr // In Broadcom DNX platform also, V4 and V6 rules are stored in different tables if (platform == MLNX_PLATFORM_SUBSTRING || platform == CISCO_8000_PLATFORM_SUBSTRING || - platform == MRVL_PLATFORM_SUBSTRING || + platform == MRVL_PRST_PLATFORM_SUBSTRING || platform == XS_PLATFORM_SUBSTRING || + platform == CLX_PLATFORM_SUBSTRING || (platform == BRCM_PLATFORM_SUBSTRING && sub_platform == BRCM_DNX_PLATFORM_SUBSTRING)) { m_isCombinedMirrorV6Table = false; @@ -3142,24 +3556,126 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr } - // Store the capabilities in state database - // TODO: Move this part of the code into syncd - vector fvVector; - for (auto const& it : m_mirrorTableCapabilities) + if (platform == VS_PLATFORM_SUBSTRING) { - string value = it.second ? "true" : "false"; - if (it.first == TABLE_TYPE_MIRROR) - { - fvVector.emplace_back(TABLE_TYPE_MIRROR, value); - } - else if (it.first == TABLE_TYPE_MIRRORV6) + // For testing on VS the following values will be used. + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_RANGE_CAPABLE] = "true"; + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_MIN] = "1"; + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_MAX] = "7"; + m_switchMetaDataCapabilities[TABLE_ACL_ENTRY_ATTR_META_CAPABLE] = "true"; + m_switchMetaDataCapabilities[TABLE_ACL_ENTRY_ACTION_META_CAPABLE] = "true"; + m_metaDataMgr.populateRange(1,7); + } + else + { + // check switch capability of Metadata attribute, action and range. + // SAI_SWITCH_ATTR_ACL_USER_META_DATA_RANGE support and range values. + // SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA + // SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META + + sai_status_t status = SAI_STATUS_SUCCESS; + sai_attr_capability_t capability; + uint16_t metadataMin = 0; + uint16_t metadataMax = 0; + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_MIN] = "0"; + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_MAX] = "0"; + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_RANGE_CAPABLE] = "false"; + m_switchMetaDataCapabilities[TABLE_ACL_ENTRY_ATTR_META_CAPABLE] = "false"; + m_switchMetaDataCapabilities[TABLE_ACL_ENTRY_ACTION_META_CAPABLE] = "false"; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ACL_USER_META_DATA_RANGE, &capability); + if (status != SAI_STATUS_SUCCESS) { - fvVector.emplace_back(TABLE_TYPE_MIRRORV6, value); + SWSS_LOG_WARN("Could not query ACL_USER_META_DATA_RANGE %d", status); } else { - // ignore - } + if (capability.get_implemented) + { + sai_attribute_t attrs[1]; + attrs[0].id = SAI_SWITCH_ATTR_ACL_USER_META_DATA_RANGE; + sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, attrs); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Could not get range for ACL_USER_META_DATA_RANGE %d", status); + } + else + { + SWSS_LOG_NOTICE("ACL_USER_META_DATA_RANGE, min: %u, max: %u", attrs[0].value.u32range.min, attrs[0].value.u32range.max); + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_RANGE_CAPABLE] = "true"; + if (attrs[0].value.u32range.min > MAX_META_DATA_VALUE) + { + SWSS_LOG_ERROR("Unsupported ACL_USER_META_DATA_RANGE min value"); + metadataMin = 0; + } + else + { + metadataMin = uint16_t(attrs[0].value.u32range.min); + } + if (attrs[0].value.u32range.max > MAX_META_DATA_VALUE) + { + metadataMax = MAX_META_DATA_VALUE; + } + else + { + metadataMax = uint16_t(attrs[0].value.u32range.max); + } + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_MIN] = std::to_string(metadataMin); + m_switchMetaDataCapabilities[TABLE_ACL_USER_META_DATA_MAX] = std::to_string(metadataMax); + } + + } + SWSS_LOG_NOTICE("ACL_USER_META_DATA_RANGE capability %d", capability.get_implemented); + } + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_ACL_ENTRY, SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Could not query ACL_ENTRY_ATTR_FIELD_ACL_USER_META %d", status); + } + else + { + if (capability.set_implemented) + { + m_switchMetaDataCapabilities[TABLE_ACL_ENTRY_ATTR_META_CAPABLE] = "true"; + } + SWSS_LOG_NOTICE("ACL_ENTRY_ATTR_FIELD_ACL_USER_META capability %d", capability.set_implemented); + } + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_ACL_ENTRY, SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Could not query ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA %d", status); + } + else + { + if (capability.set_implemented) + { + m_switchMetaDataCapabilities[TABLE_ACL_ENTRY_ACTION_META_CAPABLE] = "true"; + } + SWSS_LOG_NOTICE("ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA capability %d", capability.set_implemented); + } + + m_metaDataMgr.populateRange(metadataMin, metadataMax); + + } + // Store the capabilities in state database + // TODO: Move this part of the code into syncd + vector fvVector; + for (auto const& it : m_mirrorTableCapabilities) + { + string value = it.second ? "true" : "false"; + if (it.first == TABLE_TYPE_MIRROR) + { + fvVector.emplace_back(TABLE_TYPE_MIRROR, value); + } + else if (it.first == TABLE_TYPE_MIRRORV6) + { + fvVector.emplace_back(TABLE_TYPE_MIRRORV6, value); + } + else + { + // ignore + } } m_switchOrch->set_switch_capability(fvVector); @@ -3194,14 +3710,14 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr m_mirrorV6TableId[stage] = ""; } - initDefaultTableTypes(); + initDefaultTableTypes(platform, sub_platform); // Attach observers m_mirrorOrch->attach(this); gPortsOrch->attach(this); } -void AclOrch::initDefaultTableTypes() +void AclOrch::initDefaultTableTypes(const string& platform, const string& sub_platform) { SWSS_LOG_ENTER(); @@ -3288,12 +3804,26 @@ void AclOrch::initDefaultTableTypes() .build() ); - addAclTableType( - builder.withName(TABLE_TYPE_PFCWD) - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) - .build() - ); + // Use SAI_ACL_BIND_POINT_TYPE_SWITCH in BRCM DNX platforms to use shared egress ACL table for PFCWD. + if (platform == BRCM_PLATFORM_SUBSTRING && sub_platform == BRCM_DNX_PLATFORM_SUBSTRING) + { + addAclTableType( + builder.withName(TABLE_TYPE_PFCWD) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_SWITCH) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_OUT_PORT)) + .build() + ); + } + else + { + addAclTableType( + builder.withName(TABLE_TYPE_PFCWD) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) + .build() + ); + } addAclTableType( builder.withName(TABLE_TYPE_DROP) @@ -3396,7 +3926,44 @@ void AclOrch::initDefaultTableTypes() .build() ); } + if (isAclMetaDataSupported()) + { + addAclTableType( + builder.withName(TABLE_TYPE_MARK_META) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) + .build() + ); + addAclTableType( + builder.withName(TABLE_TYPE_MARK_META_V6) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) + .build() + ); + + addAclTableType( + builder.withName(TABLE_TYPE_EGR_SET_DSCP) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META)) + .build() + ); + } // Placeholder for control plane tables addAclTableType(builder.withName(TABLE_TYPE_CTRLPLANE).build()); } @@ -3494,8 +4061,12 @@ void AclOrch::putAclActionCapabilityInDB(acl_stage_type_t stage) string delimiter; ostringstream acl_action_value_stream; ostringstream is_action_list_mandatory_stream; - - for (const auto& action_map: {aclL3ActionLookup, aclMirrorStageLookup, aclDTelActionLookup}) + acl_rule_attr_lookup_t metadataActionLookup = {}; + if (isAclMetaDataSupported()) + { + metadataActionLookup = aclMetadataDscpActionLookup; + } + for (const auto& action_map: {aclL3ActionLookup, aclMirrorStageLookup, aclDTelActionLookup, metadataActionLookup, aclInnerActionLookup}) { for (const auto& it: action_map) { @@ -3735,6 +4306,27 @@ void AclOrch::getAddDeletePorts(AclTable &newT, newPortSet.insert(p); } + // if the table type is TABLE_TYPE_EGR_SET_DSCP we use a single instance of this + // table with all the tables of type TABLE_TYPE_MARK_META/v6 therefoere we need to + // to collect all the ports from the tables of type TABLE_TYPE_MARK_META/v6 and + // put them in the newPortSet. + if (curT.id == EGR_SET_DSCP_TABLE_ID) + { + for(auto iter : m_egrSetDscpRef) + { + auto tableOid = getTableById(iter); + auto existingtable = m_AclTables.at(tableOid); + for (auto p : existingtable.pendingPortSet) + { + newPortSet.insert(p); + } + for (auto p : existingtable.portSet) + { + newPortSet.insert(p); + } + } + } + // Collect current ports for (auto p : curT.pendingPortSet) { @@ -3830,7 +4422,6 @@ bool AclOrch::updateAclTablePorts(AclTable &newTable, AclTable &curTable) bool AclOrch::updateAclTable(AclTable ¤tTable, AclTable &newTable) { SWSS_LOG_ENTER(); - currentTable.description = newTable.description; if (!updateAclTablePorts(newTable, currentTable)) { @@ -3841,10 +4432,193 @@ bool AclOrch::updateAclTable(AclTable ¤tTable, AclTable &newTable) return true; } -bool AclOrch::updateAclTable(string table_id, AclTable &table) +EgressSetDscpTableStatus AclOrch::addEgrSetDscpTable(string table_id, AclTable &table, string orignalTableTypeName) { SWSS_LOG_ENTER(); + EgressSetDscpTableStatus status = EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_REQUIRED; + AclTable egrSetDscpTable(this); + + // we only add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP or TABLE_TYPE_UNDERLAY_SET_DSCPV6 + // otherwise we return EGRESS_SET_DSCP_TABLE_NOT_REQUIRED. + if (orignalTableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCP || orignalTableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCPV6) + { + if (!isAclMetaDataSupported()) + { + SWSS_LOG_ERROR("Platform does not support MARK_META/MARK_METAV6 tables."); + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_SUPPORTED; + } + AclTable egrSetDscpTable(this); + + // copy ports from the TABLE_TYPE_UNDERLAY_SET_DSCP/v6 to the egrSetDscpTable. + std::set ports; + ports.insert(table.portSet.begin(), table.portSet.end()); + ports.insert(table.pendingPortSet.begin(), table.pendingPortSet.end()); + + for (auto alias : ports) + { + Port port; + if (!gPortsOrch->getPort(alias, port)) + { + SWSS_LOG_INFO("Add unready port %s to pending list for ACL table %s", + alias.c_str(), EGR_SET_DSCP_TABLE_ID); + egrSetDscpTable.pendingPortSet.emplace(alias); + continue; + } + + sai_object_id_t bind_port_id; + if (!getAclBindPortId(port, bind_port_id)) + { + SWSS_LOG_ERROR("Failed to get port %s bind port ID for ACL table %s", + alias.c_str(), EGR_SET_DSCP_TABLE_ID); + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; + } + egrSetDscpTable.link(bind_port_id); + egrSetDscpTable.portSet.emplace(alias); + } + + egrSetDscpTable.id = EGR_SET_DSCP_TABLE_ID; + egrSetDscpTable.stage = ACL_STAGE_EGRESS; + auto egrSetDscpTableType = getAclTableType(TABLE_TYPE_EGR_SET_DSCP); + sai_object_id_t egrSetDscp_oid = getTableById(EGR_SET_DSCP_TABLE_ID); + // create the EGR_SET_DSCP fisrt time if not present. Otherwise update the existing table. + if (m_egrSetDscpRef.empty()) + { + // Create EGR_SET_DSCP table + egrSetDscpTable.validateAddType(*egrSetDscpTableType); + egrSetDscpTable.addMandatoryActions(); + if (!egrSetDscpTable.validate()) + { + SWSS_LOG_ERROR("Failed to validate ACL table %s", + EGR_SET_DSCP_TABLE_ID); + // since we failed to create the table, there is no need for rollback. + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; + } + if (!addAclTable(egrSetDscpTable)) + { + SWSS_LOG_ERROR("Failed to create ACL table EgressSetDSCP"); + // since we failed to create the table, there is no need for rollback. + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; + } + } + else + { + if (updateAclTable(m_AclTables[egrSetDscp_oid], egrSetDscpTable)) + { + SWSS_LOG_NOTICE("Successfully updated existing ACL table EgressSetDSCP"); + // We do not set the status here as we still have to update + // TABLE_TYPE_MARK_META/V6 table. + } + else + { + SWSS_LOG_ERROR("Failed to update existing ACL table EgressSetDSCP"); + // there is no need for roollback as we have not made any changes to the MARK_META/V6 tables. + // We can simply return false. + return EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED; + } + } + // keep track of the fact that this table is now associated with the EGR_SET_DSCP table. + + m_egrSetDscpRef.insert(table_id); + SWSS_LOG_NOTICE("Added ACL table %s to EgrSetDscpRef", table_id.c_str()); + status = EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS; + + } + + return status; +} + +bool AclOrch::removeEgrSetDscpTable(string table_id) +{ + m_egrSetDscpRef.erase(table_id); + SWSS_LOG_INFO("attempting to remove %s reference", table_id.c_str()); + if (m_egrSetDscpRef.size() == 0) + { + if (!removeAclTable(EGR_SET_DSCP_TABLE_ID)) + { + m_egrSetDscpRef.insert(table_id); + SWSS_LOG_ERROR("Failed to remove ACL table %s", EGR_SET_DSCP_TABLE_ID); + return false; + } + } + else + { + //create a dummy table with no ports. The updateAclTable will remove the + // unique ports which were associated with table_id. + // The way this works is as follows. + // The getAddDeletePorts function collects all the ports of the tables which + // are in m_egrSetDscpRef set and adds those ports to the EGR_SET_DSCP. + // As a result the EGR_SET_DSCP is associated with all the ports to which the + // TABLE_TYPE_UNDERLAY_SET_DSCP/V6 tables are attached. + // + // when we want to remove one of the tables referencing the EGR_SET_DSCP. + // we remove it from m_egrSetDscpRef, then send a updateAclTable with a + // EGR_SET_DSCP table with no assiciated ports. + // The getAddDeletePorts collects all the ports except for the one assocated + // with the table we just removed from m_egrSetDscpRef and updated the EGR_SET_DSCP + // with new port set. + AclTable dummyTable(this); + dummyTable.id = EGR_SET_DSCP_TABLE_ID; + dummyTable.stage = ACL_STAGE_EGRESS; + if (!updateAclTable(EGR_SET_DSCP_TABLE_ID, dummyTable, "")) + { + m_egrSetDscpRef.insert(table_id); + SWSS_LOG_ERROR("Failed to remove ACL table %s", EGR_SET_DSCP_TABLE_ID); + return false; + } + SWSS_LOG_NOTICE("Successfully removed the %s table from the reference of %s", table_id.c_str(), EGR_SET_DSCP_TABLE_ID); + } + return true; +} + +bool AclOrch::addEgrSetDscpRule(string key, string dscpAction) +{ + auto metadata = m_egrDscpRuleMetadata[key]; + + if (m_metadataEgrDscpRule[metadata].size() == 1) + { + // Create EGR_SET_DSCP rule. set the match criteria to metadata value and action to dscpAction. + auto egrSetDscpRule = make_shared(this, std::to_string(metadata), EGR_SET_DSCP_TABLE_ID, &m_metaDataMgr); + egrSetDscpRule->validateAddMatch(MATCH_METADATA, std::to_string(metadata)); + egrSetDscpRule->validateAddAction(ACTION_DSCP, dscpAction); + + if (egrSetDscpRule->validate()) + { + if (!addAclRule(egrSetDscpRule, EGR_SET_DSCP_TABLE_ID)) + { + SWSS_LOG_ERROR("Failed to create ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); + return false; + } + } + else + { + SWSS_LOG_ERROR("Failed to validate ACL rule %d in table %s", metadata, EGR_SET_DSCP_TABLE_ID); + return false; + } + } + return true; +} + +bool AclOrch::removeEgrSetDscpRule(string key) +{ + auto metadata = m_egrDscpRuleMetadata[key]; + if (getMetaDataRefCount(metadata) == 1) + { + if(!removeAclRule(EGR_SET_DSCP_TABLE_ID, std::to_string(metadata))) + { + SWSS_LOG_ERROR("Failed to remove ACL rule %s in table %s", key.c_str(), EGR_SET_DSCP_TABLE_ID); + return false; + } + } + removeMetaDataRef(key, metadata); + m_metaDataMgr.recycleMetaData(metadata); + SWSS_LOG_ERROR("Freeing metadata %d for Rule %s", metadata, key.c_str()); + + return true; +} +bool AclOrch::updateAclTable(string table_id, AclTable &table) +{ + SWSS_LOG_ENTER(); auto tableOid = getTableById(table_id); if (tableOid == SAI_NULL_OBJECT_ID) { @@ -3861,6 +4635,34 @@ bool AclOrch::updateAclTable(string table_id, AclTable &table) return true; } +bool AclOrch::updateAclTable(string table_id, AclTable &table, string orignalTableTypeName) +{ + SWSS_LOG_ENTER(); + + // we call the addEgrSetDscpTable to add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP or TABLE_TYPE_UNDERLAY_SET_DSCPV6 + // for other tables it simply retuns EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_REQUIRED + EgressSetDscpTableStatus egrSetDscpStatus = addEgrSetDscpTable(table_id, table, orignalTableTypeName); + bool status = false; + if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED || + egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_SUPPORTED) + { + SWSS_LOG_INFO("Failed to add/update ACL table %s with %s",table_id.c_str(), orignalTableTypeName.c_str()); + return false; + } + + status = updateAclTable(table_id,table); + // if we have not updated the EGR_SET_DSCP, we simply need to return the status. + // otherewise we need to undo the changes we made to the EGR_SET_DSCP if the update + // of the MARK_META table failed. + if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS && !status) + { + // This is the scenario where we have successfully updated the EGR_SET_DSCP but failed to update the MARK_META table. + SWSS_LOG_ERROR("Reverting changes to EGR_SET_DSCP because update of %s failed", table_id.c_str()); + removeEgrSetDscpTable(table_id); + } + return status; +} + bool AclOrch::addAclTable(AclTable &newTable) { SWSS_LOG_ENTER(); @@ -3967,6 +4769,20 @@ bool AclOrch::addAclTable(AclTable &newTable) m_mirrorV6TableId[table_stage] = table_id; } + // We use SAI_ACL_BIND_POINT_TYPE_SWITCH for PFCWD table in DNX platform. + // This bind type requires to bind the table to switch. + string platform = getenv("platform") ? getenv("platform") : ""; + string sub_platform = getenv("sub_platform") ? getenv("sub_platform") : ""; + if (platform == BRCM_PLATFORM_SUBSTRING && sub_platform == BRCM_DNX_PLATFORM_SUBSTRING && + newTable.type.getName() == TABLE_TYPE_PFCWD) + { + if(!gSwitchOrch->bindAclTableToSwitch(ACL_STAGE_EGRESS, newTable.getOid())) + { + return false; + } + newTable.bindToSwitch = true; + } + return true; } else @@ -3976,10 +4792,44 @@ bool AclOrch::addAclTable(AclTable &newTable) } } +bool AclOrch::addAclTable(string table_id, AclTable &newTable, string orignalTableTypeName) +{ + SWSS_LOG_ENTER(); + // we call the addEgrSetDscpTable to add the EGR_SET_DSCP table if the table type is TABLE_TYPE_UNDERLAY_SET_DSCP + // or TABLE_TYPE_UNDERLAY_SET_DSCPV6. For other tables it simply retuns EGRESS_SET_DSCP_TABLE_NOT_REQUIRED. + EgressSetDscpTableStatus egrSetDscpStatus = addEgrSetDscpTable(table_id, newTable, orignalTableTypeName); + bool status = false; + if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_FAILED || + egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_NOT_SUPPORTED) + { + return false; + } + status = addAclTable(newTable); + // if we have not updated the EGR_SET_DSCP, we simply need to return the status. + // otherewise we need to undo the changes we made to the EGR_SET_DSCP if the update + // of the MARK_META table failed. + if (egrSetDscpStatus == EgressSetDscpTableStatus::EGRESS_SET_DSCP_TABLE_SUCCESS && !status) + { + // This is the scenario where we have successfully updated the EGR_SET_DSCP but failed to update the MARK_META table. + SWSS_LOG_ERROR("Reverting changes to EGR_SET_DSCP because update of %s failed", table_id.c_str()); + removeEgrSetDscpTable(table_id); + } + return status; +} + bool AclOrch::removeAclTable(string table_id) { SWSS_LOG_ENTER(); + if (m_egrSetDscpRef.find(table_id) != m_egrSetDscpRef.end()) + { + if (!removeEgrSetDscpTable(table_id)) + { + SWSS_LOG_ERROR("Failed to remove Egress Set DSCP table associated with ACL table %s", table_id.c_str()); + return false; + } + } + sai_object_id_t table_oid = getTableById(table_id); if (table_oid == SAI_NULL_OBJECT_ID) { @@ -3989,7 +4839,23 @@ bool AclOrch::removeAclTable(string table_id) /* If ACL rules associate with this table, remove the rules first.*/ bool suc = m_AclTables[table_oid].clear(); - if (!suc) return false; + if (!suc) + { + SWSS_LOG_ERROR("Failed to remove ACL rules in table %s", table_id.c_str()); + return false; + } + // Unbind table from switch if needed. + AclTable &table = m_AclTables.at(table_oid); + if (table.bindToSwitch) + { + // Only bind egress table to switch for now. + assert(table->stage == ACL_STAGE_EGRESS); + if(!gSwitchOrch->unbindAclTableFromSwitch(ACL_STAGE_EGRESS, table.getOid())) + { + SWSS_LOG_ERROR("Failed to unbind ACL table %s from switch", table_id.c_str()); + return false; + } + } if (deleteUnbindAclTable(table_oid) == SAI_STATUS_SUCCESS) { @@ -4074,28 +4940,56 @@ bool AclOrch::removeAclTableType(const string& tableTypeName) bool AclOrch::addAclRule(shared_ptr newRule, string table_id) { + SWSS_LOG_ENTER(); + bool needsEgrSetDscp = false; + string key = table_id + ":" + newRule->getId(); + // if the table is using EGR_SET_DSCP, we need to add the EGR_SET_DSCP rule. + if (isUsingEgrSetDscp(table_id)) + { + needsEgrSetDscp = true; + string dscpAction = std::to_string(std::static_pointer_cast(newRule)->getDscpValue()); + if (!addEgrSetDscpRule(key, dscpAction)) + { + SWSS_LOG_ERROR("Failed to add Egress Set Dscp rule for Rule %s in table %s.", + newRule->getId().c_str(), table_id.c_str()); + return false; + } + } + // add the regular rule. + bool status = true; sai_object_id_t table_oid = getTableById(table_id); if (table_oid == SAI_NULL_OBJECT_ID) { SWSS_LOG_ERROR("Failed to add ACL rule in ACL table %s. Table doesn't exist", table_id.c_str()); - return false; + status = false; } - - if (!m_AclTables[table_oid].add(newRule)) + if (status && !m_AclTables[table_oid].add(newRule)) { - return false; + status = false; } - if (newRule->hasCounter()) + if (status && newRule->hasCounter()) { registerFlexCounter(*newRule); } - - return true; + if(!status && needsEgrSetDscp) + { + removeEgrSetDscpRule(key); + return false; + } + return status; } bool AclOrch::removeAclRule(string table_id, string rule_id) { + string key = table_id + ":" + rule_id; + if (m_egrDscpRuleMetadata.find(key) != m_egrDscpRuleMetadata.end()) + { + if (!removeEgrSetDscpRule(key)) + { + return false; + } + } sai_object_id_t table_oid = getTableById(table_id); if (table_oid == SAI_NULL_OBJECT_ID) { @@ -4352,6 +5246,94 @@ bool AclOrch::isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_ac return it->second.find(param.s32) != it->second.cend(); } +bool AclOrch::isAclMetaDataSupported() const +{ + if (m_switchMetaDataCapabilities.at(TABLE_ACL_USER_META_DATA_RANGE_CAPABLE) == "true" && + m_switchMetaDataCapabilities.at(TABLE_ACL_ENTRY_ATTR_META_CAPABLE) == "true" && + m_switchMetaDataCapabilities.at(TABLE_ACL_ENTRY_ACTION_META_CAPABLE) == "true") + { + return true; + } + return false; +} + +uint16_t AclOrch::getAclMetaDataMin() const +{ + if (m_switchMetaDataCapabilities.at(TABLE_ACL_USER_META_DATA_RANGE_CAPABLE) == "true") + { + return uint16_t(std::stoi(m_switchMetaDataCapabilities.at(TABLE_ACL_USER_META_DATA_MIN))); + } + return 0; +} + +uint16_t AclOrch::getAclMetaDataMax() const +{ + if (m_switchMetaDataCapabilities.at(TABLE_ACL_USER_META_DATA_RANGE_CAPABLE) == "true") + { + return uint16_t(std::stoi(m_switchMetaDataCapabilities.at(TABLE_ACL_USER_META_DATA_MAX))); + } + return 0; +} + +bool AclOrch::isUsingEgrSetDscp(const string& table) const +{ + if (m_egrSetDscpRef.find(table) != m_egrSetDscpRef.end()) + { + return true; + } + return false; +} + +string AclOrch::translateUnderlaySetDscpTableTypeName(const string& tableTypeName) const +{ + // The TABLE_TYPE_UNDERLAY_SET_DSCP/V6 is translated to table translates into TABLE_TYPE_MARK_META/V6 + if (tableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCP) + { + return TABLE_TYPE_MARK_META; + } + else if(tableTypeName == TABLE_TYPE_UNDERLAY_SET_DSCPV6) + { + return TABLE_TYPE_MARK_META_V6; + } + return tableTypeName; +} + +void AclOrch::addMetaDataRef(string key, uint16_t metadata) +{ + m_egrDscpRuleMetadata[key] = metadata; + if (m_metadataEgrDscpRule.find(metadata) == m_metadataEgrDscpRule.end()) + { + m_metadataEgrDscpRule[metadata] = set(); + } + m_metadataEgrDscpRule[metadata].insert(key); + +} + +void AclOrch::removeMetaDataRef(string key, uint16_t metadata) +{ + m_metadataEgrDscpRule[metadata].erase(key); + m_egrDscpRuleMetadata.erase(key); +} + +uint32_t AclOrch::getMetaDataRefCount(uint16_t metadata) +{ + if (m_metadataEgrDscpRule.find(metadata) != m_metadataEgrDscpRule.end()) + { + return uint32_t(m_metadataEgrDscpRule[metadata].size()); + } + return 0; +} + +uint32_t AclOrch::hasMetaDataRefCount(string key) +{ + if (m_egrDscpRuleMetadata.find(key) != m_egrDscpRuleMetadata.end()) + { + auto md = m_egrDscpRuleMetadata[key]; + return uint32_t(m_metadataEgrDscpRule[md].size()); + } + return 0; +} + void AclOrch::doAclTableTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -4429,6 +5411,15 @@ void AclOrch::doAclTableTask(Consumer &consumer) } } + // For the case of Table type TABLE_TYPE_UNDERLAY_SET_DSCP/V6 we need to translate + // it to TABLE_TYPE_MARK_META/V6. We retain the original table type name in orignalTableTypeName + // and pass it ot the updateAclTable/ addAclTable functions. There based on the orignalTableTypeName + // we create/update the EgrSetDscp table. + string firstTableTypeName; + string unused; + string orignalTableTypeName = tableTypeName; + tableTypeName = translateUnderlaySetDscpTableTypeName(tableTypeName); + auto tableType = getAclTableType(tableTypeName); if (!tableType) { @@ -4454,7 +5445,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) m_AclTables[table_oid])) { // Update the existing table using the info in newTable - if (updateAclTable(m_AclTables[table_oid], newTable)) + if (updateAclTable(table_id, newTable, orignalTableTypeName)) { SWSS_LOG_NOTICE("Successfully updated existing ACL table %s", table_id.c_str()); @@ -4471,7 +5462,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else { - if (addAclTable(newTable)) + if (addAclTable(table_id, newTable, orignalTableTypeName)) { // Mark ACL table as ACTIVE setAclTableStatus(table_id, AclObjectStatus::ACTIVE); @@ -4479,6 +5470,7 @@ void AclOrch::doAclTableTask(Consumer &consumer) } else { + //we have failed to create the MarkMeta table, we need to remove the EgrSetDscp table setAclTableStatus(table_id, AclObjectStatus::PENDING_CREATION); it++; } @@ -4574,7 +5566,7 @@ void AclOrch::doAclRuleTask(Consumer &consumer) try { - newRule = AclRule::makeShared(this, m_mirrorOrch, m_dTelOrch, rule_id, table_id, t); + newRule = AclRule::makeShared(this, m_mirrorOrch, m_dTelOrch, rule_id, table_id, t, &m_metaDataMgr); } catch (exception &e) { @@ -4596,7 +5588,7 @@ void AclOrch::doAclRuleTask(Consumer &consumer) { bHasTCPFlag = true; } - if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP) + if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP || attr_name == MATCH_INNER_SRC_IP) { bHasIPV4 = true; } @@ -4653,13 +5645,13 @@ void AclOrch::doAclRuleTask(Consumer &consumer) } if (bHasIPV4 && bHasIPV6) - { - if (type == TABLE_TYPE_L3V4V6) - { - SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); - bAllAttributesOk = false; - } - } + { + if (type == TABLE_TYPE_L3V4V6) + { + SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); + bAllAttributesOk = false; + } + } // validate and create ACL rule if (bAllAttributesOk && newRule->validate()) @@ -5105,3 +6097,88 @@ void AclOrch::removeAllAclRuleStatus() } } +void MetaDataMgr::populateRange(uint16_t min, uint16_t max) +{ + metaMin = min; + metaMax = max; + SWSS_LOG_INFO("Metadata range %d to %d", metaMin,metaMax); + for (uint16_t i = metaMin; i <= metaMax; i++) + { + m_freeMetadata.push_back(i); + } + initComplete = true; +} + +bool MetaDataMgr::isValidMetaData(uint16_t metadata) +{ + if (metadata >= metaMin && metadata <= metaMax) + { + return true; + } + return false; +} + +uint16_t MetaDataMgr::getFreeMetaData(uint8_t dscp) +{ + uint16_t metadata = (uint16_t)(metaMax + 1); + SWSS_LOG_INFO("Metadata Request for dscp %d", dscp); + + if (initComplete) + { + if (m_dscpMetadata.find(dscp) != m_dscpMetadata.end()) + { + // dscp value has a metadata value assigned to it. + metadata = m_dscpMetadata[dscp]; + SWSS_LOG_INFO("Metadata %d has already been allocated for dscp %d, refcount %d", metadata, dscp, m_MetadataRef[metadata]+1); + + } + else + { + if (m_freeMetadata.empty()) + { + SWSS_LOG_ERROR("Metadata Value not available for allocation."); + return metadata; + } + metadata = m_freeMetadata.front(); + m_freeMetadata.erase(m_freeMetadata.begin()); + m_dscpMetadata[dscp] = metadata; + SWSS_LOG_INFO("New Metadata %d allocated for dscp %d", metadata, dscp); + } + m_MetadataRef[metadata] = (uint16_t)(m_MetadataRef[metadata] + 1); + } + else + { + SWSS_LOG_ERROR("Metadata request before Initialization complete."); + } + return metadata; +} + +void MetaDataMgr::recycleMetaData(uint16_t metadata) +{ + if (initComplete) + { + m_MetadataRef[metadata] = (uint16_t)(m_MetadataRef[metadata] - 1); + SWSS_LOG_INFO("Freeing Metadata %d refcount %d", metadata, m_MetadataRef[metadata]); + if (m_MetadataRef[metadata] == 0) + { + + for (auto iter = m_dscpMetadata.begin(); iter != m_dscpMetadata.end();) + { + if ( iter->second == metadata) + { + m_dscpMetadata.erase(iter++); + m_freeMetadata.push_front(metadata); + break; + } + else + { + ++iter; + } + } + } + } + else + { + SWSS_LOG_ERROR("Unexpected: Metadata free before Initialization complete."); + } +} diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h old mode 100644 new mode 100755 index abeaf519e2e..6e0e51ea7d3 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -15,6 +15,7 @@ #include "mirrororch.h" #include "dtelorch.h" #include "observer.h" +#include "vxlanorch.h" #include "flex_counter_manager.h" #include "acltable.h" @@ -23,6 +24,7 @@ #define RULE_PRIORITY "PRIORITY" #define MATCH_IN_PORTS "IN_PORTS" +#define MATCH_OUT_PORT "OUT_PORT" #define MATCH_OUT_PORTS "OUT_PORTS" #define MATCH_SRC_IP "SRC_IP" #define MATCH_DST_IP "DST_IP" @@ -49,8 +51,13 @@ #define MATCH_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" #define MATCH_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" #define MATCH_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define MATCH_INNER_SRC_MAC "INNER_SRC_MAC" +#define MATCH_INNER_DST_MAC "INNER_DST_MAC" +#define MATCH_INNER_SRC_IP "INNER_SRC_IP" #define MATCH_BTH_OPCODE "BTH_OPCODE" #define MATCH_AETH_SYNDROME "AETH_SYNDROME" +#define MATCH_TUNNEL_TERM "TUNNEL_TERM" +#define MATCH_METADATA "META_DATA" #define BIND_POINT_TYPE_PORT "PORT" #define BIND_POINT_TYPE_PORTCHANNEL "PORTCHANNEL" @@ -58,6 +65,7 @@ #define ACTION_PACKET_ACTION "PACKET_ACTION" #define ACTION_REDIRECT_ACTION "REDIRECT_ACTION" #define ACTION_DO_NOT_NAT_ACTION "DO_NOT_NAT_ACTION" +#define ACTION_DISABLE_TRIM "DISABLE_TRIM_ACTION" #define ACTION_MIRROR_ACTION "MIRROR_ACTION" #define ACTION_MIRROR_INGRESS_ACTION "MIRROR_INGRESS_ACTION" #define ACTION_MIRROR_EGRESS_ACTION "MIRROR_EGRESS_ACTION" @@ -68,11 +76,16 @@ #define ACTION_DTEL_FLOW_SAMPLE_PERCENT "FLOW_SAMPLE_PERCENT" #define ACTION_DTEL_REPORT_ALL_PACKETS "REPORT_ALL_PACKETS" #define ACTION_COUNTER "COUNTER" +#define ACTION_META_DATA "META_DATA_ACTION" +#define ACTION_DSCP "DSCP_ACTION" +#define ACTION_INNER_SRC_MAC_REWRITE_ACTION "INNER_SRC_MAC_REWRITE_ACTION" -#define PACKET_ACTION_FORWARD "FORWARD" -#define PACKET_ACTION_DROP "DROP" -#define PACKET_ACTION_REDIRECT "REDIRECT" -#define PACKET_ACTION_DO_NOT_NAT "DO_NOT_NAT" +#define PACKET_ACTION_FORWARD "FORWARD" +#define PACKET_ACTION_DROP "DROP" +#define PACKET_ACTION_COPY "COPY" +#define PACKET_ACTION_REDIRECT "REDIRECT" +#define PACKET_ACTION_DO_NOT_NAT "DO_NOT_NAT" +#define PACKET_ACTION_DISABLE_TRIM "DISABLE_TRIM" #define DTEL_FLOW_OP_NOP "NOP" #define DTEL_FLOW_OP_POSTCARD "POSTCARD" @@ -94,6 +107,7 @@ #define IP_TYPE_ARP_REPLY "ARP_REPLY" #define MLNX_MAX_RANGES_COUNT 16 +#define CLNX_MAX_RANGES_COUNT 16 #define INGRESS_TABLE_DROP "IngressTableDrop" #define EGRESS_TABLE_DROP "EgressTableDrop" #define RULE_OPER_ADD 0 @@ -101,6 +115,12 @@ #define ACL_COUNTER_FLEX_COUNTER_GROUP "ACL_STAT_COUNTER" +#define TABLE_ACL_USER_META_DATA_RANGE_CAPABLE "ACL_USER_META_DATA_RANGE_CAPABLE" +#define TABLE_ACL_USER_META_DATA_MIN "ACL_USER_META_DATA_MIN" +#define TABLE_ACL_USER_META_DATA_MAX "ACL_USER_META_DATA_MAX" +#define TABLE_ACL_ENTRY_ATTR_META_CAPABLE "ACL_ENTRY_ATTR_META_CAPABLE" +#define TABLE_ACL_ENTRY_ACTION_META_CAPABLE "ACL_ENTRY_ACTION_META_CAPABLE" + enum AclObjectStatus { ACTIVE = 0, @@ -109,6 +129,14 @@ enum AclObjectStatus PENDING_REMOVAL }; +enum EgressSetDscpTableStatus +{ + EGRESS_SET_DSCP_TABLE_FAILED = 0, + EGRESS_SET_DSCP_TABLE_SUCCESS, + EGRESS_SET_DSCP_TABLE_NOT_REQUIRED, + EGRESS_SET_DSCP_TABLE_NOT_SUPPORTED +}; + struct AclActionCapabilities { set actionList; @@ -165,6 +193,24 @@ class AclTableRangeMatch: public AclTableMatchInterface private: vector m_rangeList; }; + +class MetaDataMgr +{ +public: + void populateRange(uint16_t min, uint16_t max); + uint16_t getFreeMetaData(uint8_t dscp); + void recycleMetaData(uint16_t metadata); + bool isValidMetaData(uint16_t metadata); + +private: + bool initComplete = false; + uint16_t metaMin = 0; + uint16_t metaMax = 0; + list m_freeMetadata; + map m_dscpMetadata; + map m_MetadataRef; +}; + class AclTableType { public: @@ -248,6 +294,22 @@ class AclTable; class AclRule { public: + struct TunnelNH + { + TunnelNH() = default; + ~TunnelNH() = default; + + void load(const std::string& target); + void parse(const std::string& target); + void clear(); + + std::string tunnel_name; + swss::IpAddress endpoint_ip; + swss::MacAddress mac; + uint32_t vni = 0; + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + }; + AclRule(AclOrch *pAclOrch, string rule, string table, bool createCounter = true); virtual bool validateAddPriority(string attr_name, string attr_value); virtual bool validateAddMatch(string attr_name, string attr_value); @@ -278,7 +340,13 @@ class AclRule bool getCreateCounter() const; const vector& getRangeConfig() const; - static shared_ptr makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple&); + static shared_ptr makeShared(AclOrch *acl, + MirrorOrch *mirror, + DTelOrch *dtel, + const string& rule, + const string& table, + const KeyOpFieldsValuesTuple&, + MetaDataMgr * m_metadataMgr); virtual ~AclRule() {} protected: @@ -315,6 +383,7 @@ class AclRule map m_matches; string m_redirect_target_next_hop; string m_redirect_target_next_hop_group; + AclRule::TunnelNH m_redirect_target_tun_nh; vector m_rangeConfig; vector m_ranges; @@ -336,12 +405,23 @@ class AclRulePacket: public AclRule sai_object_id_t getRedirectObjectId(const string& redirect_param); }; +class AclRuleInnerSrcMacRewrite: public AclRule + { + public: + AclRuleInnerSrcMacRewrite(AclOrch *m_pAclOrch, string rule, string table, bool createCounter = true); + + bool validateAddAction(string attr_name, string attr_value); + bool validate(); + void onUpdate(SubjectType, void *) override; + }; + class AclRuleMirror: public AclRule { public: AclRuleMirror(AclOrch *m_pAclOrch, MirrorOrch *m_pMirrorOrch, string rule, string table); bool validateAddAction(string attr_name, string attr_value); bool validate(); + bool createCounter(); bool createRule(); bool removeRule(); void onUpdate(SubjectType, void *) override; @@ -377,6 +457,23 @@ class AclRuleDTelWatchListEntry: public AclRule bool INT_session_valid; }; +class AclRuleUnderlaySetDscp: public AclRule +{ +public: + AclRuleUnderlaySetDscp(AclOrch *m_pAclOrch, string rule, string table, MetaDataMgr* m_metaDataMgr, bool createCounter = true); + + bool validateAddAction(string attr_name, string attr_value); + bool validate(); + void onUpdate(SubjectType, void *) override; + uint32_t getDscpValue() const; + uint32_t getMetadata() const; +protected: + uint32_t cachedDscpValue; + uint32_t cachedMetadata; + string table_id; + MetaDataMgr* m_metaDataMgr; +}; + class AclTable { public: @@ -451,6 +548,9 @@ class AclTable // Set to store the not configured ACL table port alias set pendingPortSet; + // Is the ACL table bound to switch? + bool bindToSwitch = false; + private: sai_object_id_t m_oid = SAI_NULL_OBJECT_ID; AclOrch *m_pAclOrch = nullptr; @@ -487,6 +587,14 @@ class AclOrch : public Orch, public Observer bool addAclTable(AclTable &aclTable); bool removeAclTable(string table_id); + bool addAclTable(string table_id, AclTable &aclTable, string orignalTableTypeName); + bool updateAclTable(string table_id, AclTable &table, string orignalTableTypeName); + EgressSetDscpTableStatus addEgrSetDscpTable(string table_id, AclTable &table, string orignalTableTypeName); + + bool removeEgrSetDscpTable(string table_id); + bool addEgrSetDscpRule(string key, string dscpAction); + bool removeEgrSetDscpRule(string key); + bool addAclTableType(const AclTableType& tableType); bool removeAclTableType(const string& tableTypeName); bool updateAclTable(AclTable ¤tTable, AclTable &newTable); @@ -506,11 +614,22 @@ class AclOrch : public Orch, public Observer bool isAclActionListMandatoryOnTableCreation(acl_stage_type_t stage) const; bool isAclActionSupported(acl_stage_type_t stage, sai_acl_action_type_t action) const; bool isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_acl_action_parameter_t param) const; + bool isUsingEgrSetDscp(const string& table) const; + string translateUnderlaySetDscpTableTypeName(const string& tableTypeName) const; + bool isAclMetaDataSupported() const; + uint16_t getAclMetaDataMin() const; + uint16_t getAclMetaDataMax() const; + + void addMetaDataRef(string key, uint16_t metadata); + void removeMetaDataRef(string key, uint16_t metadata); + uint32_t getMetaDataRefCount(uint16_t metadata); + uint32_t hasMetaDataRefCount(string key); bool m_isCombinedMirrorV6Table = true; map m_mirrorTableCapabilities; map m_L3V4V6Capability; - + map m_switchMetaDataCapabilities; + void registerFlexCounter(const AclRule& rule); void deregisterFlexCounter(const AclRule& rule); @@ -530,7 +649,7 @@ class AclOrch : public Orch, public Observer void doAclRuleTask(Consumer &consumer); void doAclTableTypeTask(Consumer &consumer); void init(vector& connectors, PortsOrch *portOrch, MirrorOrch *mirrorOrch, NeighOrch *neighOrch, RouteOrch *routeOrch); - void initDefaultTableTypes(); + void initDefaultTableTypes(const string& platform, const string& sub_platform); void queryMirrorTableCapability(); void queryAclActionCapability(); @@ -586,8 +705,12 @@ class AclOrch : public Orch, public Observer Table m_aclTableStateTable; Table m_aclRuleStateTable; + MetaDataMgr m_metaDataMgr; map m_mirrorTableId; map m_mirrorV6TableId; + set m_egrSetDscpRef; + map> m_metadataEgrDscpRule; + map m_egrDscpRuleMetadata; acl_capabilities_t m_aclCapabilities; acl_action_enum_values_capabilities_t m_aclEnumActionCapabilities; diff --git a/orchagent/acltable.h b/orchagent/acltable.h index 1b1cdeb29ae..8f51fd9a096 100644 --- a/orchagent/acltable.h +++ b/orchagent/acltable.h @@ -35,6 +35,11 @@ extern "C" { #define TABLE_TYPE_MCLAG "MCLAG" #define TABLE_TYPE_MUX "MUX" #define TABLE_TYPE_DROP "DROP" +#define TABLE_TYPE_MARK_META "MARK_META" +#define TABLE_TYPE_MARK_META_V6 "MARK_METAV6" +#define TABLE_TYPE_EGR_SET_DSCP "EGR_SET_DSCP" +#define TABLE_TYPE_UNDERLAY_SET_DSCP "UNDERLAY_SET_DSCP" +#define TABLE_TYPE_UNDERLAY_SET_DSCPV6 "UNDERLAY_SET_DSCPV6" typedef enum { diff --git a/orchagent/bfdorch.cpp b/orchagent/bfdorch.cpp index 25c6c20cf2d..095655c3040 100644 --- a/orchagent/bfdorch.cpp +++ b/orchagent/bfdorch.cpp @@ -7,6 +7,7 @@ #include "sai_serialize.h" #include "directory.h" #include "notifications.h" +#include "schema.h" using namespace std; using namespace swss; @@ -27,6 +28,7 @@ extern sai_object_id_t gVirtualRouterId; extern PortsOrch* gPortsOrch; extern sai_switch_api_t* sai_switch_api; extern Directory gDirectory; +extern string gMySwitchType; const map session_type_map = { @@ -62,16 +64,25 @@ BfdOrch::BfdOrch(DBConnector *db, string tableName, TableConnector stateDbBfdSes m_bfdStateNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); auto bfdStateNotificatier = new Notifier(m_bfdStateNotificationConsumer, this, "BFD_STATE_NOTIFICATIONS"); - // Clean up state database BFD entries + m_stateDbConnector = std::make_unique("STATE_DB", 0); + m_stateSoftBfdSessionTable = std::make_unique(m_stateDbConnector.get(), STATE_BFD_SOFTWARE_SESSION_TABLE_NAME); + + SWSS_LOG_NOTICE("Switch type is: %s", gMySwitchType.c_str()); + vector keys; + // Clean up state database BFD entries m_stateBfdSessionTable.getKeys(keys); - for (auto alias : keys) { m_stateBfdSessionTable.del(alias); } - + // Clean up state database software BFD entries + m_stateSoftBfdSessionTable->getKeys(keys); + for (auto alias : keys) + { + m_stateSoftBfdSessionTable->del(alias); + } Orch::addExecutor(bfdStateNotificatier); register_state_change_notif = false; } @@ -81,10 +92,33 @@ BfdOrch::~BfdOrch(void) SWSS_LOG_ENTER(); } +std::string BfdOrch::createStateDBKey(const std::string &input) { + // Replace ':' with '|' to convert key to StateDB format. + std::string result = input; + size_t pos = result.find(':'); // Find the first colon + if (pos != std::string::npos) { + result[pos] = '|'; // Replace the first colon with '|' + + // Find the second colon + pos = result.find(':', pos + 1); + if (pos != std::string::npos) { + result[pos] = '|'; // Replace the second colon with '|' + } + } + return result; +} + void BfdOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); - + BgpGlobalStateOrch* bgp_global_state_orch = gDirectory.get(); + bool tsa_enabled = false; + bool use_software_bfd = true; + if (bgp_global_state_orch) + { + tsa_enabled = bgp_global_state_orch->getTsaState(); + use_software_bfd = bgp_global_state_orch->getSoftwareBfd(); + } auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -96,19 +130,83 @@ void BfdOrch::doTask(Consumer &consumer) if (op == SET_COMMAND) { - if (!create_bfd_session(key, data)) + if (use_software_bfd) { - it++; + //program entry in software BFD table + m_stateSoftBfdSessionTable->set(createStateDBKey(key), data); + it = consumer.m_toSync.erase(it); continue; } + + bool tsa_shutdown_enabled = false; + for (auto i : data) + { + auto value = fvValue(i); + //shutdown_bfd_during_tsa parameter is used by the BFD session creator to ensure that the the + //specified session gets removed when the device goes into TSA state. + //if this parameter is not specified or set to false for a session, the + // corrosponding BFD session would be maintained even in TSA state. + if (fvField(i) == "shutdown_bfd_during_tsa" && value == "true" ) + { + tsa_shutdown_enabled = true; + break; + } + } + if (tsa_shutdown_enabled) + { + bfd_session_cache[key] = data; + if (!tsa_enabled) + { + if (!create_bfd_session(key, data)) + { + it++; + continue; + } + } + else + { + notify_session_state_down(key); + } + } + else + { + if (!create_bfd_session(key, data)) + { + it++; + continue; + } + } } else if (op == DEL_COMMAND) { - if (!remove_bfd_session(key)) + if (use_software_bfd) { - it++; + //delete entry from software BFD table + m_stateSoftBfdSessionTable->del(createStateDBKey(key)); + it = consumer.m_toSync.erase(it); continue; } + + if (bfd_session_cache.find(key) != bfd_session_cache.end() ) + { + bfd_session_cache.erase(key); + if (!tsa_enabled) + { + if (!remove_bfd_session(key)) + { + it++; + continue; + } + } + } + else + { + if (!remove_bfd_session(key)) + { + it++; + continue; + } + } } else { @@ -298,6 +396,12 @@ bool BfdOrch::create_bfd_session(const string& key, const vector(value); } + else if (fvField(i) == "shutdown_bfd_during_tsa") + { + //since we are handling shutdown_bfd_during_tsa in the caller function, we need to ignore it here. + //failure to ignore this parameter would cause error log. + continue; + } else SWSS_LOG_ERROR("Unsupported BFD attribute %s\n", fvField(i).c_str()); } @@ -551,3 +655,187 @@ uint32_t BfdOrch::bfd_src_port(void) return (port++); } +void BfdOrch::notify_session_state_down(const string& key) +{ + SWSS_LOG_ENTER(); + size_t found_vrf = key.find(delimiter); + if (found_vrf == string::npos) + { + SWSS_LOG_ERROR("Failed to parse key %s, no vrf is given", key.c_str()); + return; + } + + size_t found_ifname = key.find(delimiter, found_vrf + 1); + if (found_ifname == string::npos) + { + SWSS_LOG_ERROR("Failed to parse key %s, no ifname is given", key.c_str()); + return; + } + string vrf_name = key.substr(0, found_vrf); + string alias = key.substr(found_vrf + 1, found_ifname - found_vrf - 1); + IpAddress peer_address(key.substr(found_ifname + 1)); + BfdUpdate update; + update.peer = get_state_db_key(vrf_name, alias, peer_address); + update.state = SAI_BFD_SESSION_STATE_DOWN; + notify(SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE, static_cast(&update)); +} + +void BfdOrch::handleTsaStateChange(bool tsaState) +{ + SWSS_LOG_ENTER(); + for (auto it : bfd_session_cache) + { + if (tsaState == true) + { + if (bfd_session_map.find(it.first) != bfd_session_map.end()) + { + notify_session_state_down(it.first); + remove_bfd_session(it.first); + } + } + else + { + if (bfd_session_map.find(it.first) == bfd_session_map.end()) + { + create_bfd_session(it.first, it.second); + } + } + } +} + +void BfdOrch::createSoftwareBfdSession(const string &key, const vector& data) +{ + m_stateSoftBfdSessionTable->set(createStateDBKey(key), data); + SWSS_LOG_NOTICE("Software BFD session created for %s", key.c_str()); +} + +void BfdOrch::removeSoftwareBfdSession(const string &key) +{ + m_stateSoftBfdSessionTable->del(createStateDBKey(key)); + SWSS_LOG_NOTICE("Software BFD session removed for %s", key.c_str()); +} + +void BfdOrch::removeAllSoftwareBfdSessions() +{ + vector keys; + m_stateSoftBfdSessionTable->getKeys(keys); + + for (auto key : keys) + { + removeSoftwareBfdSession(key); + } +} + +BgpGlobalStateOrch::BgpGlobalStateOrch(DBConnector *db, string tableName): + Orch(db, tableName) +{ + SWSS_LOG_ENTER(); + tsa_enabled = false; + bool ipv6 = true; + bfd_offload = (offload_supported(!ipv6) && offload_supported(ipv6)); +} + +BgpGlobalStateOrch::~BgpGlobalStateOrch(void) +{ + SWSS_LOG_ENTER(); +} + +bool BgpGlobalStateOrch::getTsaState() +{ + SWSS_LOG_ENTER(); + return tsa_enabled; +} + +bool BgpGlobalStateOrch::getSoftwareBfd() +{ + SWSS_LOG_ENTER(); + return !bfd_offload; +} + +bool BgpGlobalStateOrch::offload_supported(bool get_ipv6) +{ + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + attr.id = SAI_SWITCH_ATTR_SUPPORTED_IPV4_BFD_SESSION_OFFLOAD_TYPE; + if(get_ipv6) + { + attr.id = SAI_SWITCH_ATTR_SUPPORTED_IPV6_BFD_SESSION_OFFLOAD_TYPE; + } + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + attr.id, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Unable to query BFD offload capability"); + return false; + } + if (!capability.get_implemented) + { + SWSS_LOG_NOTICE("BFD offload type not implemented"); + return false; + } + + uint32_t list[1] = { 1 }; + attr.value.u32list.count = 1; + attr.value.u32list.list = list; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if(status == SAI_STATUS_SUCCESS && attr.value.u32list.count > 0) + { + SWSS_LOG_INFO("BFD offload type: %d", attr.value.u32list.list[0]); + return (attr.value.u32list.list[0] != SAI_BFD_SESSION_OFFLOAD_TYPE_NONE); + } + SWSS_LOG_ERROR("Could not get supported BFD offload type, rv: %d", status); + return false; +} + +void BgpGlobalStateOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + auto data = kfvFieldsValues(t); + + if (op == SET_COMMAND) + { + for (auto i : data) + { + auto value = fvValue(i); + auto type = fvField(i); + SWSS_LOG_INFO("SET on key %s, data T %s, V %s\n", key.c_str(), type.c_str(), value.c_str()); + if (type == "tsa_enabled") + { + bool state = true ? value == "true" : false; + if (tsa_enabled != state) + { + SWSS_LOG_NOTICE("BgpGlobalStateOrch TSA state Changed to %d from %d.\n", int(state), int(tsa_enabled)); + tsa_enabled = state; + + BfdOrch* bfd_orch = gDirectory.get(); + if (bfd_orch) + { + bfd_orch->handleTsaStateChange(state); + } + } + } + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_ERROR("DEL on key %s is not expected.\n", key.c_str()); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); + } + it = consumer.m_toSync.erase(it); + } +} + diff --git a/orchagent/bfdorch.h b/orchagent/bfdorch.h index 4a0cb9edfba..291abc9201c 100644 --- a/orchagent/bfdorch.h +++ b/orchagent/bfdorch.h @@ -17,6 +17,15 @@ class BfdOrch: public Orch, public Subject void doTask(swss::NotificationConsumer &consumer); BfdOrch(swss::DBConnector *db, std::string tableName, TableConnector stateDbBfdSessionTable); virtual ~BfdOrch(void); + void handleTsaStateChange(bool tsaState); + + /* APIs for HaOrch to create passive BFD sessions on DPU.*/ + virtual void createSoftwareBfdSession( + const std::string& key, + const std::vector& data); + virtual void removeSoftwareBfdSession( + const std::string& key); + virtual void removeAllSoftwareBfdSessions(); private: bool create_bfd_session(const std::string& key, const std::vector& data); @@ -26,17 +35,39 @@ class BfdOrch: public Orch, public Subject uint32_t bfd_gen_id(void); uint32_t bfd_src_port(void); + void notify_session_state_down(const std::string& key); bool register_bfd_state_change_notification(void); void update_port_number(std::vector &attrs); sai_status_t retry_create_bfd_session(sai_object_id_t &bfd_session_id, vector attrs); + std::string createStateDBKey(const std::string &input); std::map bfd_session_map; std::map bfd_session_lookup; swss::Table m_stateBfdSessionTable; + std::unique_ptr m_stateDbConnector; + std::unique_ptr m_stateSoftBfdSessionTable; + swss::NotificationConsumer* m_bfdStateNotificationConsumer; bool register_state_change_notif; + std::map> bfd_session_cache; + }; +class BgpGlobalStateOrch : public Orch +{ +public: + void doTask(Consumer &consumer); + BgpGlobalStateOrch(swss::DBConnector *db, std::string tableName); + virtual ~BgpGlobalStateOrch(void); + bool getTsaState(); + bool getSoftwareBfd(); + +private: + bool tsa_enabled; + bool bfd_offload; + bool offload_supported(bool get_ipv6); + +}; #endif /* SWSS_BFDORCH_H */ diff --git a/orchagent/buffer/buffercontainer.h b/orchagent/buffer/buffercontainer.h new file mode 100644 index 00000000000..351ebaa3a65 --- /dev/null +++ b/orchagent/buffer/buffercontainer.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include + +#include +#include +#include + +class BufferContainer +{ +public: + BufferContainer() = default; + virtual ~BufferContainer() = default; + + std::unordered_map fieldValueMap; +}; + +class BufferProfileConfig final : public BufferContainer +{ +public: + BufferProfileConfig() = default; + ~BufferProfileConfig() = default; + + inline bool isTrimmingProhibited() const + { + return ((pgRefCount > 0) || (iBufProfListRefCount > 0) || (eBufProfListRefCount)) ? true : false; + } + + std::uint64_t pgRefCount = 0; + std::uint64_t iBufProfListRefCount = 0; + std::uint64_t eBufProfListRefCount = 0; + + bool isTrimmingEligible = false; +}; + +class BufferPriorityGroupConfig final : public BufferContainer +{ +public: + BufferPriorityGroupConfig() = default; + ~BufferPriorityGroupConfig() = default; + + struct { + std::string value; + bool is_set = false; + } profile; +}; + +class IngressBufferProfileListConfig final : public BufferContainer +{ +public: + IngressBufferProfileListConfig() = default; + ~IngressBufferProfileListConfig() = default; + + struct { + std::unordered_set value; + bool is_set = false; + } profile_list; +}; + +class EgressBufferProfileListConfig final : public BufferContainer +{ +public: + EgressBufferProfileListConfig() = default; + ~EgressBufferProfileListConfig() = default; + + struct { + std::unordered_set value; + bool is_set = false; + } profile_list; +}; diff --git a/orchagent/buffer/bufferhelper.cpp b/orchagent/buffer/bufferhelper.cpp new file mode 100644 index 00000000000..6a585057e9f --- /dev/null +++ b/orchagent/buffer/bufferhelper.cpp @@ -0,0 +1,303 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +#include +#include + +#include + +#include "bufferschema.h" + +#include "bufferhelper.h" + +using namespace swss; + +// helper ------------------------------------------------------------------------------------------------------------- + +void BufferHelper::parseBufferConfig(BufferProfileConfig &cfg) const +{ + auto &map = cfg.fieldValueMap; + + const auto &cit = map.find(BUFFER_PROFILE_PACKET_DISCARD_ACTION); + if (cit != map.cend()) + { + cfg.isTrimmingEligible = cit->second == BUFFER_PROFILE_PACKET_DISCARD_ACTION_TRIM ? true : false; + } +} + +void BufferHelper::parseBufferConfig(BufferPriorityGroupConfig &cfg) const +{ + auto &map = cfg.fieldValueMap; + + const auto &cit = map.find(BUFFER_PG_PROFILE); + if (cit != map.cend()) + { + cfg.profile.value = cit->second; + cfg.profile.is_set = true; + } +} + +void BufferHelper::parseBufferConfig(IngressBufferProfileListConfig &cfg) const +{ + auto &map = cfg.fieldValueMap; + + const auto &cit = map.find(BUFFER_PORT_INGRESS_PROFILE_LIST_PROFILE_LIST); + if (cit != map.cend()) + { + auto profList = tokenize(cit->second, ','); + + cfg.profile_list.value.insert(profList.begin(), profList.end()); + cfg.profile_list.is_set = true; + } +} + +void BufferHelper::parseBufferConfig(EgressBufferProfileListConfig &cfg) const +{ + auto &map = cfg.fieldValueMap; + + const auto &cit = map.find(BUFFER_PORT_EGRESS_PROFILE_LIST_PROFILE_LIST); + if (cit != map.cend()) + { + auto profList = tokenize(cit->second, ','); + + cfg.profile_list.value.insert(profList.begin(), profList.end()); + cfg.profile_list.is_set = true; + } +} + +template<> +void BufferHelper::setObjRef(const BufferProfileConfig &cfg) +{ + // No actions are required +} + +template<> +void BufferHelper::setObjRef(const BufferPriorityGroupConfig &cfg) +{ + if (cfg.profile.is_set) + { + const auto &cit = profMap.find(cfg.profile.value); + if (cit != profMap.cend()) + { + cit->second.pgRefCount++; + } + } +} + +template<> +void BufferHelper::setObjRef(const IngressBufferProfileListConfig &cfg) +{ + if (cfg.profile_list.is_set) + { + for (const auto &cit1 : cfg.profile_list.value) + { + const auto &cit2 = profMap.find(cit1); + if (cit2 != profMap.cend()) + { + cit2->second.iBufProfListRefCount++; + } + } + } +} + +template<> +void BufferHelper::setObjRef(const EgressBufferProfileListConfig &cfg) +{ + if (cfg.profile_list.is_set) + { + for (const auto &cit1 : cfg.profile_list.value) + { + const auto &cit2 = profMap.find(cit1); + if (cit2 != profMap.cend()) + { + cit2->second.eBufProfListRefCount++; + } + } + } +} + +template<> +void BufferHelper::delObjRef(const BufferProfileConfig &cfg) +{ + // No actions are required +} + +template<> +void BufferHelper::delObjRef(const BufferPriorityGroupConfig &cfg) +{ + if (cfg.profile.is_set) + { + const auto &cit = profMap.find(cfg.profile.value); + if (cit != profMap.cend()) + { + cit->second.pgRefCount--; + } + } +} + +template<> +void BufferHelper::delObjRef(const IngressBufferProfileListConfig &cfg) +{ + if (cfg.profile_list.is_set) + { + for (const auto &cit1 : cfg.profile_list.value) + { + const auto &cit2 = profMap.find(cit1); + if (cit2 != profMap.cend()) + { + cit2->second.iBufProfListRefCount--; + } + } + } +} + +template<> +void BufferHelper::delObjRef(const EgressBufferProfileListConfig &cfg) +{ + if (cfg.profile_list.is_set) + { + for (const auto &cit1 : cfg.profile_list.value) + { + const auto &cit2 = profMap.find(cit1); + if (cit2 != profMap.cend()) + { + cit2->second.eBufProfListRefCount--; + } + } + } +} + +template<> +auto BufferHelper::getBufferObjMap() const -> const std::unordered_map& +{ + return profMap; +} + +template<> +auto BufferHelper::getBufferObjMap() const -> const std::unordered_map& +{ + return pgMap; +} + +template<> +auto BufferHelper::getBufferObjMap() const -> const std::unordered_map& +{ + return iBufProfListMap; +} + +template<> +auto BufferHelper::getBufferObjMap() const -> const std::unordered_map& +{ + return eBufProfListMap; +} + +template<> +auto BufferHelper::getBufferObjMap() -> std::unordered_map& +{ + return profMap; +} + +template<> +auto BufferHelper::getBufferObjMap() -> std::unordered_map& +{ + return pgMap; +} + +template<> +auto BufferHelper::getBufferObjMap() -> std::unordered_map& +{ + return iBufProfListMap; +} + +template<> +auto BufferHelper::getBufferObjMap() -> std::unordered_map& +{ + return eBufProfListMap; +} + +template +void BufferHelper::setBufferConfig(const std::string &key, const T &cfg) +{ + auto &map = getBufferObjMap(); + + const auto &cit = map.find(key); + if (cit != map.cend()) + { + delObjRef(cit->second); + } + setObjRef(cfg); + + map[key] = cfg; +} + +template void BufferHelper::setBufferConfig(const std::string &key, const BufferProfileConfig &cfg); +template void BufferHelper::setBufferConfig(const std::string &key, const BufferPriorityGroupConfig &cfg); +template void BufferHelper::setBufferConfig(const std::string &key, const IngressBufferProfileListConfig &cfg); +template void BufferHelper::setBufferConfig(const std::string &key, const EgressBufferProfileListConfig &cfg); + +template +bool BufferHelper::getBufferConfig(T &cfg, const std::string &key) const +{ + auto &map = getBufferObjMap(); + + const auto &cit = map.find(key); + if (cit != map.cend()) + { + cfg = cit->second; + return true; + } + + return false; +} + +template bool BufferHelper::getBufferConfig(BufferProfileConfig &cfg, const std::string &key) const; +template bool BufferHelper::getBufferConfig(BufferPriorityGroupConfig &cfg, const std::string &key) const; +template bool BufferHelper::getBufferConfig(IngressBufferProfileListConfig &cfg, const std::string &key) const; +template bool BufferHelper::getBufferConfig(EgressBufferProfileListConfig &cfg, const std::string &key) const; + +void BufferHelper::delBufferProfileConfig(const std::string &key) +{ + const auto &cit = profMap.find(key); + if (cit == profMap.cend()) + { + return; + } + + delObjRef(cit->second); + profMap.erase(cit); +} + +void BufferHelper::delBufferPriorityGroupConfig(const std::string &key) +{ + const auto &cit = pgMap.find(key); + if (cit == pgMap.cend()) + { + return; + } + + delObjRef(cit->second); + pgMap.erase(cit); +} + +void BufferHelper::delIngressBufferProfileListConfig(const std::string &key) +{ + const auto &cit = iBufProfListMap.find(key); + if (cit == iBufProfListMap.cend()) + { + return; + } + + delObjRef(cit->second); + iBufProfListMap.erase(cit); +} + +void BufferHelper::delEgressBufferProfileListConfig(const std::string &key) +{ + const auto &cit = eBufProfListMap.find(key); + if (cit == eBufProfListMap.cend()) + { + return; + } + + delObjRef(cit->second); + eBufProfListMap.erase(cit); +} diff --git a/orchagent/buffer/bufferhelper.h b/orchagent/buffer/bufferhelper.h new file mode 100644 index 00000000000..264ac085550 --- /dev/null +++ b/orchagent/buffer/bufferhelper.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +#include "buffercontainer.h" + +class BufferHelper final +{ +public: + BufferHelper() = default; + ~BufferHelper() = default; + + void parseBufferConfig(BufferProfileConfig &cfg) const; + void parseBufferConfig(BufferPriorityGroupConfig &cfg) const; + void parseBufferConfig(IngressBufferProfileListConfig &cfg) const; + void parseBufferConfig(EgressBufferProfileListConfig &cfg) const; + + template + void setBufferConfig(const std::string &key, const T &cfg); + template + bool getBufferConfig(T &cfg, const std::string &key) const; + + void delBufferProfileConfig(const std::string &key); + void delBufferPriorityGroupConfig(const std::string &key); + void delIngressBufferProfileListConfig(const std::string &key); + void delEgressBufferProfileListConfig(const std::string &key); + +private: + template + auto getBufferObjMap() const -> const std::unordered_map&; + template + auto getBufferObjMap() -> std::unordered_map&; + + template + void setObjRef(const T &cfg); + template + void delObjRef(const T &cfg); + + std::unordered_map profMap; + std::unordered_map pgMap; + std::unordered_map iBufProfListMap; + std::unordered_map eBufProfListMap; +}; diff --git a/orchagent/buffer/bufferschema.h b/orchagent/buffer/bufferschema.h new file mode 100644 index 00000000000..f53c8774cce --- /dev/null +++ b/orchagent/buffer/bufferschema.h @@ -0,0 +1,13 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define BUFFER_PROFILE_PACKET_DISCARD_ACTION_DROP "drop" +#define BUFFER_PROFILE_PACKET_DISCARD_ACTION_TRIM "trim" + +#define BUFFER_PROFILE_PACKET_DISCARD_ACTION "packet_discard_action" + +#define BUFFER_PG_PROFILE "profile" + +#define BUFFER_PORT_INGRESS_PROFILE_LIST_PROFILE_LIST "profile_list" +#define BUFFER_PORT_EGRESS_PROFILE_LIST_PROFILE_LIST "profile_list" diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index 91f13578a6f..f6b02edf3ba 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -8,6 +8,9 @@ #include #include #include +#include + +#include "buffer/bufferschema.h" using namespace std; @@ -23,9 +26,6 @@ extern string gMySwitchType; extern string gMyHostName; extern string gMyAsicName; -#define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" - - static const vector bufferPoolWatermarkStatIds = { SAI_BUFFER_POOL_STAT_WATERMARK_BYTES, @@ -52,9 +52,7 @@ std::map> queue_port_flags; BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *stateDb, vector &tableNames) : Orch(applDb, tableNames), - m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), - m_flexCounterTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_TABLE)), - m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), + m_counterNameMapUpdater(new CounterNameMapUpdater("COUNTERS_DB", COUNTERS_BUFFER_POOL_NAME_MAP)), m_countersDb(new DBConnector("COUNTERS_DB", 0)), m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE) { @@ -78,6 +76,11 @@ void BufferOrch::initTableHandlers() m_bufferHandlerMap.insert(buffer_handler_pair(APP_BUFFER_PG_TABLE_NAME, &BufferOrch::processPriorityGroup)); m_bufferHandlerMap.insert(buffer_handler_pair(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, &BufferOrch::processIngressBufferProfileList)); m_bufferHandlerMap.insert(buffer_handler_pair(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, &BufferOrch::processEgressBufferProfileList)); + + m_bufferFlushHandlerMap.insert(buffer_flush_handler_pair(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, &BufferOrch::processIngressBufferProfileListBulk)); + m_bufferFlushHandlerMap.insert(buffer_flush_handler_pair(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, &BufferOrch::processEgressBufferProfileListBulk)); + m_bufferFlushHandlerMap.insert(buffer_flush_handler_pair(APP_BUFFER_PG_TABLE_NAME, &BufferOrch::processPriorityGroupBulk)); + m_bufferFlushHandlerMap.insert(buffer_flush_handler_pair(APP_BUFFER_QUEUE_TABLE_NAME, &BufferOrch::processQueueBulk)); } void BufferOrch::initBufferReadyLists(DBConnector *applDb, DBConnector *confDb) @@ -229,22 +232,23 @@ void BufferOrch::initBufferConstants() void BufferOrch::initFlexCounterGroupTable(void) { string bufferPoolWmPluginName = "watermark_bufferpool.lua"; + string bufferPoolWmSha; try { string bufferPoolLuaScript = swss::loadLuaScript(bufferPoolWmPluginName); - string bufferPoolWmSha = swss::loadRedisScript(m_countersDb.get(), bufferPoolLuaScript); - - vector fvTuples; - fvTuples.emplace_back(BUFFER_POOL_PLUGIN_FIELD, bufferPoolWmSha); - fvTuples.emplace_back(POLL_INTERVAL_FIELD, BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS); - - m_flexCounterGroupTable->set(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, fvTuples); + bufferPoolWmSha = swss::loadRedisScript(m_countersDb.get(), bufferPoolLuaScript); } catch (const runtime_error &e) { SWSS_LOG_ERROR("Buffer pool watermark lua script and/or flex counter group not set successfully. Runtime error: %s", e.what()); } + + setFlexCounterGroupParameter(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS, + "", // do not touch stats_mode + BUFFER_POOL_PLUGIN_FIELD, + bufferPoolWmSha); } bool BufferOrch::isPortReady(const std::string& port_name) const @@ -275,7 +279,7 @@ void BufferOrch::clearBufferPoolWatermarkCounterIdList(const sai_object_id_t obj if (m_isBufferPoolWatermarkCounterIdListGenerated) { string key = BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP ":" + sai_serialize_object_id(object_id); - m_flexCounterTable->del(key); + stopFlexCounterPolling(gSwitchId, key); } } @@ -326,37 +330,32 @@ void BufferOrch::generateBufferPoolWatermarkCounterIdList(void) if (!noWmClrCapability) { - vector fvs; - - fvs.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR); - m_flexCounterGroupTable->set(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, fvs); + setFlexCounterGroupStatsMode(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + STATS_MODE_READ_AND_CLEAR); } // Push buffer pool watermark COUNTER_ID_LIST to FLEX_COUNTER_TABLE on a per buffer pool basis - vector fvTuples; - fvTuples.emplace_back(BUFFER_POOL_COUNTER_ID_LIST, statList); + string stats_mode; + bitMask = 1; + for (const auto &it : *(m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME])) { string key = BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP ":" + sai_serialize_object_id(it.second.m_saiObjectId); + stats_mode = ""; + if (noWmClrCapability) { - string stats_mode = STATS_MODE_READ_AND_CLEAR; if (noWmClrCapability & bitMask) { stats_mode = STATS_MODE_READ; } - fvTuples.emplace_back(STATS_MODE_FIELD, stats_mode); - m_flexCounterTable->set(key, fvTuples); - fvTuples.pop_back(); bitMask <<= 1; } - else - { - m_flexCounterTable->set(key, fvTuples); - } + + startFlexCounterPolling(gSwitchId, key, statList, BUFFER_POOL_COUNTER_ID_LIST, stats_mode); } m_isBufferPoolWatermarkCounterIdListGenerated = true; @@ -370,6 +369,25 @@ const object_reference_map &BufferOrch::getBufferPoolNameOidMap(void) return *m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME]; } +void BufferOrch::getBufferObjectsWithNonZeroProfile(vector &nonZeroQueues, const string &table) +{ + for (auto &&queueRef: (*m_buffer_type_maps[table])) + { + for (auto &&profileRef: queueRef.second.m_objsReferencingByMe) + { + if (profileRef.second.find("_zero_") == std::string::npos) + { + SWSS_LOG_INFO("Selected key %s with profile %s", queueRef.first.c_str(), profileRef.second.c_str()); + nonZeroQueues.push_back(queueRef.first); + } + else + { + SWSS_LOG_INFO("Skipped key %s with profile %s", queueRef.first.c_str(), profileRef.second.c_str()); + } + } + } +} + task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); @@ -380,7 +398,8 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) string op = kfvOp(tuple); string xoff; - SWSS_LOG_DEBUG("object name:%s", object_name.c_str()); + SWSS_LOG_DEBUG("KEY: %s, OP: %s", object_name.c_str(), op.c_str()); + if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end()) { sai_object = (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId; @@ -391,7 +410,6 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_need_retry; } } - SWSS_LOG_DEBUG("processing command:%s", op.c_str()); if (op == SET_COMMAND) { @@ -401,7 +419,8 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) string field = fvField(*i); string value = fvValue(*i); - SWSS_LOG_DEBUG("field:%s, value:%s", field.c_str(), value.c_str()); + SWSS_LOG_DEBUG("FIELD: %s, VALUE: %s", field.c_str(), value.c_str()); + sai_attribute_t attr; if (field == buffer_size_field_name) { @@ -524,7 +543,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) // Specifically, we push the buffer pool name to oid mapping upon the creation of the oid // In pg and queue case, this mapping installment is deferred to FlexCounterOrch at a reception of field // "FLEX_COUNTER_STATUS" - m_countersDb->hset(COUNTERS_BUFFER_POOL_NAME_MAP, object_name, sai_serialize_object_id(sai_object)); + m_counterNameMapUpdater->setCounterNameMap(object_name, sai_object); } // Only publish the result when shared headroom pool is enabled and it has been successfully applied to SAI @@ -564,7 +583,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) } auto it_to_delete = (m_buffer_type_maps[map_type_name])->find(object_name); (m_buffer_type_maps[map_type_name])->erase(it_to_delete); - m_countersDb->hdel(COUNTERS_BUFFER_POOL_NAME_MAP, object_name); + m_counterNameMapUpdater->delCounterNameMap(object_name); vector fvs; m_publisher.publish(APP_BUFFER_POOL_TABLE_NAME, object_name, fvs, ReturnCode(SAI_STATUS_SUCCESS), true); @@ -587,7 +606,8 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup string op = kfvOp(tuple); string pool_name; - SWSS_LOG_DEBUG("object name:%s", object_name.c_str()); + SWSS_LOG_DEBUG("KEY: %s, OP: %s", object_name.c_str(), op.c_str()); + if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end()) { sai_object = (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId; @@ -598,16 +618,21 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup return task_process_status::task_need_retry; } } - SWSS_LOG_DEBUG("processing command:%s", op.c_str()); + if (op == SET_COMMAND) { + BufferProfileConfig cfg; + m_bufHlpr.getBufferConfig(cfg, object_name); vector attribs; for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++) { string field = fvField(*i); string value = fvValue(*i); - SWSS_LOG_DEBUG("field:%s, value:%s", field.c_str(), value.c_str()); + cfg.fieldValueMap[field] = value; + + SWSS_LOG_DEBUG("FIELD: %s, VALUE: %s", field.c_str(), value.c_str()); + sai_attribute_t attr; if (field == buffer_pool_field_name) { @@ -696,12 +721,46 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup attr.value.u64 = (uint64_t)stoul(value); attribs.push_back(attr); } + else if (field == BUFFER_PROFILE_PACKET_DISCARD_ACTION) + { + attr.id = SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION; + + if (value == BUFFER_PROFILE_PACKET_DISCARD_ACTION_DROP) + { + attr.value.s32 = SAI_BUFFER_PROFILE_PACKET_ADMISSION_FAIL_ACTION_DROP; + } + else if (value == BUFFER_PROFILE_PACKET_DISCARD_ACTION_TRIM) + { + attr.value.s32 = SAI_BUFFER_PROFILE_PACKET_ADMISSION_FAIL_ACTION_DROP_AND_TRIM; + } + else + { + SWSS_LOG_ERROR("Failed to parse buffer profile(%s) field(%s): invalid value(%s)", + object_name.c_str(), field.c_str(), value.c_str() + ); + return task_process_status::task_failed; + } + + attribs.push_back(attr); + } else { SWSS_LOG_ERROR("Unknown buffer profile field specified:%s, ignoring", field.c_str()); continue; } } + + m_bufHlpr.parseBufferConfig(cfg); + + if (cfg.isTrimmingEligible && cfg.isTrimmingProhibited()) + { + SWSS_LOG_ERROR( + "Failed to configure buffer profile(%s): trimming is prohibited by dependency constraint check", + object_name.c_str() + ); + return task_process_status::task_failed; + } + if (SAI_NULL_OBJECT_ID != sai_object) { vector attribs_to_retry; @@ -721,7 +780,7 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } } - for (auto &attribute : attribs) + for (auto &attribute : attribs_to_retry) { sai_status = sai_buffer_api->set_buffer_profile_attribute(sai_object, &attribute); if (SAI_STATUS_SUCCESS != sai_status) @@ -753,6 +812,9 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup SWSS_LOG_NOTICE("Created buffer profile %s with type %s", object_name.c_str(), map_type_name.c_str()); } + // Update config state + m_bufHlpr.setBufferConfig(object_name, cfg); + // Add reference to the buffer pool object setObjectReference(m_buffer_type_maps, map_type_name, object_name, buffer_pool_field_name, pool_name); } @@ -783,6 +845,7 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup SWSS_LOG_NOTICE("Remove buffer profile %s with type %s", object_name.c_str(), map_type_name.c_str()); removeObject(m_buffer_type_maps, map_type_name, object_name); + m_bufHlpr.delBufferProfileConfig(object_name); } else { @@ -807,9 +870,13 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) sai_uint32_t range_low, range_high; bool need_update_sai = true; bool local_port = false; + bool counter_was_added = false; + bool counter_needs_to_add = false; + string old_buffer_profile_name; string local_port_name; - SWSS_LOG_DEBUG("Processing:%s", key.c_str()); + SWSS_LOG_DEBUG("KEY: %s, OP: %s", key.c_str(), op.c_str()); + tokens = tokenize(key, delimiter); vector port_names; @@ -827,7 +894,11 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_invalid_entry; } - if((tokens[0] == gMyHostName) && (tokens[1] == gMyAsicName)) + string tmp_token_1 = tokens[1]; + string tmp_gMyAsicName = gMyAsicName; + boost::algorithm::to_lower(tmp_token_1); + boost::algorithm::to_lower(tmp_gMyAsicName); + if((tokens[0] == gMyHostName) && (tmp_token_1 == tmp_gMyAsicName)) { local_port = true; local_port_name = tokens[2]; @@ -849,6 +920,9 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) } } + QueueTask task; + task.kofvs = tuple; + if (op == SET_COMMAND) { ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, @@ -866,7 +940,6 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_failed; } - string old_buffer_profile_name; if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name) && (old_buffer_profile_name == buffer_profile_name)) { @@ -884,11 +957,14 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) SWSS_LOG_NOTICE("Set buffer queue %s to %s", key.c_str(), buffer_profile_name.c_str()); setObjectReference(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name); + + // Counter operation + counter_needs_to_add = buffer_profile_name.find("_zero_") == std::string::npos; + SWSS_LOG_INFO("%s to create counter for %s with new profile %s", counter_needs_to_add ? "Need" : "No need", key.c_str(), buffer_profile_name.c_str()); } else if (op == DEL_COMMAND) { - auto &typemap = (*m_buffer_type_maps[APP_BUFFER_QUEUE_TABLE_NAME]); - if (typemap.find(key) == typemap.end()) + if (!doesObjectExist(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name)) { SWSS_LOG_INFO("%s doesn't not exist, don't need to notfiy SAI", key.c_str()); need_update_sai = false; @@ -897,6 +973,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) SWSS_LOG_NOTICE("Remove buffer queue %s", key.c_str()); removeObject(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key); m_partiallyAppliedQueues.erase(key); + counter_needs_to_add = false; } else { @@ -904,6 +981,9 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_invalid_entry; } + counter_was_added = !old_buffer_profile_name.empty() && old_buffer_profile_name.find("_zero_") == std::string::npos; + SWSS_LOG_INFO("%s to remove counter for %s with old profile %s", counter_was_added ? "Need" : "No need", key.c_str(), old_buffer_profile_name.c_str()); + sai_attribute_t attr; attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID; attr.value.oid = sai_buffer_profile; @@ -922,6 +1002,12 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); return task_process_status::task_invalid_entry; } + + QueueTask::PortContext portContext; + portContext.port_name = port_name; + portContext.local_port = local_port; + portContext.local_port_name = local_port_name; + for (size_t ind = range_low; ind <= range_high; ind++) { SWSS_LOG_DEBUG("processing queue:%zd", ind); @@ -956,7 +1042,52 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) if (need_update_sai) { SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id); - sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr); + } + + QueueTask::QueueContext queueContext; + queueContext.queue_id = queue_id; + queueContext.attr = SaiAttrWrapper(SAI_OBJECT_TYPE_QUEUE, attr); + queueContext.counter_was_added = counter_was_added; + queueContext.counter_needs_to_add = counter_needs_to_add; + queueContext.index = ind; + queueContext.update_sai = need_update_sai; + + portContext.queues.emplace_back(queueContext); + } + + task.ports.emplace_back(portContext); + } + + m_queueBulk[op].emplace_back(task); + + return task_process_status::task_success; +} + +task_process_status BufferOrch::processQueuePost(const QueueTask& task) +{ + SWSS_LOG_ENTER(); + + const auto& key = kfvKey(task.kofvs); + const auto& op = kfvOp(task.kofvs); + const auto tokens = tokenize(key, delimiter); + Port port; + + for (const auto& portContext: task.ports) + { + const auto& port_name = portContext.port_name; + if (!gPortsOrch->getPort(port_name, port)) + { + SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); + return task_process_status::task_invalid_entry; + } + for (const auto& queueContext: portContext.queues) + { + const auto ind = queueContext.index; + + if (queueContext.update_sai) + { + const auto sai_status = queueContext.status; + if (sai_status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set queue's buffer profile attribute, status:%d", sai_status); @@ -966,20 +1097,27 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return handle_status; } } - // create/remove a port queue counter for the queue buffer - else + // create/remove a port queue counter for the queue buffer. + // For VOQ chassis, flexcounterorch adds the Queue Counters for all egress and VOQ queues of all front panel and system ports + // to the FLEX_COUNTER_DB irrespective of BUFFER_QUEUE configuration. So Port Queue counter needs to be updated only for non VOQ switch. + else if (gMySwitchType != "voq") { auto flexCounterOrch = gDirectory.get(); - auto queues = tokens[1]; - if (op == SET_COMMAND && - (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) + if (flexCounterOrch->isCreateOnlyConfigDbBuffers()) { - gPortsOrch->createPortBufferQueueCounters(port, queues); - } - else if (op == DEL_COMMAND && - (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) - { - gPortsOrch->removePortBufferQueueCounters(port, queues); + auto queues = tokens[1]; + if (!queueContext.counter_was_added && queueContext.counter_needs_to_add && + (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) + { + SWSS_LOG_INFO("Creating counters for %s %zd", port_name.c_str(), ind); + gPortsOrch->createPortBufferQueueCounters(port, queues); + } + else if (queueContext.counter_was_added && !queueContext.counter_needs_to_add && + (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) + { + SWSS_LOG_INFO("Removing counters for %s %zd", port_name.c_str(), ind); + gPortsOrch->removePortBufferQueueCounters(port, queues); + } } } } @@ -1032,17 +1170,23 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) // should be applied to a physical port before the physical port is brought up to // carry traffic. Here, we alert to application through syslog when such a wrong // set order is detected. - for (const auto &port_name : port_names) + for (const auto &portContext : task.ports) { + const auto& port_name = portContext.port_name; + const auto& local_port_name = portContext.local_port_name; + const auto local_port = portContext.local_port; + if(local_port == true) { - if (gPortsOrch->isPortAdminUp(local_port_name)) { + if (gPortsOrch->isPortAdminUp(local_port_name)) + { SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); } } else { - if (gPortsOrch->isPortAdminUp(port_name)) { + if (gPortsOrch->isPortAdminUp(port_name)) + { SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); } } @@ -1052,6 +1196,72 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_success; } +void BufferOrch::processQueueBulk(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + + for (const auto op: {DEL_COMMAND, SET_COMMAND}) + { + std::vector oids; + std::vector attrs; + std::vector statuses; + + auto& bulk = m_queueBulk[op]; + for (const auto& task: bulk) + { + for (const auto& port: task.ports) + { + for (const auto& queue: port.queues) + { + if (queue.update_sai) + { + oids.push_back(queue.queue_id); + attrs.push_back(queue.attr.getSaiAttr()); + statuses.push_back(SAI_STATUS_NOT_EXECUTED); + } + } + } + } + + const auto objectCount = static_cast(oids.size()); + + if (objectCount > 0) + { + SWSS_LOG_TIMER("Set %u queues buffer profile", objectCount); + + sai_queue_api->set_queues_attribute(objectCount, oids.data(), attrs.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, statuses.data()); + } + + size_t i = 0; + for (auto& task: bulk) + { + for (auto& port: task.ports) + { + for (auto& queue: port.queues) + { + if (queue.update_sai) + { + queue.status = statuses[i]; + i++; + } + } + } + } + + for (const auto& task: bulk) + { + auto task_status = processQueuePost(task); + if (task_status == task_process_status::task_need_retry) + { + consumer.m_toSync.emplace(kfvKey(task.kofvs), task.kofvs); + } + } + + bulk.clear(); + } +} + /* Input sample "BUFFER_PG|Ethernet4,Ethernet45|10-15" */ @@ -1065,8 +1275,12 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup vector tokens; sai_uint32_t range_low, range_high; bool need_update_sai = true; + bool counter_was_added = false; + bool counter_needs_to_add = false; + string old_buffer_profile_name; + + SWSS_LOG_DEBUG("KEY: %s, OP: %s", key.c_str(), op.c_str()); - SWSS_LOG_DEBUG("processing:%s", key.c_str()); tokens = tokenize(key, delimiter); if (tokens.size() != 2) { @@ -1080,6 +1294,9 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return task_process_status::task_invalid_entry; } + PriorityGroupTask task; + task.kofvs = tuple; + if (op == SET_COMMAND) { ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, @@ -1097,7 +1314,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return task_process_status::task_failed; } - string old_buffer_profile_name; if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name) && (old_buffer_profile_name == buffer_profile_name)) { @@ -1105,14 +1321,51 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return task_process_status::task_success; } + BufferPriorityGroupConfig cfg; + m_bufHlpr.getBufferConfig(cfg, key); + + for (const auto &cit : kfvFieldsValues(tuple)) + { + auto field = fvField(cit); + auto value = fvValue(cit); + + SWSS_LOG_DEBUG("FIELD: %s, VALUE: %s", field.c_str(), value.c_str()); + + cfg.fieldValueMap[field] = value; + } + + m_bufHlpr.parseBufferConfig(cfg); + + if (cfg.profile.is_set) + { + BufferProfileConfig profCfg; + + if (m_bufHlpr.getBufferConfig(profCfg, cfg.profile.value)) + { + if (profCfg.isTrimmingEligible) + { + SWSS_LOG_ERROR( + "Failed to configure ingress priority group(%s): buffer profile(%s) is trimming eligible", + key.c_str(), cfg.profile.value.c_str() + ); + return task_process_status::task_failed; + } + } + } + + m_bufHlpr.setBufferConfig(key, cfg); + SWSS_LOG_NOTICE("Set buffer PG %s to %s", key.c_str(), buffer_profile_name.c_str()); setObjectReference(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name); + + // Counter operation + counter_needs_to_add = buffer_profile_name.find("_zero_") == std::string::npos; + SWSS_LOG_INFO("%s to create counter for priority group %s with new profile %s", counter_needs_to_add ? "Need" : "No need", key.c_str(), buffer_profile_name.c_str()); } else if (op == DEL_COMMAND) { - auto &typemap = (*m_buffer_type_maps[APP_BUFFER_PG_TABLE_NAME]); - if (typemap.find(key) == typemap.end()) + if (!doesObjectExist(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name)) { SWSS_LOG_INFO("%s doesn't not exist, don't need to notfiy SAI", key.c_str()); need_update_sai = false; @@ -1120,6 +1373,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup sai_buffer_profile = SAI_NULL_OBJECT_ID; SWSS_LOG_NOTICE("Remove buffer PG %s", key.c_str()); removeObject(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key); + m_bufHlpr.delBufferPriorityGroupConfig(key); } else { @@ -1127,6 +1381,9 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return task_process_status::task_invalid_entry; } + counter_was_added = !old_buffer_profile_name.empty() && old_buffer_profile_name.find("_zero_") == std::string::npos; + SWSS_LOG_INFO("%s to remove counter for priority group %s with old profile %s", counter_was_added ? "Need" : "No need", key.c_str(), old_buffer_profile_name.c_str()); + sai_attribute_t attr; attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE; attr.value.oid = sai_buffer_profile; @@ -1139,6 +1396,10 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); return task_process_status::task_invalid_entry; } + + PriorityGroupTask::PortContext portContext; + portContext.port_name = port_name; + for (size_t ind = range_low; ind <= range_high; ind++) { SWSS_LOG_DEBUG("processing pg:%zd", ind); @@ -1149,34 +1410,83 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup } else { + sai_object_id_t pg_id = port.m_priority_group_ids[ind]; if (need_update_sai) { - sai_object_id_t pg_id; - pg_id = port.m_priority_group_ids[ind]; SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to port:%s pg index:%zd, pg sai_id:0x%" PRIx64, sai_buffer_profile, port_name.c_str(), ind, pg_id); - sai_status_t sai_status = sai_buffer_api->set_ingress_priority_group_attribute(pg_id, &attr); - if (sai_status != SAI_STATUS_SUCCESS) + } + + PriorityGroupTask::PgContext pgContext; + pgContext.pg_id = pg_id; + pgContext.attr = SaiAttrWrapper(SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP, attr); + pgContext.counter_was_added = counter_was_added; + pgContext.counter_needs_to_add = counter_needs_to_add; + pgContext.index = ind; + pgContext.update_sai = need_update_sai; + + portContext.pgs.emplace_back(pgContext); + } + } + + task.ports.emplace_back(portContext); + } + + m_priorityGroupBulk[op].emplace_back(task); + + return task_process_status::task_success; +} + +task_process_status BufferOrch::processPriorityGroupPost(const PriorityGroupTask& task) +{ + SWSS_LOG_ENTER(); + + const auto& key = kfvKey(task.kofvs); + const auto& op = kfvOp(task.kofvs); + const auto tokens = tokenize(key, delimiter); + Port port; + + for (const auto& portContext: task.ports) + { + const auto& port_name = portContext.port_name; + if (!gPortsOrch->getPort(port_name, port)) + { + SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); + return task_process_status::task_invalid_entry; + } + for (const auto& pg: portContext.pgs) + { + const auto ind = pg.index; + + if (pg.update_sai) + { + const auto sai_status = pg.status; + + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set port:%s pg:%zd buffer profile attribute, status:%d", port_name.c_str(), ind, sai_status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) { - SWSS_LOG_ERROR("Failed to set port:%s pg:%zd buffer profile attribute, status:%d", port_name.c_str(), ind, sai_status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) - { - return handle_status; - } + return handle_status; } - // create or remove a port PG counter for the PG buffer - else + } + // create or remove a port PG counter for the PG buffer + else + { + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->isCreateOnlyConfigDbBuffers()) { - auto flexCounterOrch = gDirectory.get(); auto pgs = tokens[1]; - if (op == SET_COMMAND && + if (!pg.counter_was_added && pg.counter_needs_to_add && (flexCounterOrch->getPgCountersState() || flexCounterOrch->getPgWatermarkCountersState())) { + SWSS_LOG_INFO("Creating counters for priority group %s %zd", port_name.c_str(), ind); gPortsOrch->createPortBufferPgCounters(port, pgs); } - else if (op == DEL_COMMAND && - (flexCounterOrch->getPgCountersState() || flexCounterOrch->getPgWatermarkCountersState())) + else if (pg.counter_was_added && !pg.counter_needs_to_add && + (flexCounterOrch->getPgCountersState() || flexCounterOrch->getPgWatermarkCountersState())) { + SWSS_LOG_INFO("Removing counters for priority group %s %zd", port_name.c_str(), ind); gPortsOrch->removePortBufferPgCounters(port, pgs); } } @@ -1214,7 +1524,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup } /* save the last command (set or delete) */ pg_port_flags[port_name][ind] = op; - } } @@ -1231,9 +1540,11 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup // should be applied to a physical port before the physical port is brought up to // carry traffic. Here, we alert to application through syslog when such a wrong // set order is detected. - for (const auto &port_name : port_names) + for (const auto &portContext : task.ports) { - if (gPortsOrch->isPortAdminUp(port_name)) { + const auto& port_name = portContext.port_name; + if (gPortsOrch->isPortAdminUp(port_name)) + { SWSS_LOG_WARN("PG profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); } } @@ -1242,6 +1553,73 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return task_process_status::task_success; } +void BufferOrch::processPriorityGroupBulk(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + + for (const auto op: {DEL_COMMAND, SET_COMMAND}) + { + std::vector oids; + std::vector attrs; + std::vector statuses; + + auto& bulk = m_priorityGroupBulk[op]; + for (const auto& task: bulk) + { + for (const auto& port: task.ports) + { + for (const auto& pg: port.pgs) + { + if (pg.update_sai) + { + oids.push_back(pg.pg_id); + attrs.push_back(pg.attr.getSaiAttr()); + statuses.push_back(SAI_STATUS_NOT_EXECUTED); + } + } + } + } + + const auto objectCount = static_cast(oids.size()); + + if (objectCount > 0) + { + SWSS_LOG_TIMER("Set %u ingress priority groups buffer profile", objectCount); + + sai_buffer_api->set_ingress_priority_groups_attribute(objectCount, oids.data(), attrs.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, statuses.data()); + } + + size_t i = 0; + for (auto& task: bulk) + { + for (auto& port: task.ports) + { + for (auto& pg: port.pgs) + { + if (pg.update_sai) + { + pg.status = statuses[i]; + i++; + } + } + } + } + + for (const auto& task: bulk) + { + auto task_status = processPriorityGroupPost(task); + if (task_status == task_process_status::task_need_retry) + { + consumer.m_toSync.emplace(kfvKey(task.kofvs), task.kofvs); + } + } + + bulk.clear(); + } +} + + /* Input sample:"i_port.profile0,i_port.profile1" */ @@ -1252,7 +1630,7 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue string key = kfvKey(tuple); string op = kfvOp(tuple); - SWSS_LOG_DEBUG("processing:%s", key.c_str()); + SWSS_LOG_DEBUG("KEY: %s, OP: %s", key.c_str(), op.c_str()); vector port_names = tokenize(key, list_item_delimiter); vector profile_list; @@ -1284,6 +1662,43 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue return task_process_status::task_success; } + IngressBufferProfileListConfig cfg; + m_bufHlpr.getBufferConfig(cfg, key); + + for (const auto &cit : kfvFieldsValues(tuple)) + { + auto field = fvField(cit); + auto value = fvValue(cit); + + SWSS_LOG_DEBUG("FIELD: %s, VALUE: %s", field.c_str(), value.c_str()); + + cfg.fieldValueMap[field] = value; + } + + m_bufHlpr.parseBufferConfig(cfg); + + if (cfg.profile_list.is_set) + { + for (const auto &cit : cfg.profile_list.value) + { + BufferProfileConfig profCfg; + + if (m_bufHlpr.getBufferConfig(profCfg, cit)) + { + if (profCfg.isTrimmingEligible) + { + SWSS_LOG_ERROR( + "Failed to configure ingress buffer profile list(%s): buffer profile(%s) is trimming eligible", + key.c_str(), cit.c_str() + ); + return task_process_status::task_failed; + } + } + } + } + + m_bufHlpr.setBufferConfig(key, cfg); + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); attr.value.objlist.count = (uint32_t)profile_list.size(); @@ -1293,6 +1708,7 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue { SWSS_LOG_NOTICE("%s has been removed from BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE", key.c_str()); removeObject(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key); + m_bufHlpr.delIngressBufferProfileListConfig(key); attr.value.objlist.count = 0; attr.value.objlist.list = profile_list.data(); } @@ -1301,6 +1717,9 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue SWSS_LOG_ERROR("Unknown command %s when handling BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE key %s", op.c_str(), key.c_str()); } + PortBufferProfileListTask task; + task.kofvs = tuple; + for (string port_name : port_names) { if (!gPortsOrch->getPort(port_name, port)) @@ -1308,7 +1727,21 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); return task_process_status::task_invalid_entry; } - sai_status_t sai_status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + task.ports.emplace_back(PortBufferProfileListTask::PortContext{port_name, port.m_port_id, SaiAttrWrapper{SAI_OBJECT_TYPE_PORT, attr}, SAI_STATUS_NOT_EXECUTED}); + } + + m_portIngressBufferProfileListBulk[op].push_back(task); + + return task_process_status::task_success; +} + +task_process_status BufferOrch::processIngressBufferProfileListPost(const PortBufferProfileListTask& task) +{ + for (const auto& portContext: task.ports) + { + const auto& port_name = portContext.port_name; + sai_status_t sai_status = portContext.status; if (sai_status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set ingress buffer profile list on port, status:%d, key:%s", sai_status, port_name.c_str()); @@ -1323,6 +1756,60 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue return task_process_status::task_success; } +void BufferOrch::processIngressBufferProfileListBulk(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + + for (const auto op: {DEL_COMMAND, SET_COMMAND}) + { + std::vector oids; + std::vector attrs; + std::vector statuses; + + auto& bulk = m_portIngressBufferProfileListBulk[op]; + for (const auto& task: bulk) + { + for (const auto& port: task.ports) + { + oids.push_back(port.port_oid); + attrs.push_back(port.attr.getSaiAttr()); + statuses.push_back(SAI_STATUS_NOT_EXECUTED); + } + } + + const auto objectCount = static_cast(oids.size()); + + if (objectCount > 0) + { + SWSS_LOG_TIMER("Set %u ports ingress buffer profile list", objectCount); + + sai_port_api->set_ports_attribute(objectCount, oids.data(), attrs.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, statuses.data()); + } + + size_t i = 0; + for (auto& task: bulk) + { + for (auto& port: task.ports) + { + port.status = statuses[i]; + i++; + } + } + + for (const auto& task: bulk) + { + auto task_status = processIngressBufferProfileListPost(task); + if (task_status == task_process_status::task_need_retry) + { + consumer.m_toSync.emplace(kfvKey(task.kofvs), task.kofvs); + } + } + + bulk.clear(); + } +} + /* Input sample:"e_port.profile0,e_port.profile1" */ @@ -1332,7 +1819,9 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues Port port; string key = kfvKey(tuple); string op = kfvOp(tuple); - SWSS_LOG_DEBUG("processing:%s", key.c_str()); + + SWSS_LOG_DEBUG("KEY: %s, OP: %s", key.c_str(), op.c_str()); + vector port_names = tokenize(key, list_item_delimiter); vector profile_list; sai_attribute_t attr; @@ -1363,6 +1852,43 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues return task_process_status::task_success; } + EgressBufferProfileListConfig cfg; + m_bufHlpr.getBufferConfig(cfg, key); + + for (const auto &cit : kfvFieldsValues(tuple)) + { + auto field = fvField(cit); + auto value = fvValue(cit); + + SWSS_LOG_DEBUG("FIELD: %s, VALUE: %s", field.c_str(), value.c_str()); + + cfg.fieldValueMap[field] = value; + } + + m_bufHlpr.parseBufferConfig(cfg); + + if (cfg.profile_list.is_set) + { + for (const auto &cit : cfg.profile_list.value) + { + BufferProfileConfig profCfg; + + if (m_bufHlpr.getBufferConfig(profCfg, cit)) + { + if (profCfg.isTrimmingEligible) + { + SWSS_LOG_ERROR( + "Failed to configure egress buffer profile list(%s): buffer profile(%s) is trimming eligible", + key.c_str(), cit.c_str() + ); + return task_process_status::task_failed; + } + } + } + } + + m_bufHlpr.setBufferConfig(key, cfg); + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); attr.value.objlist.count = (uint32_t)profile_list.size(); @@ -1372,6 +1898,7 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues { SWSS_LOG_NOTICE("%s has been removed from BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE", key.c_str()); removeObject(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key); + m_bufHlpr.delEgressBufferProfileListConfig(key); attr.value.objlist.count = 0; attr.value.objlist.list = profile_list.data(); } @@ -1380,6 +1907,9 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues SWSS_LOG_ERROR("Unknown command %s when handling BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE key %s", op.c_str(), key.c_str()); } + PortBufferProfileListTask task; + task.kofvs = tuple; + for (string port_name : port_names) { if (!gPortsOrch->getPort(port_name, port)) @@ -1387,7 +1917,21 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); return task_process_status::task_invalid_entry; } - sai_status_t sai_status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + task.ports.emplace_back(PortBufferProfileListTask::PortContext{port_name, port.m_port_id, SaiAttrWrapper{SAI_OBJECT_TYPE_PORT, attr}, SAI_STATUS_NOT_EXECUTED}); + } + + m_portEgressBufferProfileListBulk[op].push_back(task); + + return task_process_status::task_success; +} + +task_process_status BufferOrch::processEgressBufferProfileListPost(const PortBufferProfileListTask& task) +{ + for (const auto& portContext: task.ports) + { + const auto& port_name = portContext.port_name; + sai_status_t sai_status = portContext.status; if (sai_status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set egress buffer profile list on port, status:%d, key:%s", sai_status, port_name.c_str()); @@ -1402,6 +1946,60 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues return task_process_status::task_success; } +void BufferOrch::processEgressBufferProfileListBulk(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + + for (const auto op: {DEL_COMMAND, SET_COMMAND}) + { + std::vector oids; + std::vector attrs; + std::vector statuses; + + auto& bulk = m_portEgressBufferProfileListBulk[op]; + for (const auto& task: bulk) + { + for (const auto& port: task.ports) + { + oids.push_back(port.port_oid); + attrs.push_back(port.attr.getSaiAttr()); + statuses.push_back(SAI_STATUS_NOT_EXECUTED); + } + } + + const auto objectCount = static_cast(oids.size()); + + if (objectCount > 0) + { + SWSS_LOG_TIMER("Set %u ports egress buffer profile list", objectCount); + + sai_port_api->set_ports_attribute(objectCount, oids.data(), attrs.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, statuses.data()); + } + + size_t i = 0; + for (auto& task: bulk) + { + for (auto& port: task.ports) + { + port.status = statuses[i]; + i++; + } + } + + for (const auto& task: bulk) + { + auto task_status = processEgressBufferProfileListPost(task); + if (task_status == task_process_status::task_need_retry) + { + consumer.m_toSync.emplace(kfvKey(task.kofvs), task.kofvs); + } + } + + bulk.clear(); + } +} + void BufferOrch::doTask() { // The hidden dependency tree: @@ -1434,6 +2032,7 @@ void BufferOrch::doTask() continue; consumer->drain(); } + gPortsOrch->flushCounters(); } void BufferOrch::doTask(Consumer &consumer) @@ -1454,11 +2053,12 @@ void BufferOrch::doTask(Consumer &consumer) return; } + auto map_type_name = consumer.getTableName(); + auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { /* Make sure the handler is initialized for the task */ - auto map_type_name = consumer.getTableName(); if (m_bufferHandlerMap.find(map_type_name) == m_bufferHandlerMap.end()) { SWSS_LOG_ERROR("No handler for key:%s found.", map_type_name.c_str()); @@ -1491,4 +2091,11 @@ void BufferOrch::doTask(Consumer &consumer) break; } } + + if (m_bufferFlushHandlerMap.find(map_type_name) != m_bufferFlushHandlerMap.end()) + { + (this->*(m_bufferFlushHandlerMap[map_type_name]))(consumer); + } + + gPortsOrch->flushCounters(); } diff --git a/orchagent/bufferorch.h b/orchagent/bufferorch.h index de1e75c0a66..8182d77a1b7 100644 --- a/orchagent/bufferorch.h +++ b/orchagent/bufferorch.h @@ -7,8 +7,13 @@ #include "orch.h" #include "portsorch.h" #include "redisapi.h" +#include "saiattr.h" + +#include "buffer/bufferhelper.h" +#include "high_frequency_telemetry/counternameupdater.h" #define BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP "BUFFER_POOL_WATERMARK_STAT_COUNTER" +#define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" const string buffer_size_field_name = "size"; const string buffer_pool_type_field_name = "type"; @@ -29,6 +34,68 @@ const string buffer_value_both = "both"; const string buffer_profile_list_field_name = "profile_list"; const string buffer_headroom_type_field_name= "headroom_type"; +struct PortBufferProfileListTask +{ + struct PortContext + { + std::string port_name; + sai_object_id_t port_oid = SAI_NULL_OBJECT_ID; + SaiAttrWrapper attr = {}; + sai_status_t status = SAI_STATUS_NOT_EXECUTED; + }; + + KeyOpFieldsValuesTuple kofvs; + std::vector ports; +}; + +struct PriorityGroupTask +{ + struct PgContext + { + size_t index; + bool update_sai = true; + bool counter_was_added = false; + bool counter_needs_to_add = false; + sai_object_id_t pg_id = SAI_NULL_OBJECT_ID; + SaiAttrWrapper attr = {}; + sai_status_t status = SAI_STATUS_NOT_EXECUTED; + }; + + struct PortContext + { + std::string port_name; + std::vector pgs; + }; + + KeyOpFieldsValuesTuple kofvs; + std::vector ports; +}; + +struct QueueTask +{ + struct QueueContext + { + size_t index; + bool update_sai = true; + bool counter_was_added = false; + bool counter_needs_to_add = false; + sai_object_id_t queue_id = SAI_NULL_OBJECT_ID; + SaiAttrWrapper attr = {}; + sai_status_t status = SAI_STATUS_NOT_EXECUTED; + }; + + struct PortContext + { + std::string port_name; + bool local_port = false; + std::string local_port_name; + std::vector queues; + }; + + KeyOpFieldsValuesTuple kofvs; + std::vector ports; +}; + class BufferOrch : public Orch { public: @@ -37,12 +104,17 @@ class BufferOrch : public Orch static type_map m_buffer_type_maps; void generateBufferPoolWatermarkCounterIdList(void); const object_reference_map &getBufferPoolNameOidMap(void); + void getBufferObjectsWithNonZeroProfile(vector &nonZeroQueues, const string &table); private: typedef task_process_status (BufferOrch::*buffer_table_handler)(KeyOpFieldsValuesTuple &tuple); typedef map buffer_table_handler_map; typedef pair buffer_handler_pair; + typedef void (BufferOrch::*buffer_table_flush_handler)(Consumer& consumer); + typedef map buffer_table_flush_handler_map; + typedef pair buffer_flush_handler_pair; + void doTask() override; virtual void doTask(Consumer& consumer); void clearBufferPoolWatermarkCounterIdList(const sai_object_id_t object_id); @@ -54,25 +126,47 @@ class BufferOrch : public Orch void initBufferConstants(); task_process_status processBufferPool(KeyOpFieldsValuesTuple &tuple); task_process_status processBufferProfile(KeyOpFieldsValuesTuple &tuple); + + // These methods process input task and add operations to the bulk buffer. This is first stage. task_process_status processQueue(KeyOpFieldsValuesTuple &tuple); task_process_status processPriorityGroup(KeyOpFieldsValuesTuple &tuple); task_process_status processIngressBufferProfileList(KeyOpFieldsValuesTuple &tuple); task_process_status processEgressBufferProfileList(KeyOpFieldsValuesTuple &tuple); + // These methods flush the bulk buffer and update SAI return status codes per task. + void processQueueBulk(Consumer& consumer); + void processPriorityGroupBulk(Consumer& consumer); + void processIngressBufferProfileListBulk(Consumer& consumer); + void processEgressBufferProfileListBulk(Consumer& consumer); + + // These methods are invoked by the corresponding *Bulk methods after SAI operations complete. + // These handle SAI return status code per task. This is second stage. + task_process_status processQueuePost(const QueueTask& task); + task_process_status processPriorityGroupPost(const PriorityGroupTask& task); + task_process_status processIngressBufferProfileListPost(const PortBufferProfileListTask& task); + task_process_status processEgressBufferProfileListPost(const PortBufferProfileListTask& task); + buffer_table_handler_map m_bufferHandlerMap; + buffer_table_flush_handler_map m_bufferFlushHandlerMap; std::unordered_map m_ready_list; std::unordered_map> m_port_ready_list_ref; - unique_ptr m_flexCounterDb; - unique_ptr m_flexCounterGroupTable; - unique_ptr m_flexCounterTable; - Table m_stateBufferMaximumValueTable; + unique_ptr m_counterNameMapUpdater; unique_ptr m_countersDb; bool m_isBufferPoolWatermarkCounterIdListGenerated = false; set m_partiallyAppliedQueues; + + // Bulk task buffers per DB operation + std::map> m_portIngressBufferProfileListBulk; + std::map> m_portEgressBufferProfileListBulk; + std::map> m_priorityGroupBulk; + std::map> m_queueBulk; + + // Buffer OA helper + BufferHelper m_bufHlpr; }; #endif /* SWSS_BUFFORCH_H */ diff --git a/orchagent/bulker.h b/orchagent/bulker.h index dcbc134d3bb..4faee5185a1 100644 --- a/orchagent/bulker.h +++ b/orchagent/bulker.h @@ -39,6 +39,13 @@ typedef sai_status_t (*sai_bulk_set_inbound_routing_entry_attribute_fn) ( _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses); +typedef sai_status_t (*sai_bulk_set_outbound_port_map_port_range_entry_attribute_fn) ( + _In_ uint32_t object_count, + _In_ const sai_outbound_port_map_port_range_entry_t *entry, + _In_ const sai_attribute_t *attr_list, + _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses); + static inline bool operator==(const sai_ip_prefix_t& a, const sai_ip_prefix_t& b) { if (a.addr_family != b.addr_family) return false; @@ -96,6 +103,14 @@ static inline bool operator==(const sai_inseg_entry_t& a, const sai_inseg_entry_ ; } +static inline bool operator==(const sai_neighbor_entry_t& a, const sai_neighbor_entry_t& b) +{ + return a.switch_id == b.switch_id + && a.rif_id == b.rif_id + && a.ip_address == b.ip_address + ; +} + static inline bool operator==(const sai_inbound_routing_entry_t& a, const sai_inbound_routing_entry_t& b) { return a.switch_id == b.switch_id @@ -126,11 +141,20 @@ static inline bool operator==(const sai_pa_validation_entry_t& a, const sai_pa_v static inline bool operator==(const sai_outbound_routing_entry_t& a, const sai_outbound_routing_entry_t& b) { return a.switch_id == b.switch_id - && a.eni_id == b.eni_id + && a.outbound_routing_group_id == b.outbound_routing_group_id && a.destination == b.destination ; } +static inline bool operator==(const sai_outbound_port_map_port_range_entry_t& a, const sai_outbound_port_map_port_range_entry_t& b) +{ + return a.switch_id == b.switch_id + && a.outbound_port_map_id == b.outbound_port_map_id + && a.dst_port_range.min == b.dst_port_range.min + && a.dst_port_range.max == b.dst_port_range.max + ; +} + static inline std::size_t hash_value(const sai_ip_prefix_t& a) { size_t seed = 0; @@ -203,6 +227,19 @@ namespace std } }; + template <> + struct hash + { + size_t operator()(const sai_neighbor_entry_t& a) const noexcept + { + size_t seed = 0; + boost::hash_combine(seed, a.switch_id); + boost::hash_combine(seed, a.rif_id); + boost::hash_combine(seed, a.ip_address); + return seed; + } + }; + template <> struct hash { @@ -236,7 +273,7 @@ namespace std { size_t seed = 0; boost::hash_combine(seed, a.switch_id); - boost::hash_combine(seed, a.eni_id); + boost::hash_combine(seed, a.outbound_routing_group_id); boost::hash_combine(seed, a.destination); return seed; } @@ -255,6 +292,20 @@ namespace std return seed; } }; + + template <> + struct hash + { + size_t operator()(const sai_outbound_port_map_port_range_entry_t& a) const noexcept + { + size_t seed = 0; + boost::hash_combine(seed, a.switch_id); + boost::hash_combine(seed, a.outbound_port_map_id); + boost::hash_combine(seed, a.dst_port_range.min); + boost::hash_combine(seed, a.dst_port_range.max); + return seed; + } + }; } // SAI typedef which is not available in SAI 1.5 @@ -321,6 +372,20 @@ struct SaiBulkerTraits //using bulk_set_entry_attribute_fn = sai_bulk_object_set_attribute_fn; }; +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_object_id_t; + using api_t = sai_next_hop_api_t; + using create_entry_fn = sai_create_next_hop_fn; + using remove_entry_fn = sai_remove_next_hop_fn; + using set_entry_attribute_fn = sai_set_next_hop_attribute_fn; + using bulk_create_entry_fn = sai_bulk_object_create_fn; + using bulk_remove_entry_fn = sai_bulk_object_remove_fn; + // TODO: wait until available in SAI + //using bulk_set_entry_attribute_fn = sai_bulk_object_set_attribute_fn; +}; + template<> struct SaiBulkerTraits { @@ -334,6 +399,31 @@ struct SaiBulkerTraits using bulk_set_entry_attribute_fn = sai_bulk_set_inseg_entry_attribute_fn; }; +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_neighbor_entry_t; + using api_t = sai_neighbor_api_t; + using create_entry_fn = sai_create_neighbor_entry_fn; + using remove_entry_fn = sai_remove_neighbor_entry_fn; + using set_entry_attribute_fn = sai_set_neighbor_entry_attribute_fn; + using bulk_create_entry_fn = sai_bulk_create_neighbor_entry_fn; + using bulk_remove_entry_fn = sai_bulk_remove_neighbor_entry_fn; + using bulk_set_entry_attribute_fn = sai_bulk_set_neighbor_entry_attribute_fn; +}; + +template<> +struct SaiBulkerTraits +{ + using entry_t = sai_object_id_t; + using api_t = sai_dash_meter_api_t; + using create_entry_fn = sai_create_meter_rule_fn; + using remove_entry_fn = sai_remove_meter_rule_fn; + using set_entry_attribute_fn = sai_set_meter_rule_attribute_fn; + using bulk_create_entry_fn = sai_bulk_object_create_fn; + using bulk_remove_entry_fn = sai_bulk_object_remove_fn; +}; + template<> struct SaiBulkerTraits { @@ -398,6 +488,28 @@ struct SaiBulkerTraits using bulk_set_entry_attribute_fn = sai_bulk_set_outbound_routing_entry_attribute_fn; }; +template<> +struct SaiBulkerTraits +{ + // cannot set entry_t or the non-bulk create/remove functions since there are multiple object types defined in the DASH tunnel API + using api_t = sai_dash_tunnel_api_t; + using bulk_create_entry_fn = sai_bulk_object_create_fn; + using bulk_remove_entry_fn = sai_bulk_object_remove_fn; +}; + +template<> +struct SaiBulkerTraits +{ + // Need to bulk port map objects and port map range entries from the same DASH API + // entry_t, bulk_create/remove_entry_fn are only used by EntityBulker so we can use them for + // port map port range bulking w/o affecting port map object bulking + using api_t = sai_dash_outbound_port_map_api_t; + using entry_t = sai_outbound_port_map_port_range_entry_t; + using bulk_create_entry_fn = sai_bulk_create_outbound_port_map_port_range_entry_fn; + using bulk_remove_entry_fn = sai_bulk_remove_outbound_port_map_port_range_entry_fn; + using bulk_set_entry_attribute_fn = sai_bulk_set_outbound_port_map_port_range_entry_attribute_fn; +}; + template class EntityBulker { @@ -436,6 +548,7 @@ class EntityBulker return *object_status; } + create_order.push_back(it->first); auto& attrs = it->second.first; attrs.insert(attrs.end(), attr_list, attr_list + attr_count); it->second.second = object_status; @@ -481,6 +594,7 @@ class EntityBulker auto rc = removing_entries.emplace(std::piecewise_construct, std::forward_as_tuple(*entry), std::forward_as_tuple(object_status)); + remove_order.push_back(rc.first->first); bool inserted = rc.second; SWSS_LOG_INFO("EntityBulker.remove_entry %zu, %d\n", removing_entries.size(), inserted); @@ -500,11 +614,14 @@ class EntityBulker assert(attr); if (!attr) throw std::invalid_argument("attr is null"); - // Insert or find the key (entry) - auto& attrs = setting_entries.emplace(std::piecewise_construct, + auto rc = setting_entries.emplace(std::piecewise_construct, std::forward_as_tuple(*entry), - std::forward_as_tuple() - ).first->second; + std::forward_as_tuple()); + auto it = rc.first; + set_order.push_back(it->first); + + // Insert or find the key (entry) + auto& attrs = it->second; // Insert attr attrs.emplace_back(std::piecewise_construct, @@ -520,10 +637,14 @@ class EntityBulker { std::vector rs; - for (auto& i: removing_entries) + for (auto const& entry : remove_order) { - auto const& entry = i.first; - sai_status_t *object_status = i.second; + auto i = removing_entries.find(entry); + if (i == removing_entries.end()) + { + continue; + } + sai_status_t *object_status = i->second; if (*object_status == SAI_STATUS_NOT_EXECUTED) { rs.push_back(entry); @@ -537,6 +658,7 @@ class EntityBulker flush_removing_entries(rs); removing_entries.clear(); + remove_order.clear(); } // Creating @@ -546,11 +668,15 @@ class EntityBulker std::vector tss; std::vector cs; - for (auto const& i: creating_entries) + for (auto const& entry : create_order) { - auto const& entry = i.first; - auto const& attrs = i.second.first; - sai_status_t *object_status = i.second.second; + auto i = creating_entries.find(entry); + if (i == creating_entries.end()) + { + continue; + } + auto const& attrs = i->second.first; + sai_status_t *object_status = i->second.second; if (*object_status == SAI_STATUS_NOT_EXECUTED) { rs.push_back(entry); @@ -566,6 +692,7 @@ class EntityBulker flush_creating_entries(rs, tss, cs); creating_entries.clear(); + create_order.clear(); } // Setting @@ -574,11 +701,24 @@ class EntityBulker std::vector rs; std::vector ts; std::vector status_vector; + // Use a set to keep track of the entries that have been processed. + std::unordered_set entries; - for (auto const& i: setting_entries) + for (auto const& entry : set_order) { - auto const& entry = i.first; - auto const& attrs = i.second; + // Skip the entry if it is alreay processed. + // All attributes of an entry are processed in the first run. + if (entries.count(entry) != 0) + { + continue; + } + auto i = setting_entries.find(entry); + if (i == setting_entries.end()) + { + continue; + } + entries.insert(entry); + auto const& attrs = i->second; for (auto const& ia: attrs) { auto const& attr = ia.first; @@ -599,6 +739,7 @@ class EntityBulker flush_setting_entries(rs, ts, status_vector); setting_entries.clear(); + set_order.clear(); } } @@ -607,6 +748,9 @@ class EntityBulker removing_entries.clear(); creating_entries.clear(); setting_entries.clear(); + remove_order.clear(); + create_order.clear(); + set_order.clear(); } size_t creating_entries_count() const @@ -634,6 +778,12 @@ class EntityBulker return removing_entries.find(entry) != removing_entries.end(); } + bool bulk_entry_pending_removal_or_set(const Te& entry) const + { + return removing_entries.find(entry) != removing_entries.end() || + setting_entries.find(entry) != setting_entries.end(); + } + private: std::unordered_map< // A map of Te, // entry -> @@ -658,6 +808,10 @@ class EntityBulker sai_status_t * // OUT object_status > removing_entries; + std::vector create_order; + std::vector set_order; + std::vector remove_order; + size_t max_bulk_size; typename Ts::bulk_create_entry_fn create_entries; @@ -811,6 +965,15 @@ inline EntityBulker::EntityBulker(sai_mpls_api_t *api, size_t ma set_entries_attribute = api->set_inseg_entries_attribute; } +template <> +inline EntityBulker::EntityBulker(sai_neighbor_api_t *api, size_t max_bulk_size) : + max_bulk_size(max_bulk_size) +{ + create_entries = api->create_neighbor_entries; + remove_entries = api->remove_neighbor_entries; + set_entries_attribute = api->set_neighbor_entries_attribute; +} + template <> inline EntityBulker::EntityBulker(sai_dash_inbound_routing_api_t *api, size_t max_bulk_size) : max_bulk_size(max_bulk_size) { @@ -843,6 +1006,14 @@ inline EntityBulker::EntityBulker(sai_dash_outb set_entries_attribute = nullptr; } +template <> +inline EntityBulker::EntityBulker(sai_dash_outbound_port_map_api_t *api, size_t max_bulk_size) : max_bulk_size(max_bulk_size) +{ + create_entries = api->create_outbound_port_map_port_range_entries; + remove_entries = api->remove_outbound_port_map_port_range_entries; + set_entries_attribute = nullptr; +} + template class ObjectBulker { @@ -855,6 +1026,12 @@ class ObjectBulker throw std::logic_error("Not implemented"); } + ObjectBulker(typename Ts::api_t* next_hop_group_api, sai_object_id_t switch_id, size_t max_bulk_size, sai_object_type_extensions_t object_type) : + max_bulk_size(max_bulk_size) + { + throw std::logic_error("Not implemented"); + } + sai_status_t create_entry( _Out_ sai_object_id_t *object_id, _In_ uint32_t attr_count, @@ -946,6 +1123,7 @@ class ObjectBulker // Creating if (!creating_entries.empty()) { + create_statuses.clear(); std::vector rs; std::vector tss; std::vector cs; @@ -1023,6 +1201,10 @@ class ObjectBulker return removing_entries.size(); } + sai_status_t create_status(sai_object_id_t object) { + return create_statuses[object]; + } + private: struct object_entry { @@ -1057,11 +1239,13 @@ class ObjectBulker // object_id -> object_status std::unordered_map removing_entries; - typename Ts::bulk_create_entry_fn create_entries; - typename Ts::bulk_remove_entry_fn remove_entries; + sai_bulk_object_create_fn create_entries; + sai_bulk_object_remove_fn remove_entries; // TODO: wait until available in SAI //typename Ts::bulk_set_entry_attribute_fn set_entries_attribute; + std::unordered_map create_statuses; + sai_status_t flush_removing_entries( _Inout_ std::vector &rs) { @@ -1120,6 +1304,7 @@ class ObjectBulker for (size_t i = 0; i < count; i++) { + create_statuses.emplace(object_ids[i], statuses[i]); sai_object_id_t *pid = rs[i]; *pid = (statuses[i] == SAI_STATUS_SUCCESS) ? object_ids[i] : SAI_NULL_OBJECT_ID; } @@ -1174,6 +1359,17 @@ inline ObjectBulker::ObjectBulker(SaiBulkerTraits +inline ObjectBulker::ObjectBulker(SaiBulkerTraits::api_t *api, sai_object_id_t switch_id, size_t max_bulk_size) : + switch_id(switch_id), + max_bulk_size(max_bulk_size) +{ + create_entries = api->create_next_hops; + remove_entries = api->remove_next_hops; + // TODO: wait until available in SAI + //set_entries_attribute = ; +} + template <> inline ObjectBulker::ObjectBulker(SaiBulkerTraits::api_t *api, sai_object_id_t switch_id, size_t max_bulk_size) : switch_id(switch_id), @@ -1182,3 +1378,48 @@ inline ObjectBulker::ObjectBulker(SaiBulkerTraitscreate_vnets; remove_entries = api->remove_vnets; } + +template <> +inline ObjectBulker::ObjectBulker(SaiBulkerTraits::api_t *api, sai_object_id_t switch_id, size_t max_bulk_size) : + switch_id(switch_id), + max_bulk_size(max_bulk_size) +{ + create_entries = api->create_meter_rules; + remove_entries = api->remove_meter_rules; +} + +template <> +inline ObjectBulker::ObjectBulker(SaiBulkerTraits::api_t *api, sai_object_id_t switch_id, size_t max_bulk_size, sai_object_type_extensions_t object_type) : + switch_id(switch_id), + max_bulk_size(max_bulk_size) +{ + switch (object_type) + { + case SAI_OBJECT_TYPE_DASH_TUNNEL: + create_entries = api->create_dash_tunnels; + remove_entries = api->remove_dash_tunnels; + break; + case SAI_OBJECT_TYPE_DASH_TUNNEL_MEMBER: + create_entries = api->create_dash_tunnel_members; + remove_entries = api->remove_dash_tunnel_members; + break; + case SAI_OBJECT_TYPE_DASH_TUNNEL_NEXT_HOP: + create_entries = api->create_dash_tunnel_next_hops; + remove_entries = api->remove_dash_tunnel_next_hops; + break; + default: + std::string type_str = sai_serialize_object_type((sai_object_type_t) object_type); + std::stringstream ss; + ss << "Invalid object type for sai_dash_tunnel_api_t: " << type_str; + throw std::invalid_argument(ss.str()); + } +} + +template <> +inline ObjectBulker::ObjectBulker(SaiBulkerTraits::api_t *api, sai_object_id_t switch_id, size_t max_bulk_size) : + switch_id(switch_id), + max_bulk_size(max_bulk_size) +{ + create_entries = api->create_outbound_port_maps; + remove_entries = api->remove_outbound_port_maps; +} diff --git a/orchagent/copporch.cpp b/orchagent/copporch.cpp index 8a58ae73a08..05080c8ddcc 100644 --- a/orchagent/copporch.cpp +++ b/orchagent/copporch.cpp @@ -1,3 +1,9 @@ +extern "C" { +#include +#include +#include +} + #include "sai.h" #include "copporch.h" #include "portsorch.h" @@ -26,6 +32,7 @@ extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; extern Directory gDirectory; extern bool gIsNatSupported; +extern bool gTraditionalFlexCounter; #define FLEX_COUNTER_UPD_INTERVAL 1 @@ -88,9 +95,60 @@ static map trap_id_map = { {"dest_nat_miss", SAI_HOSTIF_TRAP_TYPE_DNAT_MISS}, {"ldp", SAI_HOSTIF_TRAP_TYPE_LDP}, {"bfd_micro", SAI_HOSTIF_TRAP_TYPE_BFD_MICRO}, - {"bfdv6_micro", SAI_HOSTIF_TRAP_TYPE_BFDV6_MICRO} + {"bfdv6_micro", SAI_HOSTIF_TRAP_TYPE_BFDV6_MICRO}, + {"neighbor_miss", SAI_HOSTIF_TRAP_TYPE_NEIGHBOR_MISS} }; +/* + * List of default supported traps used as a fallback when the vendor SAI + * does not support capability query for the HOSTIF object. + */ +const vector default_supported_trap_ids = { + SAI_HOSTIF_TRAP_TYPE_STP, + SAI_HOSTIF_TRAP_TYPE_LACP, + SAI_HOSTIF_TRAP_TYPE_EAPOL, + SAI_HOSTIF_TRAP_TYPE_LLDP, + SAI_HOSTIF_TRAP_TYPE_PVRST, + SAI_HOSTIF_TRAP_TYPE_IGMP_TYPE_QUERY, + SAI_HOSTIF_TRAP_TYPE_IGMP_TYPE_LEAVE, + SAI_HOSTIF_TRAP_TYPE_IGMP_TYPE_V1_REPORT, + SAI_HOSTIF_TRAP_TYPE_IGMP_TYPE_V2_REPORT, + SAI_HOSTIF_TRAP_TYPE_IGMP_TYPE_V3_REPORT, + SAI_HOSTIF_TRAP_TYPE_SAMPLEPACKET, + SAI_HOSTIF_TRAP_TYPE_SWITCH_CUSTOM_RANGE_BASE, + SAI_HOSTIF_TRAP_TYPE_ARP_REQUEST, + SAI_HOSTIF_TRAP_TYPE_ARP_RESPONSE, + SAI_HOSTIF_TRAP_TYPE_DHCP, + SAI_HOSTIF_TRAP_TYPE_OSPF, + SAI_HOSTIF_TRAP_TYPE_PIM, + SAI_HOSTIF_TRAP_TYPE_VRRP, + SAI_HOSTIF_TRAP_TYPE_BGP, + SAI_HOSTIF_TRAP_TYPE_DHCPV6, + SAI_HOSTIF_TRAP_TYPE_OSPFV6, + SAI_HOSTIF_TRAP_TYPE_ISIS, + SAI_HOSTIF_TRAP_TYPE_VRRPV6, + SAI_HOSTIF_TRAP_TYPE_BGPV6, + SAI_HOSTIF_TRAP_TYPE_IPV6_NEIGHBOR_DISCOVERY, + SAI_HOSTIF_TRAP_TYPE_IPV6_MLD_V1_V2, + SAI_HOSTIF_TRAP_TYPE_IPV6_MLD_V1_REPORT, + SAI_HOSTIF_TRAP_TYPE_IPV6_MLD_V1_DONE, + SAI_HOSTIF_TRAP_TYPE_MLD_V2_REPORT, + SAI_HOSTIF_TRAP_TYPE_IP2ME, + SAI_HOSTIF_TRAP_TYPE_SSH, + SAI_HOSTIF_TRAP_TYPE_SNMP, + SAI_HOSTIF_TRAP_TYPE_ROUTER_CUSTOM_RANGE_BASE, + SAI_HOSTIF_TRAP_TYPE_L3_MTU_ERROR, + SAI_HOSTIF_TRAP_TYPE_TTL_ERROR, + SAI_HOSTIF_TRAP_TYPE_UDLD, + SAI_HOSTIF_TRAP_TYPE_BFD, + SAI_HOSTIF_TRAP_TYPE_BFDV6, + SAI_HOSTIF_TRAP_TYPE_SNAT_MISS, + SAI_HOSTIF_TRAP_TYPE_DNAT_MISS, + SAI_HOSTIF_TRAP_TYPE_LDP, + SAI_HOSTIF_TRAP_TYPE_BFD_MICRO, + SAI_HOSTIF_TRAP_TYPE_BFDV6_MICRO + /* This list is intended to remain static and should not be updated with new traps. */ +}; std::string get_trap_name_by_type(sai_hostif_trap_type_t trap_type) { @@ -103,7 +161,13 @@ std::string get_trap_name_by_type(sai_hostif_trap_type_t trap_type) } } - return trap_name_to_id_map.at(trap_type); + auto it = trap_name_to_id_map.find(trap_type); + if (it == trap_name_to_id_map.end()) + { + return ""; + } + + return it->second; } static map packet_action_map = { @@ -121,17 +185,19 @@ const string default_trap_group = "default"; const vector default_trap_ids = { SAI_HOSTIF_TRAP_TYPE_TTL_ERROR }; + const uint HOSTIF_TRAP_COUNTER_POLLING_INTERVAL_MS = 10000; CoppOrch::CoppOrch(DBConnector* db, string tableName) : Orch(db, tableName), m_counter_db(std::shared_ptr(new DBConnector("COUNTERS_DB", 0))), - m_flex_db(std::shared_ptr(new DBConnector("FLEX_COUNTER_DB", 0))), m_asic_db(std::shared_ptr(new DBConnector("ASIC_DB", 0))), + m_state_db(std::shared_ptr(new DBConnector("STATE_DB", 0))), m_counter_table(std::unique_ptr(new Table(m_counter_db.get(), COUNTERS_TRAP_NAME_MAP))), m_vidToRidTable(std::unique_ptr
(new Table(m_asic_db.get(), "VIDTORID"))), - m_flex_counter_group_table(std::unique_ptr(new ProducerTable(m_flex_db.get(), FLEX_COUNTER_GROUP_TABLE))), - m_trap_counter_manager(HOSTIF_TRAP_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, HOSTIF_TRAP_COUNTER_POLLING_INTERVAL_MS, false) + m_trap_counter_manager(HOSTIF_TRAP_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, HOSTIF_TRAP_COUNTER_POLLING_INTERVAL_MS, false), + m_trapCapabilityTable(std::unique_ptr
(new Table(m_state_db.get(), STATE_COPP_TRAP_CAPABILITY_TABLE_NAME))), + m_trapTable(std::unique_ptr
(new Table(m_state_db.get(), STATE_COPP_TRAP_TABLE_NAME))) { SWSS_LOG_ENTER(); auto intervT = timespec { .tv_sec = FLEX_COUNTER_UPD_INTERVAL , .tv_nsec = 0 }; @@ -139,11 +205,100 @@ CoppOrch::CoppOrch(DBConnector* db, string tableName) : auto executorT = new ExecutableTimer(m_FlexCounterUpdTimer, this, "FLEX_COUNTER_UPD_TIMER"); Orch::addExecutor(executorT); + /* Query SAI for supported trap IDs and publish to STATE_DB */ + publishTrapIdsCapability(); + initDefaultHostIntfTable(); initDefaultTrapGroup(); initDefaultTrapIds(); + }; +bool CoppOrch::isTrapIdSupported(sai_hostif_trap_type_t trap_id) const +{ + return supported_trap_ids.find(trap_id) != supported_trap_ids.end(); +} + +void CoppOrch::updateTrapOperStatus(sai_hostif_trap_type_t trap_type, const string& hw_status) +{ + SWSS_LOG_ENTER(); + + string trap_name = get_trap_name_by_type(trap_type); + if (trap_name.empty()) + { + SWSS_LOG_ERROR("Failed to get trap name for type %d", trap_type); + return; + } + + // Update or add the hw_status field in the table + vector hwStatusFvs; + hwStatusFvs.emplace_back("hw_status", hw_status); + m_trapTable->set(trap_name, hwStatusFvs); +} + +// Query SAI for trap IDs capability and publish to the COPP_TRAP_CAPABILITY_TABLE +void CoppOrch::publishTrapIdsCapability() +{ + SWSS_LOG_ENTER(); + + sai_s32_list_t enum_values_capability; + + supported_trap_ids.clear(); + + const auto* meta = sai_metadata_get_attr_metadata(SAI_OBJECT_TYPE_HOSTIF_TRAP, SAI_HOSTIF_TRAP_ATTR_TRAP_TYPE); + if (!meta || !meta->isenum) + { + SWSS_LOG_WARN("sai_metadata_get_attr_metadata for SAI_HOSTIF_TRAP_ATTR_TRAP_TYPE Failed"); + return; + } + + vector values_list(meta->enummetadata->valuescount); + enum_values_capability.count = static_cast(values_list.size()); + enum_values_capability.list = values_list.data(); + + sai_status_t status = sai_query_attribute_enum_values_capability(gSwitchId, + SAI_OBJECT_TYPE_HOSTIF_TRAP, + SAI_HOSTIF_TRAP_ATTR_TRAP_TYPE, + &enum_values_capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("SAI capability query for SAI_HOSTIF_TRAP_ATTR_TRAP_TYPE is failed," + " falling back to default supported trap IDs"); + // Populate enum_values_capability with default_supported_trap_ids + enum_values_capability.count = static_cast(default_supported_trap_ids.size()); + values_list.assign(default_supported_trap_ids.begin(), default_supported_trap_ids.end()); + enum_values_capability.list = values_list.data(); + } + + SWSS_LOG_NOTICE("Number of supported trap IDs: %u", enum_values_capability.count); + + string trap_id_list_str; + + for (uint32_t i = 0; i < enum_values_capability.count; i++) + { + + auto trap_str = get_trap_name_by_type(static_cast(enum_values_capability.list[i])); + if (trap_str.empty()) + { + SWSS_LOG_NOTICE("Unknown trap id enum value: %d", enum_values_capability.list[i]); + continue; + } + + supported_trap_ids.insert(static_cast(enum_values_capability.list[i])); + + if (!trap_id_list_str.empty()) + { + trap_id_list_str += ","; + } + trap_id_list_str += trap_str; + } + + SWSS_LOG_NOTICE("Publishing supported trap IDs to STATE_DB"); + vector trapCapabilityFvs; + trapCapabilityFvs.push_back(FieldValueTuple("trap_ids", trap_id_list_str)); + m_trapCapabilityTable->set("traps", trapCapabilityFvs); +} + void CoppOrch::initDefaultHostIntfTable() { SWSS_LOG_ENTER(); @@ -189,13 +344,17 @@ void CoppOrch::initDefaultTrapIds() attr.value.oid = m_trap_group_map[default_trap_group]; trap_id_attrs.push_back(attr); - /* Mellanox platform doesn't support trap priority setting */ - /* Marvell platform doesn't support trap priority. */ + /* + * Use a default trap priority > 0 to avoid undesirable packet trapping + * behavior on some platforms that use 0 as default SAI-internal priority. + * Note: Mellanox and Marvell platforms don't support trap priority setting. + */ + char *platform = getenv("platform"); - if (!platform || (!strstr(platform, MLNX_PLATFORM_SUBSTRING) && (!strstr(platform, MRVL_PLATFORM_SUBSTRING)))) + if (!platform || (!strstr(platform, MLNX_PLATFORM_SUBSTRING) && (!strstr(platform, MRVL_PRST_PLATFORM_SUBSTRING)))) { attr.id = SAI_HOSTIF_TRAP_ATTR_TRAP_PRIORITY; - attr.value.u32 = 0; + attr.value.u32 = 1; trap_id_attrs.push_back(attr); } @@ -245,6 +404,14 @@ void CoppOrch::getTrapIdList(vector &trap_id_name_list, vector &po } } + policer_object obj; + obj.policer_id = policer_id; + /* Save the CREATE_ONLY attributes for future use */ + for (sai_uint32_t ind = 0; ind < policer_attribs.size(); ind++) + { + auto attr = policer_attribs[ind]; + if(attr.id == SAI_POLICER_ATTR_METER_TYPE) + { + obj.meter = (sai_meter_type_t)attr.value.s32; + } + else if(attr.id == SAI_POLICER_ATTR_MODE) + { + obj.mode = (sai_policer_mode_t)attr.value.s32; + } + else if(attr.id == SAI_POLICER_ATTR_COLOR_SOURCE) + { + obj.color = (sai_policer_color_source_t)attr.value.s32; + } + } + SWSS_LOG_NOTICE("Bind policer to trap group %s:", trap_group_name.c_str()); - m_trap_group_policer_map[m_trap_group_map[trap_group_name]] = policer_id; + m_trap_group_policer_map[m_trap_group_map[trap_group_name]] = obj; return true; } @@ -613,7 +802,7 @@ task_process_status CoppOrch::processCoppRule(Consumer& consumer) if (!trap_id_attribs.empty()) { vector group_trap_ids; - TrapIdAttribs trap_attr; + TrapIdAttribs trap_attr = m_trap_group_trap_id_attrs[trap_group_name]; getTrapIdsFromTrapGroup(m_trap_group_map[trap_group_name], group_trap_ids); for (auto trap_id : group_trap_ids) @@ -752,7 +941,7 @@ void CoppOrch::doTask(SelectableTimer &timer) for (auto it = m_pendingAddToFlexCntr.begin(); it != m_pendingAddToFlexCntr.end(); ) { const auto id = sai_serialize_object_id(it->first); - if (m_vidToRidTable->hget("", id, value)) + if (!gTraditionalFlexCounter || m_vidToRidTable->hget("", id, value)) { SWSS_LOG_INFO("Registering %s, id %s", it->second.c_str(), id.c_str()); @@ -780,6 +969,7 @@ void CoppOrch::getTrapAddandRemoveList(string trap_group_name, { vector tmp_trap_ids = trap_ids; + if(m_trap_group_map.find(trap_group_name) == m_trap_group_map.end()) { add_trap_ids = trap_ids; @@ -844,7 +1034,7 @@ bool CoppOrch::trapGroupProcessTrapIdChange (string trap_group_name, { if (m_syncdTrapIds.find(i)!= m_syncdTrapIds.end()) { - if (!removeTrap(m_syncdTrapIds[i].trap_obj)) + if (!removeTrap(m_syncdTrapIds[i].trap_obj, i)) { return false; } @@ -889,7 +1079,7 @@ bool CoppOrch::trapGroupProcessTrapIdChange (string trap_group_name, */ if (m_syncdTrapIds[i].trap_group_obj == m_trap_group_map[trap_group_name]) { - if (!removeTrap(m_syncdTrapIds[i].trap_obj)) + if (!removeTrap(m_syncdTrapIds[i].trap_obj, i)) { return false; } @@ -933,7 +1123,7 @@ bool CoppOrch::processTrapGroupDel (string trap_group_name) if (it.second.trap_group_obj == m_trap_group_map[trap_group_name]) { trap_ids_to_reset.push_back(it.first); - if (!removeTrap(it.second.trap_obj)) + if (!removeTrap(it.second.trap_obj, it.first)) { return false; } @@ -995,8 +1185,8 @@ bool CoppOrch::getAttribsFromTrapGroup (vector &fv_tuple, { /* Mellanox platform doesn't support trap priority setting */ /* Marvell platform doesn't support trap priority. */ - char *platform = getenv("platform"); - if (!platform || (!strstr(platform, MLNX_PLATFORM_SUBSTRING) && (!strstr(platform, MRVL_PLATFORM_SUBSTRING)))) + char *platform = getenv("platform"); + if (!platform || (!strstr(platform, MLNX_PLATFORM_SUBSTRING) && (!strstr(platform, MRVL_PRST_PLATFORM_SUBSTRING)))) { attr.id = SAI_HOSTIF_TRAP_ATTR_TRAP_PRIORITY, attr.value.u32 = (uint32_t)stoul(fvValue(*i)); @@ -1107,12 +1297,14 @@ bool CoppOrch::getAttribsFromTrapGroup (vector &fv_tuple, bool CoppOrch::trapGroupUpdatePolicer (string trap_group_name, vector &policer_attribs) { - sai_object_id_t policer_id = getPolicer(trap_group_name); - if (m_trap_group_map.find(trap_group_name) == m_trap_group_map.end()) { return false; } + + auto policer_object = getPolicer(trap_group_name); + auto policer_id = policer_object.policer_id; + if (SAI_NULL_OBJECT_ID == policer_id) { SWSS_LOG_WARN("Creating policer for existing Trap group: %" PRIx64 " (name:%s).", @@ -1128,6 +1320,35 @@ bool CoppOrch::trapGroupUpdatePolicer (string trap_group_name, for (sai_uint32_t ind = 0; ind < policer_attribs.size(); ind++) { auto policer_attr = policer_attribs[ind]; + /* + Updating the CREATE_ONLY attributes of the policer will cause a crash + If modified, throw an error log and proceed with changeable attributes + */ + if(policer_attr.id == SAI_POLICER_ATTR_METER_TYPE) + { + if (policer_object.meter != (sai_meter_type_t)policer_attr.value.s32) + { + SWSS_LOG_ERROR("Trying to modify policer attribute: (meter), trap group: (%s)", trap_group_name.c_str()); + } + continue; + } + else if(policer_attr.id == SAI_POLICER_ATTR_MODE) + { + if (policer_object.mode != (sai_policer_mode_t)policer_attr.value.s32) + { + SWSS_LOG_ERROR("Trying to modify policer attribute: (mode), trap group: (%s)", trap_group_name.c_str()); + } + continue; + } + else if(policer_attr.id == SAI_POLICER_ATTR_COLOR_SOURCE) + { + if (policer_object.color != (sai_policer_color_source_t)policer_attr.value.s32) + { + SWSS_LOG_ERROR("Trying to modify policer attribute: (color), trap group: (%s)", trap_group_name.c_str()); + } + continue; + } + sai_status_t sai_status = sai_policer_api->set_policer_attribute(policer_id, &policer_attr); if (sai_status != SAI_STATUS_SUCCESS) @@ -1154,24 +1375,26 @@ void CoppOrch::initTrapRatePlugin() } std::string trapRatePluginName = "trap_rates.lua"; + std::string trapSha; try { std::string trapLuaScript = swss::loadLuaScript(trapRatePluginName); - std::string trapSha = swss::loadRedisScript(m_counter_db.get(), trapLuaScript); - - vector fieldValues; - fieldValues.emplace_back(FLOW_COUNTER_PLUGIN_FIELD, trapSha); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ); - m_flex_counter_group_table->set(HOSTIF_TRAP_COUNTER_FLEX_COUNTER_GROUP, fieldValues); + trapSha = swss::loadRedisScript(m_counter_db.get(), trapLuaScript); } catch (const runtime_error &e) { SWSS_LOG_ERROR("Trap flex counter groups were not set successfully: %s", e.what()); } + + setFlexCounterGroupParameter(HOSTIF_TRAP_COUNTER_FLEX_COUNTER_GROUP, + "", // Do not touch poll interval + STATS_MODE_READ, + FLOW_COUNTER_PLUGIN_FIELD, + trapSha); m_trap_rate_plugin_loaded = true; } -bool CoppOrch::removeTrap(sai_object_id_t hostif_trap_id) +bool CoppOrch::removeTrap(sai_object_id_t hostif_trap_id, sai_hostif_trap_type_t trap_type) { unbindTrapCounter(hostif_trap_id); @@ -1186,6 +1409,8 @@ bool CoppOrch::removeTrap(sai_object_id_t hostif_trap_id) return parseHandleSaiStatusFailure(handle_status); } } + + updateTrapOperStatus(trap_type, "not-installed"); return true; } diff --git a/orchagent/copporch.h b/orchagent/copporch.h index d774db64bab..76755cf3a32 100644 --- a/orchagent/copporch.h +++ b/orchagent/copporch.h @@ -1,9 +1,16 @@ #ifndef SWSS_COPPORCH_H #define SWSS_COPPORCH_H +extern "C" { +#include +#include +#include +} + #include #include #include +#include #include "dbconnector.h" #include "orch.h" #include "flex_counter_manager.h" @@ -46,8 +53,18 @@ struct copp_trap_objects sai_hostif_trap_type_t trap_type; }; +struct policer_object +{ + sai_object_id_t policer_id; + sai_meter_type_t meter; + sai_policer_mode_t mode; + sai_policer_color_source_t color; + + policer_object() : policer_id(SAI_NULL_OBJECT_ID) {} +}; + /* TrapGroupPolicerTable: trap group ID, policer ID */ -typedef std::map TrapGroupPolicerTable; +typedef std::map TrapGroupPolicerTable; /* TrapIdTrapObjectsTable: trap ID, copp trap objects */ typedef std::map TrapIdTrapObjectsTable; /* TrapGroupHostIfMap: trap group ID, host interface ID */ @@ -88,11 +105,14 @@ class CoppOrch : public Orch std::map m_pendingAddToFlexCntr; std::shared_ptr m_counter_db; - std::shared_ptr m_flex_db; std::shared_ptr m_asic_db; + std::shared_ptr m_state_db; std::unique_ptr
m_counter_table; std::unique_ptr
m_vidToRidTable; - std::unique_ptr m_flex_counter_group_table; + std::unique_ptr
m_trapCapabilityTable; + std::unique_ptr
m_trapTable; + + std::unordered_set supported_trap_ids; FlexCounterManager m_trap_counter_manager; @@ -104,6 +124,9 @@ class CoppOrch : public Orch void initDefaultTrapGroup(); void initDefaultTrapIds(); void initTrapRatePlugin(); + bool isTrapIdSupported(sai_hostif_trap_type_t trap_id) const; + void updateTrapOperStatus(sai_hostif_trap_type_t trap_type, const std::string& hw_status); + void publishTrapIdsCapability(); task_process_status processCoppRule(Consumer& consumer); bool isValidList(std::vector &trap_id_list, std::vector &all_items) const; @@ -113,7 +136,7 @@ class CoppOrch : public Orch bool createPolicer(std::string trap_group, std::vector &policer_attribs); bool removePolicer(std::string trap_group_name); - sai_object_id_t getPolicer(std::string trap_group_name); + policer_object getPolicer(std::string trap_group_name); bool createGenetlinkHostIf(std::string trap_group_name, std::vector &hostif_attribs); bool removeGenetlinkHostIf(std::string trap_group_name); @@ -140,7 +163,7 @@ class CoppOrch : public Orch bool trapGroupUpdatePolicer (std::string trap_group_name, std::vector &policer_attribs); - bool removeTrap(sai_object_id_t hostif_trap_id); + bool removeTrap(sai_object_id_t hostif_trap_id, sai_hostif_trap_type_t trap_type); bool bindTrapCounter(sai_object_id_t hostif_trap_id, sai_hostif_trap_type_t trap_type); void unbindTrapCounter(sai_object_id_t hostif_trap_id); diff --git a/orchagent/crmorch.cpp b/orchagent/crmorch.cpp index bfd69525958..98125ac5b43 100644 --- a/orchagent/crmorch.cpp +++ b/orchagent/crmorch.cpp @@ -16,14 +16,14 @@ #define CRM_EXCEEDED_MSG_MAX 10 #define CRM_ACL_RESOURCE_COUNT 256 +using namespace std; +using namespace swss; + extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_acl_api_t *sai_acl_api; extern event_handle_t g_events_handle; - -using namespace std; -using namespace swss; - +extern string gMySwitchType; const map crmResTypeNameMap = { @@ -64,6 +64,11 @@ const map crmResTypeNameMap = { CrmResourceType::CRM_DASH_IPV6_ACL_GROUP, "DASH_IPV6_ACL_GROUP" }, { CrmResourceType::CRM_DASH_IPV4_ACL_RULE, "DASH_IPV4_ACL_RULE" }, { CrmResourceType::CRM_DASH_IPV6_ACL_RULE, "DASH_IPV6_ACL_RULE" }, + { CrmResourceType::CRM_DASH_IPV4_METER_POLICY, "DASH_IPV4_METER_POLICY" }, + { CrmResourceType::CRM_DASH_IPV4_METER_RULE, "DASH_IPV4_METER_RULE" }, + { CrmResourceType::CRM_DASH_IPV6_METER_POLICY, "DASH_IPV6_METER_POLICY" }, + { CrmResourceType::CRM_DASH_IPV6_METER_RULE, "DASH_IPV6_METER_RULE" }, + { CrmResourceType::CRM_TWAMP_ENTRY, "TWAMP_ENTRY" } }; const map crmResSaiAvailAttrMap = @@ -84,6 +89,7 @@ const map crmResSaiAvailAttrMap = { CrmResourceType::CRM_IPMC_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_IPMC_ENTRY}, { CrmResourceType::CRM_SNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY }, { CrmResourceType::CRM_DNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY }, + { CrmResourceType::CRM_TWAMP_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_TWAMP_SESSION } }; const map crmResSaiObjAttrMap = @@ -125,6 +131,11 @@ const map crmResSaiObjAttrMap = { CrmResourceType::CRM_DASH_IPV6_ACL_GROUP, (sai_object_type_t)SAI_OBJECT_TYPE_DASH_ACL_GROUP }, { CrmResourceType::CRM_DASH_IPV4_ACL_RULE, (sai_object_type_t)SAI_OBJECT_TYPE_DASH_ACL_RULE }, { CrmResourceType::CRM_DASH_IPV6_ACL_RULE, (sai_object_type_t)SAI_OBJECT_TYPE_DASH_ACL_RULE }, + { CrmResourceType::CRM_DASH_IPV4_METER_POLICY, (sai_object_type_t)SAI_OBJECT_TYPE_METER_POLICY }, + { CrmResourceType::CRM_DASH_IPV6_METER_POLICY, (sai_object_type_t)SAI_OBJECT_TYPE_METER_POLICY }, + { CrmResourceType::CRM_DASH_IPV4_METER_RULE, (sai_object_type_t)SAI_OBJECT_TYPE_METER_RULE }, + { CrmResourceType::CRM_DASH_IPV6_METER_RULE, (sai_object_type_t)SAI_OBJECT_TYPE_METER_RULE }, + { CrmResourceType::CRM_TWAMP_ENTRY, SAI_OBJECT_TYPE_NULL } }; const map crmResAddrFamilyAttrMap = @@ -185,7 +196,12 @@ const map crmThreshTypeResMap = { "dash_ipv4_acl_group_threshold_type", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, { "dash_ipv6_acl_group_threshold_type", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, { "dash_ipv4_acl_rule_threshold_type", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, - { "dash_ipv6_acl_rule_threshold_type", CrmResourceType::CRM_DASH_IPV6_ACL_RULE } + { "dash_ipv6_acl_rule_threshold_type", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "dash_ipv4_meter_policy_threshold_type", CrmResourceType::CRM_DASH_IPV4_METER_POLICY }, + { "dash_ipv6_meter_policy_threshold_type", CrmResourceType::CRM_DASH_IPV6_METER_POLICY }, + { "dash_ipv4_meter_rule_threshold_type", CrmResourceType::CRM_DASH_IPV4_METER_RULE }, + { "dash_ipv6_meter_rule_threshold_type", CrmResourceType::CRM_DASH_IPV6_METER_RULE }, + { "twamp_entry_threshold_type", CrmResourceType::CRM_TWAMP_ENTRY } }; const map crmThreshLowResMap = @@ -226,7 +242,12 @@ const map crmThreshLowResMap = { "dash_ipv4_acl_group_low_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, { "dash_ipv6_acl_group_low_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, { "dash_ipv4_acl_rule_low_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, - { "dash_ipv6_acl_rule_low_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_RULE } + { "dash_ipv6_acl_rule_low_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "dash_ipv4_meter_policy_low_threshold", CrmResourceType::CRM_DASH_IPV4_METER_POLICY }, + { "dash_ipv6_meter_policy_low_threshold", CrmResourceType::CRM_DASH_IPV6_METER_POLICY }, + { "dash_ipv4_meter_rule_low_threshold", CrmResourceType::CRM_DASH_IPV4_METER_RULE }, + { "dash_ipv6_meter_rule_low_threshold", CrmResourceType::CRM_DASH_IPV6_METER_RULE }, + { "twamp_entry_low_threshold", CrmResourceType::CRM_TWAMP_ENTRY } }; const map crmThreshHighResMap = @@ -267,7 +288,12 @@ const map crmThreshHighResMap = { "dash_ipv4_acl_group_high_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, { "dash_ipv6_acl_group_high_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, { "dash_ipv4_acl_rule_high_threshold", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, - { "dash_ipv6_acl_rule_high_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_RULE } + { "dash_ipv6_acl_rule_high_threshold", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "dash_ipv4_meter_policy_high_threshold", CrmResourceType::CRM_DASH_IPV4_METER_POLICY }, + { "dash_ipv6_meter_policy_high_threshold", CrmResourceType::CRM_DASH_IPV6_METER_POLICY }, + { "dash_ipv4_meter_rule_high_threshold", CrmResourceType::CRM_DASH_IPV4_METER_RULE }, + { "dash_ipv6_meter_rule_high_threshold", CrmResourceType::CRM_DASH_IPV6_METER_RULE }, + { "twamp_entry_high_threshold", CrmResourceType::CRM_TWAMP_ENTRY } }; const map crmThreshTypeMap = @@ -315,7 +341,12 @@ const map crmAvailCntsTableMap = { "crm_stats_dash_ipv4_acl_group_available", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, { "crm_stats_dash_ipv6_acl_group_available", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, { "crm_stats_dash_ipv4_acl_rule_available", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, - { "crm_stats_dash_ipv6_acl_rule_available", CrmResourceType::CRM_DASH_IPV6_ACL_RULE } + { "crm_stats_dash_ipv6_acl_rule_available", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "crm_stats_dash_ipv4_meter_policy_available", CrmResourceType::CRM_DASH_IPV4_METER_POLICY }, + { "crm_stats_dash_ipv6_meter_policy_available", CrmResourceType::CRM_DASH_IPV6_METER_POLICY }, + { "crm_stats_dash_ipv4_meter_rule_available", CrmResourceType::CRM_DASH_IPV4_METER_RULE }, + { "crm_stats_dash_ipv6_meter_rule_available", CrmResourceType::CRM_DASH_IPV6_METER_RULE }, + { "crm_stats_twamp_entry_available", CrmResourceType::CRM_TWAMP_ENTRY } }; const map crmUsedCntsTableMap = @@ -356,7 +387,12 @@ const map crmUsedCntsTableMap = { "crm_stats_dash_ipv4_acl_group_used", CrmResourceType::CRM_DASH_IPV4_ACL_GROUP }, { "crm_stats_dash_ipv6_acl_group_used", CrmResourceType::CRM_DASH_IPV6_ACL_GROUP }, { "crm_stats_dash_ipv4_acl_rule_used", CrmResourceType::CRM_DASH_IPV4_ACL_RULE }, - { "crm_stats_dash_ipv6_acl_rule_used", CrmResourceType::CRM_DASH_IPV6_ACL_RULE } + { "crm_stats_dash_ipv6_acl_rule_used", CrmResourceType::CRM_DASH_IPV6_ACL_RULE }, + { "crm_stats_dash_ipv4_meter_policy_used", CrmResourceType::CRM_DASH_IPV4_METER_POLICY }, + { "crm_stats_dash_ipv6_meter_policy_used", CrmResourceType::CRM_DASH_IPV6_METER_POLICY }, + { "crm_stats_dash_ipv4_meter_rule_used", CrmResourceType::CRM_DASH_IPV4_METER_RULE }, + { "crm_stats_dash_ipv6_meter_rule_used", CrmResourceType::CRM_DASH_IPV6_METER_RULE }, + { "crm_stats_twamp_entry_used", CrmResourceType::CRM_TWAMP_ENTRY }, }; CrmOrch::CrmOrch(DBConnector *db, string tableName): @@ -800,6 +836,12 @@ bool CrmOrch::getResAvailability(CrmResourceType type, CrmResourceEntry &res) bool CrmOrch::getDashAclGroupResAvailability(CrmResourceType type, CrmResourceEntry &res) { + if (gMySwitchType != "dpu") + { + res.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + return false; + } + sai_object_type_t objType = crmResSaiObjAttrMap.at(type); for (auto &cnt : res.countersMap) @@ -864,6 +906,12 @@ void CrmOrch::getResAvailableCounters() case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: case CrmResourceType::CRM_MPLS_NEXTHOP: case CrmResourceType::CRM_SRV6_NEXTHOP: + case CrmResourceType::CRM_TWAMP_ENTRY: + { + getResAvailability(res.first, res.second); + break; + } + case CrmResourceType::CRM_DASH_VNET: case CrmResourceType::CRM_DASH_ENI: case CrmResourceType::CRM_DASH_ENI_ETHER_ADDRESS_MAP: @@ -871,6 +919,10 @@ void CrmOrch::getResAvailableCounters() case CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING: case CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING: case CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING: + case CrmResourceType::CRM_DASH_IPV4_METER_POLICY: + case CrmResourceType::CRM_DASH_IPV6_METER_POLICY: + case CrmResourceType::CRM_DASH_IPV4_METER_RULE: + case CrmResourceType::CRM_DASH_IPV6_METER_RULE: case CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION: case CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION: case CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA: @@ -878,6 +930,12 @@ void CrmOrch::getResAvailableCounters() case CrmResourceType::CRM_DASH_IPV4_ACL_GROUP: case CrmResourceType::CRM_DASH_IPV6_ACL_GROUP: { + if (gMySwitchType != "dpu") + { + res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + break; + } + getResAvailability(res.first, res.second); break; } @@ -893,6 +951,17 @@ void CrmOrch::getResAvailableCounters() attr.value.aclresource.count = CRM_ACL_RESOURCE_COUNT; attr.value.aclresource.list = resources.data(); sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); + break; + } + if (status == SAI_STATUS_BUFFER_OVERFLOW) { resources.resize(attr.value.aclresource.count); @@ -928,6 +997,16 @@ void CrmOrch::getResAvailableCounters() for (auto &cnt : res.second.countersMap) { sai_status_t status = sai_acl_api->get_acl_table_attribute(cnt.second.id, 1, &attr); + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); + break; + } if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to get ACL table attribute %u , rv:%d", attr.id, status); diff --git a/orchagent/crmorch.h b/orchagent/crmorch.h index 9eb60011855..e3d62e172eb 100644 --- a/orchagent/crmorch.h +++ b/orchagent/crmorch.h @@ -49,7 +49,12 @@ enum class CrmResourceType CRM_DASH_IPV4_ACL_GROUP, CRM_DASH_IPV6_ACL_GROUP, CRM_DASH_IPV4_ACL_RULE, - CRM_DASH_IPV6_ACL_RULE + CRM_DASH_IPV6_ACL_RULE, + CRM_DASH_IPV4_METER_POLICY, + CRM_DASH_IPV6_METER_POLICY, + CRM_DASH_IPV4_METER_RULE, + CRM_DASH_IPV6_METER_RULE, + CRM_TWAMP_ENTRY }; enum class CrmThresholdType diff --git a/orchagent/dash/dashaclgroupmgr.cpp b/orchagent/dash/dashaclgroupmgr.cpp index 22a730e7fe9..9e1a75930a7 100644 --- a/orchagent/dash/dashaclgroupmgr.cpp +++ b/orchagent/dash/dashaclgroupmgr.cpp @@ -9,6 +9,7 @@ #include "dashaclorch.h" #include "saihelper.h" #include "pbutils.h" +#include "taskworker.h" extern sai_dash_acl_api_t* sai_dash_acl_api; extern sai_dash_eni_api_t* sai_dash_eni_api; @@ -126,9 +127,22 @@ sai_attr_id_t getSaiStage(DashAclDirection d, sai_ip_addr_family_t f, DashAclSta return stage->second; } -DashAclGroupMgr::DashAclGroupMgr(DashOrch *dashorch, DashAclOrch *aclorch) : +DashAclRuleInfo::DashAclRuleInfo(const DashAclRule &rule) : + m_src_tags(rule.m_src_tags), + m_dst_tags(rule.m_dst_tags) +{ + SWSS_LOG_ENTER(); +} + +bool DashAclRuleInfo::isTagUsed(const std::string &tag_id) const +{ + return (m_src_tags.find(tag_id) != end(m_src_tags)) || (m_dst_tags.find(tag_id) != end(m_dst_tags)); +} + +DashAclGroupMgr::DashAclGroupMgr(DBConnector *db, DashOrch *dashorch, DashAclOrch *aclorch) : m_dash_orch(dashorch), - m_dash_acl_orch(aclorch) + m_dash_acl_orch(aclorch), + m_dash_acl_rules_table(new Table(db, APP_DASH_ACL_RULE_TABLE_NAME)) { SWSS_LOG_ENTER(); } @@ -138,10 +152,6 @@ void DashAclGroupMgr::init(DashAclGroup& group) SWSS_LOG_ENTER(); group.m_dash_acl_group_id = SAI_NULL_OBJECT_ID; - for (auto& rule: group.m_dash_acl_rule_table) - { - rule.second.m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; - } } void DashAclGroupMgr::create(DashAclGroup& group) @@ -202,6 +212,7 @@ void DashAclGroupMgr::remove(DashAclGroup& group) CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? CrmResourceType::CRM_DASH_IPV4_ACL_GROUP : CrmResourceType::CRM_DASH_IPV6_ACL_GROUP; + // Will also delete/zero out ACL rule count for this group, no need to do so separately gCrmOrch->decCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); group.m_dash_acl_group_id = SAI_NULL_OBJECT_ID; @@ -220,12 +231,6 @@ task_process_status DashAclGroupMgr::remove(const string& group_id) auto& group = group_it->second; - if (!group.m_dash_acl_rule_table.empty()) - { - SWSS_LOG_ERROR("ACL group %s still has %zu rules", group_id.c_str(), group.m_dash_acl_rule_table.size()); - return task_need_retry; - } - if (isBound(group)) { SWSS_LOG_ERROR("ACL group %s still has %zu references", group_id.c_str(), group.m_in_tables.size() + group.m_out_tables.size()); @@ -235,6 +240,7 @@ task_process_status DashAclGroupMgr::remove(const string& group_id) remove(group); m_groups_table.erase(group_id); + detachTags(group_id, group.m_tags); SWSS_LOG_INFO("Removed ACL group %s", group_id.c_str()); return task_success; @@ -247,102 +253,7 @@ bool DashAclGroupMgr::exists(const string& group_id) const return m_groups_table.find(group_id) != m_groups_table.end(); } -void DashAclGroupMgr::onUpdate(const string& group_id, const string& tag_id, const DashTag& tag) -{ - SWSS_LOG_ENTER(); - - auto group_it = m_groups_table.find(group_id); - if (group_it == m_groups_table.end()) - { - return; - } - - auto& group = group_it->second; - if (isBound(group)) - { - // If the group is bound to at least one ENI refresh the full group to update the affected rules. - // When the group is bound to the ENI we need to make sure that the update of the affected rules will be atomic. - SWSS_LOG_INFO("Update full ACL group %s", group_id.c_str()); - - refreshAclGroupFull(group_id); - } - else - { - // If the group is not bound to ENI update the rule immediately. - SWSS_LOG_INFO("Update ACL group %s", group_id.c_str()); - for (auto& rule_it: group.m_dash_acl_rule_table) - { - auto& rule = rule_it.second; - if (rule.m_src_tags.find(tag_id) != rule.m_src_tags.end() || rule.m_dst_tags.find(tag_id) != rule.m_dst_tags.end()) - { - removeRule(group, rule); - createRule(group, rule); - } - } - } -} - -void DashAclGroupMgr::refreshAclGroupFull(const string &group_id) -{ - SWSS_LOG_ENTER(); - - auto& group = m_groups_table[group_id]; - - DashAclGroup new_group = group; - init(new_group); - create(new_group); - - for (auto& rule: new_group.m_dash_acl_rule_table) - { - createRule(new_group, rule.second); - } - - for (const auto& table: new_group.m_in_tables) - { - const auto& eni_id = table.first; - const auto& stages = table.second; - - const auto eni = m_dash_orch->getEni(eni_id); - ABORT_IF_NOT(eni != nullptr, "Failed to get ENI %s", eni_id.c_str()); - - for (const auto& stage: stages) - { - bind(new_group, *eni, DashAclDirection::IN, stage); - } - } - - for (const auto& table: new_group.m_out_tables) - { - const auto& eni_id = table.first; - const auto& stages = table.second; - - const auto eni = m_dash_orch->getEni(eni_id); - ABORT_IF_NOT(eni != nullptr, "Failed to get ENI %s", eni_id.c_str()); - - for (const auto& stage: stages) - { - bind(new_group, *eni, DashAclDirection::OUT, stage); - } - } - - removeAclGroupFull(group); - - group = new_group; -} - -void DashAclGroupMgr::removeAclGroupFull(DashAclGroup& group) -{ - SWSS_LOG_ENTER(); - - for (auto& rule: group.m_dash_acl_rule_table) - { - removeRule(group, rule.second); - } - - remove(group); -} - -void DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) +DashAclRuleInfo DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) { SWSS_LOG_ENTER(); @@ -350,6 +261,8 @@ void DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) vector src_prefixes = {}; vector dst_prefixes = {}; + DashAclRuleInfo rule_info = rule; + auto any_ip = [] (const auto& g) { sai_ip_prefix_t ip_prefix = {}; @@ -403,9 +316,9 @@ void DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) for (const auto &tag : rule.m_src_tags) { const auto& prefixes = m_dash_acl_orch->getDashAclTagMgr().getPrefixes(tag); - src_prefixes.insert(src_prefixes.end(), prefixes.begin(), prefixes.end()); + group.m_tags.insert(tag); } for (const auto &tag : rule.m_dst_tags) @@ -414,6 +327,7 @@ void DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) dst_prefixes.insert(dst_prefixes.end(), prefixes.begin(), prefixes.end()); + group.m_tags.insert(tag); } if (src_prefixes.empty()) @@ -450,7 +364,7 @@ void DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DASH_ACL_GROUP_ID; attrs.back().value.oid = group.m_dash_acl_group_id; - auto status = sai_dash_acl_api->create_dash_acl_rule(&rule.m_dash_acl_rule_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + auto status = sai_dash_acl_api->create_dash_acl_rule(&rule_info.m_dash_acl_rule_id, gSwitchId, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create ACL rule: %d, %s", status, sai_serialize_status(status).c_str()); @@ -460,6 +374,8 @@ void DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; gCrmOrch->incCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); + + return rule_info; } task_process_status DashAclGroupMgr::createRule(const string& group_id, const string& rule_id, DashAclRule& rule) @@ -474,91 +390,30 @@ task_process_status DashAclGroupMgr::createRule(const string& group_id, const st } auto& group = group_it->second; - auto acl_rule_it = group.m_dash_acl_rule_table.find(rule_id); - ABORT_IF_NOT(acl_rule_it == group.m_dash_acl_rule_table.end(), "Failed to create ACL rule %s. Rule already exist in ACL group %s", rule_id.c_str(), group_id.c_str()); - - createRule(group, rule); - - group.m_dash_acl_rule_table.emplace(rule_id, rule); - attachTags(group_id, rule.m_src_tags); - attachTags(group_id, rule.m_dst_tags); - - SWSS_LOG_INFO("Created ACL rule %s:%s", group_id.c_str(), rule_id.c_str()); - - return task_success; -} - -task_process_status DashAclGroupMgr::updateRule(const string& group_id, const string& rule_id, DashAclRule& rule) -{ - SWSS_LOG_ENTER(); - - if (isBound(group_id)) - { - SWSS_LOG_INFO("Failed to update dash ACL rule %s:%s, ACL group is bound to the ENI", group_id.c_str(), rule_id.c_str()); - return task_failed; - } - - if (ruleExists(group_id, rule_id)) - { - removeRule(group_id, rule_id); - } - - createRule(group_id, rule_id, rule); - - return task_success; -} - -void DashAclGroupMgr::removeRule(DashAclGroup& group, DashAclRule& rule) -{ - SWSS_LOG_ENTER(); - - if (rule.m_dash_acl_rule_id == SAI_NULL_OBJECT_ID) - { - return; - } - - // Remove the ACL group - auto status = sai_dash_acl_api->remove_dash_acl_rule(rule.m_dash_acl_rule_id); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to remove ACL rule: %d, %s", status, sai_serialize_status(status).c_str()); - handleSaiRemoveStatus((sai_api_t)SAI_API_DASH_ACL, status); - } - - CrmResourceType crm_resource = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? - CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; - gCrmOrch->decCrmDashAclUsedCounter(crm_resource, group.m_dash_acl_group_id); - - rule.m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; -} - -task_process_status DashAclGroupMgr::removeRule(const string& group_id, const string& rule_id) -{ - SWSS_LOG_ENTER(); - - if (!exists(group_id) || !ruleExists(group_id, rule_id)) + for (const auto& tag_id : rule.m_src_tags) { - SWSS_LOG_INFO("ACL rule %s:%s does not exists", group_id.c_str(), rule_id.c_str()); - return task_success; + if (!m_dash_acl_orch->getDashAclTagMgr().exists(tag_id)) + { + SWSS_LOG_INFO("ACL tag %s doesn't exist, waiting for tag creating before creating rule %s", tag_id.c_str(), rule_id.c_str()); + return task_need_retry; + } } - auto& group = m_groups_table[group_id]; - if (isBound(group)) + for (const auto& tag_id : rule.m_dst_tags) { - SWSS_LOG_INFO("Failed to remove dash ACL rule %s:%s, ACL group is bound to the ENI", group_id.c_str(), rule_id.c_str()); - return task_need_retry; + if (!m_dash_acl_orch->getDashAclTagMgr().exists(tag_id)) + { + SWSS_LOG_INFO("ACL tag %s doesn't exist, waiting for tag creating before creating rule %s", tag_id.c_str(), rule_id.c_str()); + return task_need_retry; + } } - auto& rule = group.m_dash_acl_rule_table[rule_id]; - - removeRule(group, rule); + auto rule_info = createRule(group, rule); - detachTags(group_id, rule.m_src_tags); - detachTags(group_id, rule.m_dst_tags); + group.m_rule_count++; + attachTags(group_id, group.m_tags); - group.m_dash_acl_rule_table.erase(rule_id); - - SWSS_LOG_INFO("Removed ACL rule %s:%s", group_id.c_str(), rule_id.c_str()); + SWSS_LOG_INFO("Created ACL rule %s:%s", group_id.c_str(), rule_id.c_str()); return task_success; } @@ -580,19 +435,6 @@ void DashAclGroupMgr::bind(const DashAclGroup& group, const EniEntry& eni, DashA } } -bool DashAclGroupMgr::ruleExists(const string& group_id, const string& rule_id) const -{ - SWSS_LOG_ENTER(); - - auto group_it = m_groups_table.find(group_id); - if (group_it == m_groups_table.end()) - { - return false; - } - - return group_it->second.m_dash_acl_rule_table.find(rule_id) != group_it->second.m_dash_acl_rule_table.end(); -} - task_process_status DashAclGroupMgr::bind(const string& group_id, const string& eni_id, DashAclDirection direction, DashAclStage stage) { SWSS_LOG_ENTER(); @@ -601,15 +443,15 @@ task_process_status DashAclGroupMgr::bind(const string& group_id, const string& if (group_it == m_groups_table.end()) { SWSS_LOG_INFO("Failed to bind ACL group %s to ENI %s. ACL group does not exist", group_id.c_str(), eni_id.c_str()); - return task_need_retry; + return task_failed; } auto& group = group_it->second; - if (group.m_dash_acl_rule_table.empty()) + if (group.m_rule_count == 0) { - SWSS_LOG_INFO("ACL group %s has no rules attached. Waiting for ACL rules creation", group_id.c_str()); - return task_need_retry; + SWSS_LOG_INFO("Failed to bind ACL group %s to ENI %s. ACL group has no rules attached.", group_id.c_str(), eni_id.c_str()); + return task_failed; } auto eni = m_dash_orch->getEni(eni_id); diff --git a/orchagent/dash/dashaclgroupmgr.h b/orchagent/dash/dashaclgroupmgr.h index 6ef9498cd29..5f0dbb77e0f 100644 --- a/orchagent/dash/dashaclgroupmgr.h +++ b/orchagent/dash/dashaclgroupmgr.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -8,6 +9,7 @@ #include "dashorch.h" #include "dashtagmgr.h" +#include "table.h" #include "dash_api/acl_group.pb.h" #include "dash_api/acl_rule.pb.h" @@ -31,8 +33,6 @@ enum class DashAclDirection struct DashAclRule { - sai_object_id_t m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; - enum class Action { ALLOW, @@ -51,16 +51,27 @@ struct DashAclRule std::vector m_dst_ports; }; +struct DashAclRuleInfo +{ + sai_object_id_t m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; + + std::unordered_set m_src_tags; + std::unordered_set m_dst_tags; + + DashAclRuleInfo() = default; + DashAclRuleInfo(const DashAclRule &rule); + + bool isTagUsed(const std::string &tag_id) const; +}; + struct DashAclGroup { using EniTable = std::unordered_map>; - using RuleTable = std::unordered_map; - using RuleKeys = std::unordered_set; sai_object_id_t m_dash_acl_group_id = SAI_NULL_OBJECT_ID; + std::unordered_set m_tags; + int m_rule_count = 0; - std::string m_guid; sai_ip_addr_family_t m_ip_version; - RuleTable m_dash_acl_rule_table; EniTable m_in_tables; EniTable m_out_tables; @@ -86,20 +97,17 @@ class DashAclGroupMgr DashOrch *m_dash_orch; DashAclOrch *m_dash_acl_orch; std::unordered_map m_groups_table; + std::unique_ptr m_dash_acl_rules_table; public: - DashAclGroupMgr(DashOrch *dashorch, DashAclOrch *aclorch); + DashAclGroupMgr(swss::DBConnector *db, DashOrch *dashorch, DashAclOrch *aclorch); task_process_status create(const std::string& group_id, DashAclGroup& group); task_process_status remove(const std::string& group_id); bool exists(const std::string& group_id) const; - - void onUpdate(const std::string& group_id, const std::string& tag_id,const DashTag& tag); + bool isBound(const std::string& group_id); task_process_status createRule(const std::string& group_id, const std::string& rule_id, DashAclRule& rule); - task_process_status updateRule(const std::string& group_id, const std::string& rule_id, DashAclRule& rule); - task_process_status removeRule(const std::string& group_id, const std::string& rule_id); - bool ruleExists(const std::string& group_id, const std::string& rule_id) const; task_process_status bind(const std::string& group_id, const std::string& eni_id, DashAclDirection direction, DashAclStage stage); task_process_status unbind(const std::string& group_id, const std::string& eni_id, DashAclDirection direction, DashAclStage stage); @@ -109,16 +117,11 @@ class DashAclGroupMgr void create(DashAclGroup& group); void remove(DashAclGroup& group); - void createRule(DashAclGroup& group, DashAclRule& rule); - void removeRule(DashAclGroup& group, DashAclRule& rule); + DashAclRuleInfo createRule(DashAclGroup& group, DashAclRule& rule); void bind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage); void unbind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage); - bool isBound(const std::string &group_id); bool isBound(const DashAclGroup& group); void attachTags(const std::string &group_id, const std::unordered_set& tags); void detachTags(const std::string &group_id, const std::unordered_set& tags); - - void refreshAclGroupFull(const std::string &group_id); - void removeAclGroupFull(DashAclGroup& group); }; diff --git a/orchagent/dash/dashaclorch.cpp b/orchagent/dash/dashaclorch.cpp index 9c14d637196..28d1ec03942 100644 --- a/orchagent/dash/dashaclorch.cpp +++ b/orchagent/dash/dashaclorch.cpp @@ -74,10 +74,10 @@ inline void lexical_convert(const string &buffer, DashAclStage &stage) } -DashAclOrch::DashAclOrch(DBConnector *db, const vector &tables, DashOrch *dash_orch, ZmqServer *zmqServer) : +DashAclOrch::DashAclOrch(DBConnector *db, const vector &tables, DashOrch *dash_orch, DBConnector *app_state_db, ZmqServer *zmqServer) : ZmqOrch(db, tables, zmqServer), m_dash_orch(dash_orch), - m_group_mgr(dash_orch, this), + m_group_mgr(db, dash_orch, this), m_tag_mgr(this) { @@ -108,7 +108,6 @@ void DashAclOrch::doTask(ConsumerBase &consumer) PbWorker::makeMemberTask(APP_DASH_ACL_GROUP_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclGroup, this), KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_GROUP_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclGroup, this), PbWorker::makeMemberTask(APP_DASH_ACL_RULE_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclRule, this), - KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_RULE_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclRule, this), PbWorker::makeMemberTask(APP_DASH_PREFIX_TAG_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashPrefixTag, this), KeyOnlyWorker::makeMemberTask(APP_DASH_PREFIX_TAG_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashPrefixTag, this), }; @@ -272,29 +271,13 @@ task_process_status DashAclOrch::taskUpdateDashAclRule( return task_failed; } - if (m_group_mgr.ruleExists(group_id, rule_id)) + if (m_group_mgr.isBound(group_id)) { - return m_group_mgr.updateRule(group_id, rule_id, rule); - } - else - { - return m_group_mgr.createRule(group_id, rule_id, rule); - } -} - -task_process_status DashAclOrch::taskRemoveDashAclRule( - const string &key) -{ - SWSS_LOG_ENTER(); - - string group_id, rule_id; - if (!extractVariables(key, ':', group_id, rule_id)) - { - SWSS_LOG_ERROR("Failed to parse key %s", key.c_str()); + SWSS_LOG_INFO("Failed to set dash ACL rule %s:%s, ACL group is bound to the ENI", group_id.c_str(), rule_id.c_str()); return task_failed; } - return m_group_mgr.removeRule(group_id, rule_id); + return m_group_mgr.createRule(group_id, rule_id, rule); } task_process_status DashAclOrch::taskUpdateDashPrefixTag( diff --git a/orchagent/dash/dashaclorch.h b/orchagent/dash/dashaclorch.h index b3859c5c2d8..125db81e248 100644 --- a/orchagent/dash/dashaclorch.h +++ b/orchagent/dash/dashaclorch.h @@ -35,7 +35,7 @@ class DashAclOrch : public ZmqOrch public: using TaskArgs = std::vector; - DashAclOrch(swss::DBConnector *db, const std::vector &tables, DashOrch *dash_orch, swss::ZmqServer *zmqServer); + DashAclOrch(swss::DBConnector *db, const std::vector &tables, DashOrch *dash_orch, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer); DashAclGroupMgr& getDashAclGroupMgr(); DashTagMgr& getDashAclTagMgr(); @@ -63,8 +63,6 @@ class DashAclOrch : public ZmqOrch task_process_status taskUpdateDashAclRule( const std::string &key, const dash::acl_rule::AclRule &data); - task_process_status taskRemoveDashAclRule( - const std::string &key); task_process_status taskUpdateDashPrefixTag( const std::string &key, diff --git a/orchagent/dash/dashenifwdinfo.cpp b/orchagent/dash/dashenifwdinfo.cpp new file mode 100644 index 00000000000..636c67c5a11 --- /dev/null +++ b/orchagent/dash/dashenifwdinfo.cpp @@ -0,0 +1,391 @@ +#include "dashenifwdorch.h" + +using namespace swss; +using namespace std; + +const int EniAclRule::BASE_PRIORITY = 9996; + +unique_ptr EniNH::createNextHop(dpu_type_t type, const IpAddress& ip) +{ + if (type == dpu_type_t::LOCAL) + { + return unique_ptr(new LocalEniNH(ip)); + } + return unique_ptr(new RemoteEniNH(ip)); +} + + +void LocalEniNH::resolve(EniInfo& eni) +{ + auto& ctx = eni.getCtx(); + auto alias = ctx->getNbrAlias(endpoint_); + + NextHopKey nh(endpoint_, alias); + if (ctx->isNeighborResolved(nh)) + { + setStatus(endpoint_status_t::RESOLVED); + return ; + } + + ctx->resolveNeighbor(nh); + setStatus(endpoint_status_t::UNRESOLVED); +} + +string LocalEniNH::getRedirectVal() +{ + return endpoint_.to_string(); +} + + +void RemoteEniNH::resolve(EniInfo& eni) +{ + auto& ctx = eni.getCtx(); + auto vnet = eni.getVnet(); + + if (!ctx->findVnetTunnel(vnet, tunnel_name_)) + { + SWSS_LOG_ERROR("Couldn't find tunnel name for Vnet %s", vnet.c_str()); + setStatus(endpoint_status_t::UNRESOLVED); + return ; + } + + uint64_t vnet_vni; + if (!ctx->findVnetVni(vnet, vnet_vni)) + { + SWSS_LOG_ERROR("Couldn't find VNI for Vnet %s", vnet.c_str()); + setStatus(endpoint_status_t::UNRESOLVED); + return ; + } + + vni_ = std::to_string(vnet_vni); + + /* Note: AclOrch already has logic to create / delete Tunnel NH, no need to create here */ + setStatus(endpoint_status_t::RESOLVED); +} + +string RemoteEniNH::getRedirectVal() +{ + /* Format Expected by AclOrch: endpoint_ip@tunnel_name[,vni][,mac] */ + return endpoint_.to_string() + "@" + tunnel_name_ + ',' + vni_; +} + +void EniAclRule::setKey(EniInfo& eni) +{ + name_ = string(DashEniFwd::TABLE) + ":" + eni.toKey(); + if (type_ == rule_type_t::TUNNEL_TERM) + { + name_ += "_TERM"; + } +} + +update_type_t EniAclRule::processUpdate(EniInfo& eni) +{ + SWSS_LOG_ENTER(); + auto& ctx = eni.getCtx(); + IpAddress primary_endp; + dpu_type_t primary_type = LOCAL; + update_type_t update_type = PRIMARY_UPDATE; + std::string primary_id; + + if (type_ == rule_type_t::TUNNEL_TERM) + { + /* Tunnel term entries always use local endpoint regardless of primary id */ + if (!eni.findLocalEp(primary_id)) + { + SWSS_LOG_ERROR("No Local endpoint was found for Rule: %s", getKey().c_str()); + return update_type_t::INVALID; + } + } + else + { + primary_id = eni.getPrimaryId(); + } + + if (!ctx->dpu_info.getType(primary_id, primary_type)) + { + SWSS_LOG_ERROR("No primary id %s in DPU Table", primary_id.c_str()); + return update_type_t::INVALID; + } + + if (primary_type == LOCAL) + { + ctx->dpu_info.getPaV4(primary_id, primary_endp); + } + else + { + ctx->dpu_info.getNpuV4(primary_id, primary_endp); + } + + if (nh_ == nullptr) + { + /* Create Request */ + update_type = update_type_t::CREATE; + } + else if (nh_->getType() != primary_type || nh_->getEp() != primary_endp) + { + /* primary endpoint is switched */ + update_type = update_type_t::PRIMARY_UPDATE; + SWSS_LOG_NOTICE("Endpoint IP for Rule %s updated from %s -> %s", getKey().c_str(), + nh_->getEp().to_string().c_str(), primary_endp.to_string().c_str()); + } + else if(nh_->getStatus() == RESOLVED) + { + /* No primary update and nexthop resolved, no update + Neigh Down on a existing local endpoint needs special handling */ + return update_type_t::IDEMPOTENT; + } + + if (update_type == update_type_t::PRIMARY_UPDATE || update_type == update_type_t::CREATE) + { + if (nh_ != nullptr) + { + nh_->destroy(eni); + } + nh_.reset(); + nh_ = EniNH::createNextHop(primary_type, primary_endp); + } + + /* Try to resolve the neighbor */ + nh_->resolve(eni); + return update_type; +} + +void EniAclRule::fire(EniInfo& eni) +{ + /* + Process an ENI update and handle the ACL rule accordingly + 1) See if the update is valid and infer if the nexthop is local or remote + 2) Create a NextHop object and if resolved, proceed with installing the ACL Rule + */ + SWSS_LOG_ENTER(); + + auto update_type = processUpdate(eni); + + if (update_type == update_type_t::INVALID || update_type == update_type_t::IDEMPOTENT) + { + if (update_type == update_type_t::INVALID) + { + setState(rule_state_t::FAILED); + } + return ; + } + + auto& ctx = eni.getCtx(); + auto key = getKey(); + + if (state_ == rule_state_t::INSTALLED && update_type == update_type_t::PRIMARY_UPDATE) + { + /* + Delete the complete rule before updating it, + ACLOrch Doesn't support incremental updates + */ + ctx->deleteAclRule(key); + setState(rule_state_t::UNINSTALLED); + } + + if (nh_->getStatus() != endpoint_status_t::RESOLVED) + { + /* Wait until the endpoint is resolved */ + setState(rule_state_t::PENDING); + return ; + } + + vector fv_ = { + { RULE_PRIORITY, to_string(BASE_PRIORITY + static_cast(type_)) }, + { MATCH_DST_IP, ctx->getVip().to_string() }, + { getMacMatchDirection(eni), eni.getMac().to_string() }, + { ACTION_REDIRECT_ACTION, nh_->getRedirectVal() } + }; + + if (type_ == rule_type_t::TUNNEL_TERM) + { + fv_.push_back({MATCH_TUNNEL_TERM, "true"}); + } + + ctx->createAclRule(key, fv_); + setState(INSTALLED); +} + +string EniAclRule::getMacMatchDirection(EniInfo& eni) +{ + return MATCH_INNER_DST_MAC; +} + +void EniAclRule::destroy(EniInfo& eni) +{ + if (state_ == rule_state_t::INSTALLED) + { + auto key = getKey(); + auto& ctx = eni.getCtx(); + ctx->deleteAclRule(key); + if (nh_ != nullptr) + { + nh_->destroy(eni); + } + nh_.reset(); + setState(rule_state_t::UNINSTALLED); + } +} + +void EniAclRule::setState(rule_state_t state) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_INFO("EniFwd ACL Rule: %s State Change %d -> %d", getKey().c_str(), state_, state); + state_ = state; +} + + +EniInfo::EniInfo(const string& mac_str, const string& vnet, const shared_ptr& ctx) : + mac_(mac_str), vnet_name_(vnet), ctx(ctx) +{ + formatMac(); +} + +string EniInfo::toKey() const +{ + return vnet_name_ + "_" + mac_key_; +} + +void EniInfo::fireRule(rule_type_t rule_type) +{ + auto rule_itr = rule_container_.find(rule_type); + if (rule_itr != rule_container_.end()) + { + rule_itr->second.fire(*this); + } +} + +void EniInfo::fireAllRules() +{ + for (auto& rule_tuple : rule_container_) + { + fireRule(rule_tuple.first); + } +} + +bool EniInfo::destroy(const Request& db_request) +{ + for (auto& rule_tuple : rule_container_) + { + rule_tuple.second.destroy(*this); + } + rule_container_.clear(); + return true; +} + +bool EniInfo::create(const Request& db_request) +{ + SWSS_LOG_ENTER(); + + auto updates = db_request.getAttrFieldNames(); + auto itr_ep_list = updates.find(DashEniFwd::VDPU_IDS); + auto itr_primary_id = updates.find(DashEniFwd::PRIMARY); + + /* Validation Checks */ + if (itr_ep_list == updates.end() || itr_primary_id == updates.end()) + { + SWSS_LOG_ERROR("Invalid DASH_ENI_FORWARD_TABLE request: No endpoint/primary"); + return false; + } + + ep_list_ = db_request.getAttrStringList(DashEniFwd::VDPU_IDS); + primary_id_ = db_request.getAttrString(DashEniFwd::PRIMARY); + + std::string local_id; + bool tunn_term_allow = findLocalEp(local_id); + + /* Create Rules */ + rule_container_.emplace(piecewise_construct, + forward_as_tuple(rule_type_t::NO_TUNNEL_TERM), + forward_as_tuple(rule_type_t::NO_TUNNEL_TERM, *this)); + + if (tunn_term_allow) + { + /* Create rule for tunnel termination if required */ + rule_container_.emplace(piecewise_construct, + forward_as_tuple(rule_type_t::TUNNEL_TERM), + forward_as_tuple(rule_type_t::TUNNEL_TERM, *this)); + } + + fireAllRules(); + return true; +} + +bool EniInfo::update(const NeighborUpdate& nbr_update) +{ + if (nbr_update.add) + { + fireAllRules(); + } + else + { + /* + Neighbor Delete handling not supported yet + When this update comes, ACL rule must be deleted first, followed by the NEIGH object + */ + } + return true; +} + +bool EniInfo::update(const Request& db_request) +{ + SWSS_LOG_ENTER(); + + /* Only primary_id is expected to change after ENI is created */ + auto updates = db_request.getAttrFieldNames(); + auto itr_primary_id = updates.find(DashEniFwd::PRIMARY); + + /* Validation Checks */ + if (itr_primary_id == updates.end()) + { + throw logic_error("Invalid DASH_ENI_FORWARD_TABLE update: No primary idx"); + } + + if (getPrimaryId() == db_request.getAttrString(DashEniFwd::PRIMARY)) + { + /* No update in the primary id, return true */ + return true; + } + + /* Update local primary id and fire the rules */ + primary_id_ = db_request.getAttrString(DashEniFwd::PRIMARY); + fireAllRules(); + + return true; +} + +bool EniInfo::findLocalEp(std::string& local_endpoint) const +{ + /* Check if atleast one of the endpoints is local */ + bool found = false; + for (auto idx : ep_list_) + { + dpu_type_t val = dpu_type_t::CLUSTER; + if (ctx->dpu_info.getType(idx, val) && val == dpu_type_t::LOCAL) + { + if (!found) + { + found = true; + local_endpoint = idx; + } + else + { + SWSS_LOG_WARN("Multiple Local Endpoints for the ENI %s found, proceeding with %s", + mac_.to_string().c_str(), local_endpoint.c_str()); + } + } + } + return found; +} + +void EniInfo::formatMac() +{ + /* f4:93:9f:ef:c4:7e -> F4939FEFC47E */ + mac_key_.clear(); + auto mac_orig = mac_.to_string(); + for (char c : mac_orig) { + if (c != ':') { // Skip colons + mac_key_ += static_cast(toupper(c)); + } + } +} diff --git a/orchagent/dash/dashenifwdorch.cpp b/orchagent/dash/dashenifwdorch.cpp new file mode 100644 index 00000000000..55038d26fd8 --- /dev/null +++ b/orchagent/dash/dashenifwdorch.cpp @@ -0,0 +1,650 @@ +#include +#include +#include "dashenifwdorch.h" +#include "directory.h" + +extern Directory gDirectory; + +using namespace swss; +using namespace std; + +DashEniFwdOrch::DashEniFwdOrch(DBConnector* cfgDb, DBConnector* applDb, const std::string& tableName, NeighOrch* neighOrch) + : Orch2(applDb, tableName, request_), neighorch_(neighOrch) +{ + SWSS_LOG_ENTER(); + ctx = make_shared(cfgDb, applDb); + if (neighorch_) + { + /* Listen to Neighbor events */ + neighorch_->attach(this); + } +} + +DashEniFwdOrch::~DashEniFwdOrch() +{ + if (neighorch_) + { + neighorch_->detach(this); + } +} + +void DashEniFwdOrch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + switch(type) { + case SUBJECT_TYPE_NEIGH_CHANGE: + { + NeighborUpdate *update = static_cast(cntx); + handleNeighUpdate(*update); + break; + } + default: + // Ignore the update + return; + } +} + +void DashEniFwdOrch::handleNeighUpdate(const NeighborUpdate& update) +{ + /* + Refresh ENI's that are hosted on the DPU with the corresponding Neighboo + */ + SWSS_LOG_ENTER(); + auto ipaddr = update.entry.ip_address; + auto dpu_id_itr = neigh_dpu_map_.find(ipaddr); + if (dpu_id_itr == neigh_dpu_map_.end()) + { + return ; + } + SWSS_LOG_NOTICE("Neighbor Update: %s, add: %d", ipaddr.to_string().c_str(), update.add); + + auto dpu_id = dpu_id_itr->second; + auto itr = dpu_eni_map_.lower_bound(dpu_id); + auto itr_end = dpu_eni_map_.upper_bound(dpu_id); + + while (itr != itr_end) + { + /* Find the eni_itr */ + auto eni_itr = eni_container_.find(itr->second); + if (eni_itr != eni_container_.end()) + { + eni_itr->second.update(update); + } + itr++; + } +} + +void DashEniFwdOrch::initLocalEndpoints() +{ + auto ids = ctx->dpu_info.getIds(); + dpu_type_t primary_type = CLUSTER; + IpAddress local_endp; + for (auto id : ids) + { + if(ctx->dpu_info.getType(id, primary_type) && primary_type == dpu_type_t::LOCAL) + { + if(ctx->dpu_info.getPaV4(id, local_endp)) + { + neigh_dpu_map_.insert(make_pair(local_endp, id)); + SWSS_LOG_NOTICE("Local DPU endpoint detected %s", local_endp.to_string().c_str()); + + /* Try to resovle the neighbor */ + auto alias = ctx->getNbrAlias(local_endp); + NextHopKey nh(local_endp, alias); + + if (ctx->isNeighborResolved(nh)) + { + SWSS_LOG_INFO("Neighbor already populated for local endpoint %s", local_endp.to_string().c_str()); + } + ctx->resolveNeighbor(nh); + } + } + } +} + +void DashEniFwdOrch::handleEniDpuMapping(const std::string& id, MacAddress mac, bool add) +{ + /* Make sure id is local */ + dpu_type_t primary_type = CLUSTER; + if(ctx->dpu_info.getType(id, primary_type) && primary_type == dpu_type_t::LOCAL) + { + if (add) + { + dpu_eni_map_.insert(make_pair(id, mac)); + } + else + { + auto range = dpu_eni_map_.equal_range(id); + for (auto it = range.first; it != range.second; ++it) + { + if (it->second == mac) + { + dpu_eni_map_.erase(it); + break; + } + } + } + } +} + +void DashEniFwdOrch::lazyInit() +{ + if (ctx_initialized_) + { + return ; + } + /* + 1. DpuRegistry + 2. Other Orch ptrs + 3. Internal dpu-id mappings + */ + ctx->initialize(); + ctx->populateDpuRegistry(); + initLocalEndpoints(); + ctx_initialized_ = true; +} + +bool DashEniFwdOrch::addOperation(const Request& request) +{ + lazyInit(); + + bool new_eni = false; + auto vnet_name = request.getKeyString(0); + auto eni_id = request.getKeyMacAddress(1); + auto eni_itr = eni_container_.find(eni_id); + + if (eni_itr == eni_container_.end()) + { + new_eni = true; + eni_container_.emplace(std::piecewise_construct, + std::forward_as_tuple(eni_id), + std::forward_as_tuple(eni_id.to_string(), vnet_name, ctx)); + + eni_itr = eni_container_.find(eni_id); + } + + if (new_eni) + { + eni_itr->second.create(request); + std::string local_ep; + if (eni_itr->second.findLocalEp(local_ep)) + { + /* Add to the local map if the endpoint is found */ + handleEniDpuMapping(local_ep, eni_id, true); + } + } + else + { + eni_itr->second.update(request); + } + return true; +} + +bool DashEniFwdOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + auto vnet_name = request.getKeyString(0); + auto eni_id = request.getKeyMacAddress(1); + + auto eni_itr = eni_container_.find(eni_id); + + if (eni_itr == eni_container_.end()) + { + SWSS_LOG_ERROR("Invalid del request %s:%s", vnet_name.c_str(), eni_id.to_string().c_str()); + return true; + } + + bool result = eni_itr->second.destroy(request); + if (result) + { + std::string local_ep; + if (eni_itr->second.findLocalEp(local_ep)) + { + handleEniDpuMapping(local_ep, eni_id, false); + } + } + eni_container_.erase(eni_id); + return true; +} + + +void DpuRegistry::populate(const DBConnector* cfg_db) +{ + /* + Read DPU, VDPU, and Remote DPU tables, they are expected to be populated by the time HA is ready + */ + SWSS_LOG_ENTER(); + processDpuTable(cfg_db); + processRemoteDpuTable(cfg_db); + processVdpuTable(cfg_db); +} + +void DpuRegistry::processDpuTable(const DBConnector* cfg_db) +{ + Table dpuTable(cfg_db, DashEniFwd::DPU_TABLE); + std::vector keys; + dpuTable.getKeys(keys); + for (auto key : keys) + { + try + { + std::vector values; + dpuTable.get(key, values); + + KeyOpFieldsValuesTuple kvo = { + key, SET_COMMAND, values + }; + + dpu_request_.clear(); + dpu_request_.parse(kvo); + string key = dpu_request_.getKeyString(0); + // Check if STATE is present and if present and value is 'down', skip this DPU + auto updates = dpu_request_.getAttrFieldNames(); + auto itr_state = updates.find(DashEniFwd::STATE); + if (itr_state != updates.end()) + { + auto state_val = dpu_request_.getAttrString(DashEniFwd::STATE); + if (state_val == "down") + { + SWSS_LOG_INFO("Skipping LOCAL DPU %s as its state is down", key.c_str()); + continue; + } + } + + DpuData data; + data.type = dpu_type_t::LOCAL; + data.pa_v4 = dpu_request_.getAttrIP(DashEniFwd::PA_V4); + dpus_name_map_.insert({key, data}); + + SWSS_LOG_INFO("LOCAL DPU %s found, PA_V4: %s", key.c_str(), data.pa_v4.to_string().c_str()); + } + catch(exception& e) + { + SWSS_LOG_ERROR("Failed to parse key:%s in the %s", key.c_str(), DashEniFwd::DPU_TABLE); + } + } +} + +void DpuRegistry::processRemoteDpuTable(const DBConnector* cfg_db) +{ + Table remoteDpuTable(cfg_db, DashEniFwd::REMOTE_DPU_TABLE); + std::vector keys; + remoteDpuTable.getKeys(keys); + for (auto key : keys) + { + try + { + std::vector values; + remoteDpuTable.get(key, values); + + KeyOpFieldsValuesTuple kvo = { + key, SET_COMMAND, values + }; + + remote_dpu_request_.clear(); + remote_dpu_request_.parse(kvo); + string key = remote_dpu_request_.getKeyString(0); + + DpuData data; + data.type = dpu_type_t::CLUSTER; + data.pa_v4 = remote_dpu_request_.getAttrIP(DashEniFwd::PA_V4); + data.npu_v4 = remote_dpu_request_.getAttrIP(DashEniFwd::NPU_V4); + dpus_name_map_.insert({key, data}); + + SWSS_LOG_INFO("Remote DPU %s found, PA_V4: %s, NPU_V4: %s", + key.c_str(), + data.pa_v4.to_string().c_str(), + data.npu_v4.to_string().c_str() + ); + } + catch(exception& e) + { + SWSS_LOG_ERROR("Failed to parse key:%s in the %s", key.c_str(), DashEniFwd::REMOTE_DPU_TABLE); + } + } +} + +void DpuRegistry::processVdpuTable(const DBConnector* cfg_db) +{ + Table vdpuTable(cfg_db, DashEniFwd::VDPU_TABLE); + std::vector keys; + vdpuTable.getKeys(keys); + for (auto key : keys) + { + try + { + std::vector values; + vdpuTable.get(key, values); + + KeyOpFieldsValuesTuple kvo = { + key, SET_COMMAND, values + }; + + vdpu_request_.clear(); + vdpu_request_.parse(kvo); + string key = vdpu_request_.getKeyString(0); + vector dpu_ids = vdpu_request_.getAttrStringList(DashEniFwd::DPU_IDS); + for (auto dpu_id : dpu_ids) + { + /* This method is expected to be called after the DPU/REMOTE_DPU table is populated */ + if (dpus_name_map_.find(dpu_id) != dpus_name_map_.end()) + { + vdpus_map_[key].push_back(dpu_id); + SWSS_LOG_INFO("DPU: %s belongs to VDPU %s", dpu_id.c_str(), key.c_str()); + } + else + { + SWSS_LOG_WARN("Invalid DPU ID: %s, not found in DPU/REMOTE_DPU table", dpu_id.c_str()); + } + } + } + catch(exception& e) + { + SWSS_LOG_ERROR("Failed to parse key:%s in the %s", key.c_str(), DashEniFwd::REMOTE_DPU_TABLE); + } + } +} + +std::vector DpuRegistry::getIds() +{ + std::vector ids; + for (auto itr = vdpus_map_.begin(); itr != vdpus_map_.end(); itr++) + { + ids.push_back(itr->first); + } + return ids; +} + +bool DpuRegistry::getDpuId(const std::string& vdpu_id, std::string& dpu_id) +{ + dpu_id.clear(); + auto itr = vdpus_map_.find(vdpu_id); + if (itr == vdpus_map_.end() || itr->second.empty()) return false; + dpu_id = itr->second[0]; + return true; +} + +bool DpuRegistry::getType(const std::string& vdpu_id, dpu_type_t& val) +{ + std::string id; + if (!getDpuId(vdpu_id, id)) return false; + auto itr = dpus_name_map_.find(id); + if (itr == dpus_name_map_.end()) return false; + val = itr->second.type; + return true; +} + +bool DpuRegistry::getPaV4(const std::string& vdpu_id, swss::IpAddress& val) +{ + std::string id; + if (!getDpuId(vdpu_id, id)) return false; + auto itr = dpus_name_map_.find(id); + if (itr == dpus_name_map_.end()) return false; + val = itr->second.pa_v4; + return true; +} + +bool DpuRegistry::getNpuV4(const std::string& vdpu_id, swss::IpAddress& val) +{ + std::string id; + if (!getDpuId(vdpu_id, id)) return false; + auto itr = dpus_name_map_.find(id); + if (itr == dpus_name_map_.end()) return false; + val = itr->second.npu_v4; + return true; +} + +EniFwdCtxBase::EniFwdCtxBase(DBConnector* cfgDb, DBConnector* applDb) +{ + cfg_db_ = make_unique(*cfgDb); + port_tbl_ = make_unique
(cfgDb, CFG_PORT_TABLE_NAME); + vip_tbl_ = make_unique
(cfgDb, DashEniFwd::VIP_TABLE); + rule_table_ = make_unique(applDb, APP_ACL_RULE_TABLE_NAME); + acl_table_type_ = make_unique(applDb, APP_ACL_TABLE_TYPE_TABLE_NAME); + acl_table_ = make_unique(applDb, APP_ACL_TABLE_TABLE_NAME); + vip_inferred_ = false; +} + +void EniFwdCtxBase::populateDpuRegistry() +{ + dpu_info.populate(cfg_db_.get()); +} + +std::set EniFwdCtxBase::findInternalPorts() +{ + std::vector all_ports; + std::set internal_ports; + port_tbl_->getKeys(all_ports); + for (auto& port : all_ports) + { + std::string val; + if (port_tbl_->hget(port, PORT_ROLE, val)) + { + if (val == PORT_ROLE_DPC) + { + internal_ports.insert(port); + } + } + } + return internal_ports; +} + +vector EniFwdCtxBase::getBindPoints() +{ + std::vector bpoints; + auto internal_ports = findInternalPorts(); + auto all_ports = getAllPorts(); + + std::set legitSet; + + /* Add Phy and Lag ports */ + for (auto &it: all_ports) + { + if (it.second.m_type == Port::PHY || it.second.m_type == Port::LAG) + { + legitSet.insert(it.first); + } + } + + /* Remove any Lag Members PHY's */ + for (auto &it: all_ports) + { + Port& port = it.second; + if (port.m_type == Port::LAG) + { + for (auto mem : port.m_members) + { + /* Remove any members that are part of a LAG */ + legitSet.erase(mem); + } + } + } + + /* Filter Internal ports */ + for (auto& port : legitSet) + { + if (internal_ports.find(port) == internal_ports.end()) + { + bpoints.push_back(port); + } + } + + return bpoints; +} + +string EniFwdCtxBase::getNbrAlias(const swss::IpAddress& nh_ip) +{ + auto itr = nh_alias_map_.find(nh_ip); + if (itr != nh_alias_map_.end()) + { + return itr->second; + } + + auto alias = this->getRouterIntfsAlias(nh_ip); + if (!alias.empty()) + { + nh_alias_map_.insert(std::pair(nh_ip, alias)); + } + return alias; +} + +IpPrefix EniFwdCtxBase::getVip() +{ + SWSS_LOG_ENTER(); + + if (!vip_inferred_) + { + std::vector keys; + vip_tbl_->getKeys(keys); + if (keys.empty()) + { + SWSS_LOG_THROW("Invalid Config: VIP info not populated"); + } + + try + { + vip = IpPrefix(keys[0]); + SWSS_LOG_NOTICE("VIP found: %s", vip.to_string().c_str()); + } + catch (std::exception& e) + { + SWSS_LOG_THROW("VIP is not formatted correctly %s", keys[0].c_str()); + } + vip_inferred_ = true; + } + return vip; +} + +void EniFwdCtx::initialize() +{ + portsorch_ = gDirectory.get(); + neighorch_ = gDirectory.get(); + intfsorch_ = gDirectory.get(); + vnetorch_ = gDirectory.get(); + vxlanorch_ = gDirectory.get(); + assert(portsorch_); + assert(neighorch_); + assert(intfsorch_); + assert(vnetorch_); + assert(vxlanorch_); +} + +bool EniFwdCtx::isNeighborResolved(const NextHopKey& nh) +{ + return neighorch_->isNeighborResolved(nh); +} + +void EniFwdCtx::resolveNeighbor(const NeighborEntry& nh) +{ + /* Neighorch already has the logic to handle the duplicate requests */ + neighorch_->resolveNeighbor(nh); +} + +string EniFwdCtx::getRouterIntfsAlias(const IpAddress &ip, const string &vrf_name) +{ + return intfsorch_->getRouterIntfsAlias(ip, vrf_name); +} + +bool EniFwdCtx::findVnetVni(const string& vnet_name, uint64_t& vni) +{ + if (vnetorch_->isVnetExists(vnet_name)) + { + vni = vnetorch_->getTypePtr(vnet_name)->getVni(); + return true; + } + return false; +} + +bool EniFwdCtx::findVnetTunnel(const string& vnet_name, string& tunnel) +{ + if (vnetorch_->isVnetExists(vnet_name)) + { + tunnel = vnetorch_->getTunnelName(vnet_name); + return true; + } + return false; +} + +std::map& EniFwdCtx::getAllPorts() +{ + return portsorch_->getAllPorts(); +} + +void EniFwdCtxBase::createAclRule(const std::string& rule, const std::vector& fv) +{ + if (acl_rule_count_ == 0) + { + addAclTable(); + } + acl_rule_count_++; + SWSS_LOG_INFO("Creating ACL rule: %s, ENI Forwarding rules count: %u", rule.c_str(), acl_rule_count_); + rule_table_->set(rule, fv); +} + +void EniFwdCtxBase::deleteAclRule(const std::string& rule) +{ + rule_table_->del(rule); + if (acl_rule_count_ > 0) + { + acl_rule_count_--; + SWSS_LOG_INFO("Deleted ACL rule: %s, ENI Forwarding rule count: %u", rule.c_str(), acl_rule_count_); + if (acl_rule_count_ == 0) + { + deleteAclTable(); + } + } + else + { + SWSS_LOG_ERROR("Attempted to delete ACL rule %s but rule count is already 0", rule.c_str()); + } +} + +void EniFwdCtxBase::addAclTable() +{ + vector match_list = { + MATCH_DST_IP, + MATCH_INNER_DST_MAC, + MATCH_TUNNEL_TERM + }; + + auto concat = [](const std::string &a, const std::string &b) { return a + "," + b; }; + + std::string matches = std::accumulate( + std::next(match_list.begin()), match_list.end(), match_list[0], + concat); + + string bpoint_types = string(BIND_POINT_TYPE_PORT) + "," + string(BIND_POINT_TYPE_PORTCHANNEL); + + vector fv_ = { + { ACL_TABLE_TYPE_MATCHES, matches}, + { ACL_TABLE_TYPE_ACTIONS, ACTION_REDIRECT_ACTION }, + { ACL_TABLE_TYPE_BPOINT_TYPES, bpoint_types} + }; + + acl_table_type_->set(DashEniFwd::TABLE_TYPE, fv_); + + auto ports = getBindPoints(); + std::string ports_str; + + if (!ports.empty()) + { + ports_str = std::accumulate(std::next(ports.begin()), ports.end(), ports[0], concat); + } + + /* Write ACL Table */ + vector table_fv_ = { + { ACL_TABLE_DESCRIPTION, "Contains Rule for DASH ENI Based Forwarding"}, + { ACL_TABLE_TYPE, DashEniFwd::TABLE_TYPE }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + { ACL_TABLE_PORTS, ports_str } + }; + + acl_table_->set(DashEniFwd::TABLE, table_fv_); +} + +void EniFwdCtxBase::deleteAclTable() +{ + acl_table_->del(DashEniFwd::TABLE); + acl_table_type_->del(DashEniFwd::TABLE_TYPE); +} diff --git a/orchagent/dash/dashenifwdorch.h b/orchagent/dash/dashenifwdorch.h new file mode 100644 index 00000000000..a513aa1a81e --- /dev/null +++ b/orchagent/dash/dashenifwdorch.h @@ -0,0 +1,394 @@ +#pragma once + +#include +#include +#include "producerstatetable.h" +#include "orch.h" +#include "portsorch.h" +#include "aclorch.h" +#include "neighorch.h" +#include "vnetorch.h" +#include "observer.h" +#include "request_parser.h" +#include +#include + +typedef enum +{ + LOCAL, + CLUSTER +} dpu_type_t; + +typedef enum +{ + RESOLVED, + UNRESOLVED +} endpoint_status_t; + +typedef enum +{ + FAILED, + PENDING, + INSTALLED, + UNINSTALLED +} rule_state_t; + +typedef enum +{ + INVALID, + IDEMPOTENT, + CREATE, + PRIMARY_UPDATE /* Either NH update or primary endp change */ +} update_type_t; + +typedef enum +{ + NO_TUNNEL_TERM = 0, + TUNNEL_TERM +} rule_type_t; + + +class DpuRegistry; +class EniNH; +class LocalEniNH; +class RemoteEniNH; +class EniAclRule; +class EniInfo; +class EniFwdCtxBase; +class EniFwdCtx; + +namespace DashEniFwd +{ + /* TABLES; Until finalized and added to sonic-swss-common */ + static constexpr const char* DPU_TABLE = "DPU"; + static constexpr const char* REMOTE_DPU_TABLE = "REMOTE_DPU"; + static constexpr const char* VDPU_TABLE = "VDPU"; + static constexpr const char* VIP_TABLE = "VIP_TABLE"; + + /* ENI Registry Fields */ + static constexpr const char* TABLE_TYPE = "ENI_REDIRECT"; + static constexpr const char* TABLE = "ENI"; + static constexpr const char* VDPU_IDS = "vdpu_ids"; + static constexpr const char* PRIMARY = "primary_vdpu"; + + /* DPU Registry Fields */ + static constexpr const char* STATE = "state"; + static constexpr const char* PA_V4 = "pa_ipv4"; + static constexpr const char* PA_V6 = "pa_ipv6"; + static constexpr const char* NPU_V4 = "npu_ipv4"; + static constexpr const char* NPU_V6 = "npu_ipv6"; + static constexpr const char* DPU_IDS = "main_dpu_ids"; +}; + +const request_description_t eni_dash_fwd_desc = { + { REQ_T_STRING, REQ_T_MAC_ADDRESS }, // VNET_NAME, ENI_ID + { + { DashEniFwd::VDPU_IDS, REQ_T_STRING_LIST }, // VDPU ID's + { DashEniFwd::PRIMARY, REQ_T_STRING }, + }, + { DashEniFwd::PRIMARY } +}; + +class DashEniFwdOrch : public Orch2, public Observer +{ +public: + struct EniFwdRequest : public Request + { + EniFwdRequest() : Request(eni_dash_fwd_desc, ':', true) {} + }; + + DashEniFwdOrch(swss::DBConnector*, swss::DBConnector*, const std::string&, NeighOrch* neigh_orch_); + ~DashEniFwdOrch(); + + /* Refresh the ENIs based on NextHop status */ + void update(SubjectType, void *) override; + +protected: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + EniFwdRequest request_; + +private: + void lazyInit(); + void initLocalEndpoints(); + void handleNeighUpdate(const NeighborUpdate& update); + void handleEniDpuMapping(const std::string& id, MacAddress mac, bool add = true); + + /* multimap because Multiple ENIs can be mapped to the same DPU */ + std::multimap dpu_eni_map_; + /* Local Endpoint -> DPU mapping */ + std::map neigh_dpu_map_; + std::map eni_container_; + + bool ctx_initialized_ = false; + shared_ptr ctx; + NeighOrch* neighorch_; +}; + + +const request_description_t dpu_table_desc = { + { REQ_T_STRING }, + { + { DashEniFwd::STATE, REQ_T_STRING }, + { DashEniFwd::PA_V4, REQ_T_IP }, + { DashEniFwd::PA_V6, REQ_T_IP }, + }, + { DashEniFwd::STATE, DashEniFwd::PA_V4 } +}; + +const request_description_t remote_dpu_table_desc = { + { REQ_T_STRING }, + { + { DashEniFwd::PA_V4, REQ_T_IP }, + { DashEniFwd::PA_V6, REQ_T_IP }, + { DashEniFwd::NPU_V4, REQ_T_IP }, + { DashEniFwd::NPU_V6, REQ_T_IP }, + }, + { DashEniFwd::PA_V4, DashEniFwd::NPU_V4 } +}; + +const request_description_t vdpu_table_desc = { + { REQ_T_STRING }, + { + { DashEniFwd::DPU_IDS, REQ_T_STRING_LIST }, + }, + { DashEniFwd::DPU_IDS } +}; + +class DpuRegistry +{ +public: + struct DpuData + { + dpu_type_t type; + swss::IpAddress pa_v4; + swss::IpAddress npu_v4; + }; + + struct DpuRequest : public Request + { + DpuRequest() : Request(dpu_table_desc, '|', true) {} + }; + struct RemoteDpuRequest : public Request + { + RemoteDpuRequest() : Request(remote_dpu_table_desc, '|', true) {} + }; + struct VdpuRequest : public Request + { + VdpuRequest() : Request(vdpu_table_desc, '|', true) {} + }; + + void populate(const swss::DBConnector*); + std::vector getIds(); + + bool getDpuId(const std::string& vdpu_id, std::string& dpu_id); + bool getType(const std::string& vdpu_id, dpu_type_t& val); + bool getPaV4(const std::string& vdpu_id, swss::IpAddress& val); + bool getNpuV4(const std::string& vdpu_id, swss::IpAddress& val); + +private: + void processDpuTable(const swss::DBConnector*); + void processRemoteDpuTable(const swss::DBConnector*); + void processVdpuTable(const swss::DBConnector*); + + DpuRequest dpu_request_; + RemoteDpuRequest remote_dpu_request_; + VdpuRequest vdpu_request_; + // DPU -> DpuData + unordered_map dpus_name_map_; + // VDPU Name -> [DPU2, DPU3, ...] + unordered_map> vdpus_map_; +}; + + +class EniNH +{ +public: + static std::unique_ptr createNextHop(dpu_type_t, const swss::IpAddress&); + + EniNH(const swss::IpAddress& ip) : endpoint_(ip) {} + void setStatus(endpoint_status_t status) {status_ = status;} + void setType(dpu_type_t type) {type_ = type;} + endpoint_status_t getStatus() {return status_;} + dpu_type_t getType() {return type_;} + swss::IpAddress getEp() {return endpoint_;} + + virtual void resolve(EniInfo& eni) = 0; + virtual void destroy(EniInfo& eni) {}; + virtual string getRedirectVal() = 0; + +protected: + endpoint_status_t status_; + dpu_type_t type_; + swss::IpAddress endpoint_; +}; + + +class LocalEniNH : public EniNH +{ +public: + LocalEniNH(const swss::IpAddress& ip) : EniNH(ip) + { + setStatus(endpoint_status_t::UNRESOLVED); + setType(dpu_type_t::LOCAL); + } + void resolve(EniInfo& eni) override; + string getRedirectVal() override; +}; + + +class RemoteEniNH : public EniNH +{ +public: + RemoteEniNH(const swss::IpAddress& ip) : EniNH(ip) + { + /* No BFD monitoring for Remote NH yet */ + setStatus(endpoint_status_t::UNRESOLVED); + setType(dpu_type_t::CLUSTER); + } + void resolve(EniInfo& eni) override; + string getRedirectVal() override; + +private: + string tunnel_name_; + string vni_; +}; + + +class EniAclRule +{ +public: + static const int BASE_PRIORITY; + + EniAclRule(rule_type_t type, EniInfo& eni) : + type_(type), + state_(rule_state_t::PENDING) { setKey(eni); } + + void destroy(EniInfo&); + void fire(EniInfo&); + + update_type_t processUpdate(EniInfo& eni); + std::string getKey() {return name_; } + string getMacMatchDirection(EniInfo& eni); + void setState(rule_state_t state); + +private: + void setKey(EniInfo&); + std::unique_ptr nh_ = nullptr; + std::string name_; + rule_type_t type_; + rule_state_t state_; +}; + + +class EniInfo +{ +public: + friend class DashEniFwdOrch; /* Only orch is expected to call create/update/fire */ + + EniInfo(const std::string&, const std::string&, const shared_ptr&); + EniInfo(const EniInfo&) = delete; + EniInfo& operator=(const EniInfo&) = delete; + EniInfo(EniInfo&&) = delete; + EniInfo& operator=(EniInfo&&) = delete; + + string toKey() const; + std::shared_ptr& getCtx() {return ctx;} + bool findLocalEp(std::string&) const; + swss::MacAddress getMac() const { return mac_; } // Can only be set during object creation + std::vector getEpList() { return ep_list_; } + std::string getPrimaryId() const { return primary_id_; } + std::string getVnet() const { return vnet_name_; } + +protected: + void formatMac(); + void fireRule(rule_type_t); + void fireAllRules(); + bool create(const Request&); + bool destroy(const Request&); + bool update(const Request& ); + bool update(const NeighborUpdate&); + + std::shared_ptr ctx; + std::map rule_container_; + std::vector ep_list_; + std::string primary_id_; + std::string vnet_name_; + swss::MacAddress mac_; + std::string mac_key_; // Formatted MAC key +}; + + +/* + Collection of API's used across DashEniFwdOrch +*/ +class EniFwdCtxBase +{ +public: + EniFwdCtxBase(DBConnector* cfgDb, DBConnector* applDb); + void populateDpuRegistry(); + std::vector getBindPoints(); + std::string getNbrAlias(const swss::IpAddress& ip); + swss::IpPrefix getVip(); + + void createAclRule(const std::string&, const std::vector&); + void deleteAclRule(const std::string&); + + virtual void initialize() = 0; + /* API's that call other orchagents */ + virtual std::map& getAllPorts() = 0; + virtual bool isNeighborResolved(const NextHopKey&) = 0; + virtual void resolveNeighbor(const NeighborEntry &) = 0; + virtual string getRouterIntfsAlias(const IpAddress &, const string & = "") = 0; + virtual bool findVnetVni(const std::string&, uint64_t& ) = 0; + virtual bool findVnetTunnel(const std::string&, string&) = 0; + + DpuRegistry dpu_info; + +protected: + std::set findInternalPorts(); + void addAclTable(); + void deleteAclTable(); + /* Reference counting for ACL rules */ + uint32_t acl_rule_count_ = 0; + + /* Mapping between DPU Nbr and Alias */ + std::map nh_alias_map_; + + unique_ptr port_tbl_; + unique_ptr vip_tbl_; + unique_ptr cfg_db_; + unique_ptr rule_table_; + unique_ptr acl_table_; + unique_ptr acl_table_type_; + + /* Only one vip is expected per T1 cluster */ + swss::IpPrefix vip; + bool vip_inferred_; +}; + + +/* + Implements API's to access other orchagents +*/ +class EniFwdCtx : public EniFwdCtxBase +{ +public: + using EniFwdCtxBase::EniFwdCtxBase; + + /* Setup pointers to other orchagents */ + void initialize() override; + bool isNeighborResolved(const NextHopKey&) override; + void resolveNeighbor(const NeighborEntry&) override; + std::string getRouterIntfsAlias(const IpAddress &, const string & = "") override; + bool findVnetVni(const std::string&, uint64_t&) override; + bool findVnetTunnel(const std::string&, string&) override; + std::map& getAllPorts() override; + +private: + PortsOrch* portsorch_; + NeighOrch* neighorch_; + IntfsOrch* intfsorch_; + VNetOrch* vnetorch_; + VxlanTunnelOrch* vxlanorch_; +}; diff --git a/orchagent/dash/dashhaorch.cpp b/orchagent/dash/dashhaorch.cpp new file mode 100644 index 00000000000..5d1d6e493a4 --- /dev/null +++ b/orchagent/dash/dashhaorch.cpp @@ -0,0 +1,1313 @@ +#include "dashhaorch.h" + +#include "orch.h" +#include "sai.h" +#include "saiextensions.h" +#include "bfdorch.h" +#include "dashorch.h" +#include "crmorch.h" +#include "saihelper.h" +#include "table.h" +#include "taskworker.h" +#include "pbutils.h" +#include "converter.h" + +#include "chrono" + +using namespace std; +using namespace swss; + +extern sai_dash_ha_api_t* sai_dash_ha_api; +extern sai_dash_eni_api_t* sai_dash_eni_api; +extern sai_object_id_t gSwitchId; +extern sai_switch_api_t* sai_switch_api; + +static const map sai_ha_set_event_type_name = +{ + { SAI_HA_SET_EVENT_DP_CHANNEL_UP, "up" }, + { SAI_HA_SET_EVENT_DP_CHANNEL_DOWN, "down" } +}; + +static const map sai_ha_role_name = { + { SAI_DASH_HA_ROLE_DEAD, "dead" }, + { SAI_DASH_HA_ROLE_ACTIVE, "active" }, + { SAI_DASH_HA_ROLE_STANDBY, "standby" }, + { SAI_DASH_HA_ROLE_STANDALONE, "standalone" }, + { SAI_DASH_HA_ROLE_SWITCHING_TO_ACTIVE, "switching_to_active" }, +}; + +static const map sai_ha_state_name = { + { SAI_DASH_HA_STATE_DEAD, "dead" }, + { SAI_DASH_HA_STATE_CONNECTING, "connecting" }, + { SAI_DASH_HA_STATE_CONNECTED, "connected" }, + { SAI_DASH_HA_STATE_INITIALIZING_TO_ACTIVE, "initializing_to_active" }, + { SAI_DASH_HA_STATE_INITIALIZING_TO_STANDBY, "initializing_to_standby" }, + { SAI_DASH_HA_STATE_PENDING_STANDALONE_ACTIVATION, "pending_standalone_activation" }, + { SAI_DASH_HA_STATE_PENDING_ACTIVE_ACTIVATION, "pending_active_activation" }, + { SAI_DASH_HA_STATE_PENDING_STANDBY_ACTIVATION, "pending_standby_activation" }, + { SAI_DASH_HA_STATE_STANDALONE, "standalone" }, + { SAI_DASH_HA_STATE_ACTIVE, "active" }, + { SAI_DASH_HA_STATE_STANDBY, "standby" }, + { SAI_DASH_HA_STATE_DESTROYING, "destroying" }, + { SAI_DASH_HA_STATE_SWITCHING_TO_STANDALONE, "switching_to_standalone" }, +}; + +static const map sai_ha_scope_event_type_name = +{ + { SAI_HA_SCOPE_EVENT_STATE_CHANGED, "state_changed" }, + { SAI_HA_SCOPE_EVENT_FLOW_RECONCILE_NEEDED, "flow_reconcile_needed" }, + { SAI_HA_SCOPE_EVENT_SPLIT_BRAIN_DETECTED, "split_brain_detected" } +}; + +DashHaOrch::DashHaOrch(DBConnector *db, const vector &tables, DashOrch *dash_orch, BfdOrch *bfd_orch, DBConnector *app_state_db, ZmqServer *zmqServer) : + ZmqOrch(db, tables, zmqServer), + m_dash_orch(dash_orch), + m_bfd_orch(bfd_orch) +{ + SWSS_LOG_ENTER(); + + dash_ha_set_result_table_ = make_unique
(app_state_db, APP_DASH_HA_SET_TABLE_NAME); + dash_ha_scope_result_table_ = make_unique
(app_state_db, APP_DASH_HA_SCOPE_TABLE_NAME); + + m_dpuStateDbConnector = make_unique("DPU_STATE_DB", 0, true); + + m_dpuStateDbHaSetTable = make_unique
(m_dpuStateDbConnector.get(), STATE_DASH_HA_SET_STATE_TABLE_NAME); + m_dpuStateDbHaScopeTable = make_unique
(m_dpuStateDbConnector.get(), STATE_DASH_HA_SCOPE_STATE_TABLE_NAME); + + DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); + m_haSetNotificationConsumer = new NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + auto haSetNotificatier = new Notifier(m_haSetNotificationConsumer, this, "HA_SET_NOTIFICATIONS"); + + m_haScopeNotificationConsumer = new NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + auto haScopeNotificatier = new Notifier(m_haScopeNotificationConsumer, this, "HA_SCOPE_NOTIFICATIONS"); + + Orch::addExecutor(haSetNotificatier); + Orch::addExecutor(haScopeNotificatier); + + register_ha_set_notifier(); + register_ha_scope_notifier(); + + // Register this DashHaOrch instance with DashOrch + m_dash_orch->setDashHaOrch(this); +} + +bool DashHaOrch::register_ha_set_notifier() +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_HA_SET_EVENT_NOTIFY, + &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Unable to query the HA Set event notification capability"); + return false; + } + + if (!capability.set_implemented) + { + SWSS_LOG_INFO("HA Set event notification not supported"); + return false; + } + + attr.id = SAI_SWITCH_ATTR_HA_SET_EVENT_NOTIFY; + attr.value.ptr = (void *)on_ha_set_event; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register HA Set event notification"); + return false; + } + + return true; +} + +bool DashHaOrch::register_ha_scope_notifier() +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_HA_SCOPE_EVENT_NOTIFY, + &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Unable to query the HA Scope event notification capability"); + return false; + } + + if (!capability.set_implemented) + { + SWSS_LOG_INFO("HA Scope event notification not supported"); + return false; + } + + attr.id = SAI_SWITCH_ATTR_HA_SCOPE_EVENT_NOTIFY; + attr.value.ptr = (void *)on_ha_scope_event; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register HA Scope event notification"); + return false; + } + + return true; +} + +std::string DashHaOrch::getHaSetObjectKey(const sai_object_id_t ha_set_oid) +{ + SWSS_LOG_ENTER(); + + for (auto ha_set_entry : m_ha_set_entries) + { + if (ha_set_entry.second.ha_set_id == ha_set_oid) + { + return ha_set_entry.first; + } + } + + return ""; +} + +std::string DashHaOrch::getHaScopeObjectKey(const sai_object_id_t ha_scope_oid) +{ + SWSS_LOG_ENTER(); + + for (auto ha_scope_entry : m_ha_scope_entries) + { + if (ha_scope_entry.second.ha_scope_id == ha_scope_oid) + { + return ha_scope_entry.first; + } + } + + return ""; +} + +HaScopeEntry DashHaOrch::getHaScopeForEni(const std::string& eni) +{ + SWSS_LOG_ENTER(); + + if (m_ha_scope_entries.empty()) + { + HaScopeEntry emptyEntry; + emptyEntry.ha_scope_id = SAI_NULL_OBJECT_ID; + return emptyEntry; + } + + /* Return the first entry. This logic only applies to DPU Scope HA */ + return m_ha_scope_entries.begin()->second; +} + +bool DashHaOrch::updateExistingHaSetEntry(const std::string &key, const dash::ha_set::HaSet &entry, sai_object_id_t sai_ha_set_oid) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + sai_attribute_t ha_set_attr_list[8]={}; + sai_ip_address_t sai_peer_ip; + + if (!to_sai(entry.peer_ip(), sai_peer_ip)) + { + SWSS_LOG_WARN("HA Set entry already exists for %s", key.c_str()); + return true; + } + + ha_set_attr_list[0].id = SAI_HA_SET_ATTR_PEER_IP; + ha_set_attr_list[0].value.ipaddr = sai_peer_ip; + status = sai_dash_ha_api->set_ha_set_attribute(sai_ha_set_oid, + &ha_set_attr_list[0]); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update peer ip for HA Set object in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + SWSS_LOG_INFO("HA Set entry updated for %s, peer_ip is updated to %s", + key.c_str(), + to_string(entry.peer_ip()).c_str()); + + *m_ha_set_entries[key].metadata.mutable_peer_ip() = entry.peer_ip(); + + return true; +} + +bool DashHaOrch::addHaSetEntry(const std::string &key, const dash::ha_set::HaSet &entry) +{ + SWSS_LOG_ENTER(); + + auto it = m_ha_set_entries.find(key); + + if (it != m_ha_set_entries.end()) + { + SWSS_LOG_DEBUG("HA Set entry already exists for %s, updating it", key.c_str()); + + return updateExistingHaSetEntry(key, entry, it->second.ha_set_id); + } + + uint32_t attr_count = 8; + sai_attribute_t ha_set_attr_list[8]={}; + sai_status_t status; + sai_object_id_t sai_ha_set_oid = 0UL; + + sai_ip_address_t sai_local_ip; + sai_ip_address_t sai_peer_ip; + + if (!to_sai(entry.local_ip(), sai_local_ip)) + { + return false; + } + + if (!to_sai(entry.peer_ip(), sai_peer_ip)) + { + return false; + } + + ha_set_attr_list[0].id = SAI_HA_SET_ATTR_LOCAL_IP; + ha_set_attr_list[0].value.ipaddr = sai_local_ip; + + ha_set_attr_list[1].id = SAI_HA_SET_ATTR_PEER_IP; + ha_set_attr_list[1].value.ipaddr = sai_peer_ip; + + ha_set_attr_list[2].id = SAI_HA_SET_ATTR_CP_DATA_CHANNEL_PORT; + ha_set_attr_list[2].value.u16 = static_cast(entry.cp_data_channel_port()); + + ha_set_attr_list[3].id = SAI_HA_SET_ATTR_DP_CHANNEL_DST_PORT; + ha_set_attr_list[3].value.u16 = static_cast(entry.dp_channel_dst_port()); + + ha_set_attr_list[4].id = SAI_HA_SET_ATTR_DP_CHANNEL_MIN_SRC_PORT; + ha_set_attr_list[4].value.u16 = static_cast(entry.dp_channel_src_port_min()); + + ha_set_attr_list[5].id = SAI_HA_SET_ATTR_DP_CHANNEL_MAX_SRC_PORT; + ha_set_attr_list[5].value.u16 = static_cast(entry.dp_channel_src_port_max()); + + ha_set_attr_list[6].id = SAI_HA_SET_ATTR_DP_CHANNEL_PROBE_INTERVAL_MS; + ha_set_attr_list[6].value.u32 = entry.dp_channel_probe_interval_ms(); + + ha_set_attr_list[7].id = SAI_HA_SET_ATTR_DP_CHANNEL_PROBE_FAIL_THRESHOLD; + ha_set_attr_list[7].value.u32 = entry.dp_channel_probe_fail_threshold(); + + status = sai_dash_ha_api->create_ha_set(&sai_ha_set_oid, + gSwitchId, + attr_count, + ha_set_attr_list); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create HA Set object in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + m_ha_set_entries[key] = HaSetEntry {sai_ha_set_oid, entry}; + SWSS_LOG_NOTICE("Created HA Set object for %s", key.c_str()); + + return true; +} + +bool DashHaOrch::removeHaSetEntry(const std::string &key) +{ + SWSS_LOG_ENTER(); + + auto it = m_ha_set_entries.find(key); + + if (it == m_ha_set_entries.end()) + { + SWSS_LOG_WARN("HA Set entry does not exist for %s", key.c_str()); + return true; + } + + sai_status_t status = sai_dash_ha_api->remove_ha_set(it->second.ha_set_id); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove HA Set object in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + m_ha_set_entries.erase(it); + SWSS_LOG_NOTICE("Removed HA Set object for %s", key.c_str()); + + return true; +} + +void DashHaOrch::doTaskHaSetTable(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + uint32_t result; + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const auto& key = kfvKey(tuple); + const auto& op = kfvOp(tuple); + result = DASH_RESULT_SUCCESS; + + if (op == SET_COMMAND) + { + dash::ha_set::HaSet entry; + + /* + * For HA internal tables, kfv format was used instead of serialized pb objects in the end. + * I decided to keep protobuf conversion still for: + * - ensuring the data integrity. + * - in case we need to switch to protobuf in the future. + */ + // if (!parsePbMessage(kfvFieldsValues(tuple), entry)) + // { + // SWSS_LOG_WARN("Requires protobuf at HaSet :%s", key.c_str()); + // it = consumer.m_toSync.erase(it); + // continue; + // } + + if (!convertKfvToHaSetPb(kfvFieldsValues(tuple), entry)) + { + SWSS_LOG_WARN("Failed to convert KeyOpFieldsValuesTuple to HaSet entry for %s, invalid values probably.", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addHaSetEntry(key, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + result = DASH_RESULT_FAILURE; + it++; + } + writeResultToDB(dash_ha_set_result_table_, key, result); + + } + else if (op == DEL_COMMAND) + { + if(removeHaSetEntry(key)) + { + it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_ha_set_result_table_, key); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + +bool DashHaOrch::addHaScopeEntry(const std::string &key, const dash::ha_scope::HaScope &entry) +{ + SWSS_LOG_ENTER(); + + auto ha_scope_it = m_ha_scope_entries.find(key); + if (ha_scope_it != m_ha_scope_entries.end()) + { + bool success = true; + bool repeated_message = true; + + if (ha_scope_it->second.metadata.ha_role() != entry.ha_role()) + { + success = success && setHaScopeHaRole(key, entry); + repeated_message = false; + } + + if (entry.flow_reconcile_requested() == true) + { + success = success && setHaScopeFlowReconcileRequest(key); + repeated_message = false; + } + + if (entry.activate_role_requested() == true) + { + success = success && setHaScopeActivateRoleRequest(key); + repeated_message = false; + } + + if (ha_scope_it->second.metadata.disabled() != entry.disabled()) + { + success = success && setHaScopeDisabled(key, entry.disabled()); + repeated_message = false; + } + + if (repeated_message) + { + SWSS_LOG_WARN("HA Scope entry already exists for %s", key.c_str()); + } + else + { + SWSS_LOG_NOTICE("HA Scope entry updated for %s", key.c_str()); + } + + return success; + } + + std::map::iterator ha_set_it; + if (!entry.ha_set_id().empty()) + { + ha_set_it = m_ha_set_entries.find(entry.ha_set_id()); + } + else + { + /* ha_set_id field in ha_scope_table was added as a revision of detailed HLD, adding backward compatibility for ha_set_id mapping. */ + ha_set_it = m_ha_set_entries.find(key); + } + + if (ha_set_it == m_ha_set_entries.end()) + { + // If there is no HA Set entry, we cannot create HA Scope. + SWSS_LOG_ERROR("HA Set entry does not exist for %s", key.c_str()); + return false; + } + sai_object_id_t ha_set_oid = ha_set_it->second.ha_set_id; + + vector ha_scope_attrs; + sai_status_t status; + sai_object_id_t sai_ha_scope_oid = SAI_NULL_OBJECT_ID; + + sai_attribute_t ha_set_attr = {}; + ha_set_attr.id = SAI_HA_SCOPE_ATTR_HA_SET_ID; + ha_set_attr.value.oid = ha_set_oid; + ha_scope_attrs.push_back(ha_set_attr); + + sai_attribute_t ha_role_attr = {}; + ha_role_attr.id = SAI_HA_SCOPE_ATTR_DASH_HA_ROLE; + ha_role_attr.value.u16 = to_sai(entry.ha_role()); + ha_scope_attrs.push_back(ha_role_attr); + + sai_attribute_t disabled_attr = {}; + disabled_attr.id = SAI_HA_SCOPE_ATTR_ADMIN_STATE; + disabled_attr.value.booldata = !entry.disabled(); + ha_scope_attrs.push_back(disabled_attr); + + if (entry.has_vip_v4() && entry.vip_v4().has_ipv4()) + { + sai_ip_address_t sai_vip_v4 = {}; + if(to_sai(entry.vip_v4(), sai_vip_v4)) + { + sai_attribute_t vip_v4_attr = {}; + vip_v4_attr.id = SAI_HA_SCOPE_ATTR_VIP_V4; + vip_v4_attr.value.ipaddr = sai_vip_v4; + ha_scope_attrs.push_back(vip_v4_attr); + } + else + { + SWSS_LOG_WARN("Failed to convert VIP V4 for HA Scope %s", key.c_str()); + } + } + else if (ha_set_it->second.metadata.has_vip_v4() && ha_set_it->second.metadata.vip_v4().has_ipv4()) + { + SWSS_LOG_NOTICE("HA Scope entry %s does not have VIP V4, using HA Set metadata", key.c_str()); + + sai_ip_address_t sai_vip_v4 = {}; + if (to_sai(ha_set_it->second.metadata.vip_v4(), sai_vip_v4)) + { + sai_attribute_t vip_v4_attr = {}; + vip_v4_attr.id = SAI_HA_SCOPE_ATTR_VIP_V4; + vip_v4_attr.value.ipaddr = sai_vip_v4; + ha_scope_attrs.push_back(vip_v4_attr); + } + else + { + SWSS_LOG_WARN("Failed to convert VIP V4 for HA Scope %s", key.c_str()); + } + } + + if (entry.has_vip_v6() && entry.vip_v6().has_ipv6()) + { + sai_ip_address_t sai_vip_v6 = {}; + if(to_sai(entry.vip_v6(), sai_vip_v6)) + { + sai_attribute_t vip_v6_attr = {}; + vip_v6_attr.id = SAI_HA_SCOPE_ATTR_VIP_V6; + vip_v6_attr.value.ipaddr = sai_vip_v6; + ha_scope_attrs.push_back(vip_v6_attr); + } + else + { + SWSS_LOG_WARN("Failed to convert VIP V6 for HA Scope %s", key.c_str()); + } + } + else if (ha_set_it->second.metadata.has_vip_v6() && !ha_set_it->second.metadata.vip_v6().ipv6().empty()) + { + sai_ip_address_t sai_vip_v6 = {}; + if (to_sai(ha_set_it->second.metadata.vip_v6(), sai_vip_v6)) + { + sai_attribute_t vip_v6_attr = {}; + vip_v6_attr.id = SAI_HA_SCOPE_ATTR_VIP_V6; + vip_v6_attr.value.ipaddr = sai_vip_v6; + ha_scope_attrs.push_back(vip_v6_attr); + } + else + { + SWSS_LOG_WARN("Failed to convert VIP V6 for HA Scope %s", key.c_str()); + } + } + + status = sai_dash_ha_api->create_ha_scope(&sai_ha_scope_oid, + gSwitchId, + static_cast(ha_scope_attrs.size()), + ha_scope_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create HA Scope object in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + m_ha_scope_entries[key] = HaScopeEntry {sai_ha_scope_oid, entry, getNowTime(), SAI_DASH_HA_STATE_DEAD, getNowTime()}; + SWSS_LOG_NOTICE("Created HA Scope object for %s", key.c_str()); + + // set HA Scope ID to ENI + if (ha_set_it->second.metadata.scope() == dash::types::HaScope::HA_SCOPE_ENI) + { + auto eni_entry = m_dash_orch->getEni(key); + if (eni_entry == nullptr) + { + SWSS_LOG_ERROR("ENI entry does not exist for %s", key.c_str()); + return false; + } + + return setEniHaScopeId(eni_entry->eni_id, sai_ha_scope_oid); + + } else if (ha_set_it->second.metadata.scope() == dash::types::HaScope::HA_SCOPE_DPU) + { + auto eni_table = m_dash_orch->getEniTable(); + auto it = eni_table->begin(); + bool success = true; + while (it != eni_table->end()) + { + if (!setEniHaScopeId(it->second.eni_id, sai_ha_scope_oid)) + { + SWSS_LOG_ERROR("Failed to set HA Scope ID for ENI %s", it->first.c_str()); + success = false; + } + it++; + } + + if (!success) + { + return false; + } + } + else + { + SWSS_LOG_ERROR("Invalid HA Scope type %s: %s", ha_set_it->first.c_str(), dash::types::HaScope_Name(ha_set_it->second.metadata.scope()).c_str()); + return false; + } + + return true; +} + +bool DashHaOrch::setHaScopeHaRole(const std::string &key, const dash::ha_scope::HaScope &entry) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t ha_scope_id = m_ha_scope_entries[key].ha_scope_id; + + /* + Remove bfd passive sessions in planned shutdown (scope == DPU) + */ + if (entry.ha_role() == dash::types::HA_ROLE_DEAD + && !m_ha_set_entries.empty()) + { + if (has_dpu_scope()) + { + m_bfd_orch->removeAllSoftwareBfdSessions(); + } + } + + sai_attribute_t ha_scope_attr; + ha_scope_attr.id = SAI_HA_SCOPE_ATTR_DASH_HA_ROLE; + ha_scope_attr.value.u32 = to_sai(entry.ha_role()); + + sai_status_t status = sai_dash_ha_api->set_ha_scope_attribute(ha_scope_id, + &ha_scope_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set HA Scope role in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + SWSS_LOG_NOTICE("Set HA Scope role for %s to %s", key.c_str(), (dash::types::HaRole_Name(entry.ha_role())).c_str()); + + return true; +} + +bool DashHaOrch::setHaScopeFlowReconcileRequest(const std::string &key) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t ha_scope_id = m_ha_scope_entries[key].ha_scope_id; + + sai_attribute_t ha_scope_attr; + ha_scope_attr.id = SAI_HA_SCOPE_ATTR_FLOW_RECONCILE_REQUESTED; + ha_scope_attr.value.booldata = true; + + sai_status_t status = sai_dash_ha_api->set_ha_scope_attribute(ha_scope_id, + &ha_scope_attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set HA Scope flow reconcile request in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + SWSS_LOG_NOTICE("Set HA Scope flow reconcile request for %s", key.c_str()); + + std::vector fvs = {{"flow_reconcile_pending", "false"}}; + m_dpuStateDbHaScopeTable->set(key, fvs); + + return true; +} + +bool DashHaOrch::setHaScopeActivateRoleRequest(const std::string &key) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t ha_scope_id = m_ha_scope_entries[key].ha_scope_id; + + sai_attribute_t ha_scope_attr; + ha_scope_attr.id = SAI_HA_SCOPE_ATTR_ACTIVATE_ROLE; + ha_scope_attr.value.booldata = true; + + sai_status_t status = sai_dash_ha_api->set_ha_scope_attribute(ha_scope_id, + &ha_scope_attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set HA Scope activate role request in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + SWSS_LOG_NOTICE("Set HA Scope activate role request for %s", key.c_str()); + + std::vector fvs = {{"activate_role_pending", "false"}}; + m_dpuStateDbHaScopeTable->set(key, fvs); + + return true; +} + +bool DashHaOrch::setHaScopeDisabled(const std::string &key, bool disabled) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t ha_scope_id = m_ha_scope_entries[key].ha_scope_id; + + sai_attribute_t ha_scope_attr; + ha_scope_attr.id = SAI_HA_SCOPE_ATTR_ADMIN_STATE; + ha_scope_attr.value.booldata = !disabled; + + sai_status_t status = sai_dash_ha_api->set_ha_scope_attribute(ha_scope_id, + &ha_scope_attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set HA Scope admin state to %d in SAI for %s", disabled, key.c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + m_ha_scope_entries[key].metadata.set_disabled(disabled); + SWSS_LOG_NOTICE("Set HA Scope admin state for %s to %d", key.c_str(), !disabled); + + return true; +} + +bool DashHaOrch::setEniHaScopeId(const sai_object_id_t eni_id, const sai_object_id_t ha_scope_id) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t eni_attr; + eni_attr.id = SAI_ENI_ATTR_HA_SCOPE_ID; + eni_attr.value.oid = ha_scope_id; + sai_status_t status = sai_dash_eni_api->set_eni_attribute(eni_id, &eni_attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set HA Scope ID for ENI %s", std::to_string(eni_id).c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +bool DashHaOrch::removeHaScopeEntry(const std::string &key) +{ + SWSS_LOG_ENTER(); + + auto it = m_ha_scope_entries.find(key); + + if (it == m_ha_scope_entries.end()) + { + SWSS_LOG_WARN("HA Scope entry does not exist for %s", key.c_str()); + return true; + } + + sai_status_t status = sai_dash_ha_api->remove_ha_scope(it->second.ha_scope_id); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove HA Scope object in SAI for %s", key.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_HA, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + m_ha_scope_entries.erase(it); + SWSS_LOG_NOTICE("Removed HA Scope object for %s", key.c_str()); + + return true; +} + +void DashHaOrch::doTaskHaScopeTable(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + uint32_t result; + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const auto& key = kfvKey(tuple); + const auto& op = kfvOp(tuple); + result = DASH_RESULT_SUCCESS; + + if (op == SET_COMMAND) + { + dash::ha_scope::HaScope entry; + + auto existing_it = m_ha_scope_entries.find(key); + if (existing_it != m_ha_scope_entries.end()) + { + // Start with existing entry to preserve unmodified fields + entry.CopyFrom(existing_it->second.metadata); + } + + /* + * For HA internal tables, kfv format was used instead of serialized pb objects in the end. + * I decided to keep protobuf conversion still for: + * - ensuring the data integrity. + * - in case we need to switch to protobuf in the future. + */ + // if (!parsePbMessage(kfvFieldsValues(tuple), entry)) + // { + // SWSS_LOG_WARN("Requires protobuf at HaScope :%s", key.c_str()); + // it = consumer.m_toSync.erase(it); + // continue; + // } + + if (!convertKfvToHaScopePb(kfvFieldsValues(tuple), entry)) + { + SWSS_LOG_WARN("Failed to convert KeyOpFieldsValuesTuple to HaScope entry for %s, invalid values probably.", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addHaScopeEntry(key, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + result = DASH_RESULT_FAILURE; + it++; + } + writeResultToDB(dash_ha_scope_result_table_, key, result); + } + else if (op == DEL_COMMAND) + { + if(removeHaScopeEntry(key)) + { + it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_ha_scope_result_table_, key); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + +void DashHaOrch::doTaskBfdSessionTable(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const auto& key = kfvKey(tuple); + const auto& op = kfvOp(tuple); + + SWSS_LOG_DEBUG("Processing BFD Session table"); + + if (op == SET_COMMAND) + { + if (has_eni_scope()) + { + m_bfd_orch->createSoftwareBfdSession(key, kfvFieldsValues(tuple)); + } + + // Per HLD, once the state is moved to Active/Standby/Standalone state, we will create the BFD responder on DPU. + bool has_dpu_scope_ha_state_activated = false; + if (has_dpu_scope()) + { + for (const auto& ha_scope_entry : m_ha_scope_entries) + { + if (in(ha_scope_entry.second.ha_state, {SAI_DASH_HA_STATE_ACTIVE, + SAI_DASH_HA_STATE_STANDBY, + SAI_DASH_HA_STATE_STANDALONE})) + { + has_dpu_scope_ha_state_activated = true; + break; + } + } + } + + if (has_dpu_scope_ha_state_activated) + { + m_bfd_orch->createSoftwareBfdSession(key, kfvFieldsValues(tuple)); + } + + /* + Caching BFD sessions for planned ha_role up->down->up. + */ + if ((!has_eni_scope())) + { + SWSS_LOG_INFO("Caching BFD session %s as there is no non-dead DPU HA Scope", key.c_str()); + + m_bfd_session_pending_creation[key] = kfvFieldsValues(tuple); + } + + it = consumer.m_toSync.erase(it); + } + else if (op == DEL_COMMAND) + { + m_bfd_orch->removeSoftwareBfdSession(key); + it = consumer.m_toSync.erase(it); + m_bfd_session_pending_creation.erase(key); + } + } +} + +void DashHaOrch::doTask(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + if (consumer.getTableName() == APP_DASH_HA_SET_TABLE_NAME) + { + doTaskHaSetTable(consumer); + } + else if (consumer.getTableName() == APP_DASH_HA_SCOPE_TABLE_NAME) + { + doTaskHaScopeTable(consumer); + } + else if (consumer.getTableName() == APP_BFD_SESSION_TABLE_NAME) + { + doTaskBfdSessionTable(consumer); + } + else + { + SWSS_LOG_ERROR("Unknown table: %s", consumer.getTableName().c_str()); + } +} + +void DashHaOrch::doTask(NotificationConsumer &consumer) +{ + SWSS_LOG_ENTER(); + + std::deque events; + consumer.pops(events); + + for (auto &event : events) + { + std::string op = kfvOp(event); + std::string data = kfvKey(event); + std::vector values = kfvFieldsValues(event); + + if (op == "ha_set_event") + { + std::time_t now_time = getNowTime(); + + uint32_t count; + sai_ha_set_event_data_t *ha_set_event = nullptr; + + sai_deserialize_ha_set_event_ntf(data, count, &ha_set_event); + + for (uint32_t i = 0; i < count; i++) + { + sai_object_id_t ha_set_id = ha_set_event[i].ha_set_id; + sai_ha_set_event_t event_type = ha_set_event[i].event_type; + + SWSS_LOG_INFO("Get HA Set event notification id:%" PRIx64 " event: Data plane channel goes %s", ha_set_id, sai_ha_set_event_type_name.at(event_type).c_str()); + + auto key = getHaSetObjectKey(ha_set_id); + if (key.empty()) + { + SWSS_LOG_ERROR("HA Set object not found for ID: %" PRIx64, ha_set_id); + continue; + } + std::vector fvs = { + {"last_updated_time", to_string(now_time)}, + {"dp_channel_is_alive", sai_ha_set_event_type_name.at(event_type)} + }; + m_dpuStateDbHaSetTable->set(key, fvs); + } + sai_deserialize_free_ha_set_event_ntf(count, ha_set_event); + } + + if (op == "ha_scope_event") + { + std::time_t now_time = getNowTime(); + + uint32_t count; + sai_ha_scope_event_data_t *ha_scope_event = nullptr; + + sai_deserialize_ha_scope_event_ntf(data, count, &ha_scope_event); + + for (uint32_t i = 0; i < count; i++) + { + sai_ha_scope_event_t event_type = ha_scope_event[i].event_type; + sai_object_id_t ha_scope_id = ha_scope_event[i].ha_scope_id; + + SWSS_LOG_INFO("Get HA Scope event notification id:%" PRIx64 " event: %s", ha_scope_id, sai_ha_scope_event_type_name.at(event_type).c_str()); + + auto key = getHaScopeObjectKey(ha_scope_id); + if (key.empty()) + { + SWSS_LOG_ERROR("HA Scope object not found for ID: %" PRIx64, ha_scope_id); + continue; + } + + std::vector fvs = { + {"last_updated_time", to_string(now_time)}, + {"ha_term", to_string(ha_scope_event[i].flow_version)} + }; + + auto ha_role = to_pb(ha_scope_event[i].ha_role); + std::time_t role_start_time = now_time; + + if (m_ha_scope_entries[key].metadata.ha_role() != ha_role) + { + m_ha_scope_entries[key].metadata.set_ha_role(ha_role); + m_ha_scope_entries[key].last_role_start_time = now_time; + SWSS_LOG_NOTICE("HA Scope role changed for %s to %s", key.c_str(), dash::types::HaRole_Name(ha_role).c_str()); + } else + { + role_start_time = m_ha_scope_entries[key].last_role_start_time; + } + + fvs.push_back({"ha_role", sai_ha_role_name.at(ha_scope_event[i].ha_role)}); + fvs.push_back({"ha_role_start_time", to_string(role_start_time)}); + + switch (event_type) + { + case SAI_HA_SCOPE_EVENT_FLOW_RECONCILE_NEEDED: + fvs.push_back({"flow_reconcile_pending", "true"}); + break; + case SAI_HA_SCOPE_EVENT_SPLIT_BRAIN_DETECTED: + fvs.push_back({"brainsplit_recover_pending", "true"}); + break; + case SAI_HA_SCOPE_EVENT_STATE_CHANGED: + if (in(ha_scope_event[i].ha_state, {SAI_DASH_HA_STATE_PENDING_STANDALONE_ACTIVATION, + SAI_DASH_HA_STATE_PENDING_ACTIVE_ACTIVATION, + SAI_DASH_HA_STATE_PENDING_STANDBY_ACTIVATION})) + { + fvs.push_back({"activate_role_pending", "true"}); + SWSS_LOG_NOTICE("DPU is pending on role activation for %s", key.c_str()); + } + else if (in(ha_scope_event[i].ha_state, {SAI_DASH_HA_STATE_ACTIVE, + SAI_DASH_HA_STATE_STANDBY})) + { + fvs.push_back({"brainsplit_recover_pending", "false"}); + } + + fvs.push_back({"ha_state", sai_ha_state_name.at(ha_scope_event[i].ha_state)}); + fvs.push_back({"ha_state_start_time", to_string(now_time)}); + + m_ha_scope_entries[key].ha_state = ha_scope_event[i].ha_state; + m_ha_scope_entries[key].last_state_start_time = now_time; + + if (has_dpu_scope() && in(ha_scope_event[i].ha_state, {SAI_DASH_HA_STATE_ACTIVE, + SAI_DASH_HA_STATE_STANDBY, + SAI_DASH_HA_STATE_STANDALONE})) + { + processCachedBfdSessions(); + } + break; + default: + SWSS_LOG_ERROR("Unknown HA Scope event type %d for %s", event_type, key.c_str()); + } + + m_dpuStateDbHaScopeTable->set(key, fvs); + + } + sai_deserialize_free_ha_scope_event_ntf(count, ha_scope_event); + } + } +} + +bool DashHaOrch::convertKfvToHaSetPb(const std::vector &kfv, dash::ha_set::HaSet &entry) +{ + SWSS_LOG_ENTER(); + + for (const auto &fv : kfv) + { + const std::string &field = fvField(fv); + const std::string &value = fvValue(fv); + + if (field == "version") + { + entry.set_version(value); + } + else if (field == "vip_v4") + { + dash::types::IpAddress temp_ip; + if (!to_pb(value, temp_ip) || !temp_ip.has_ipv4()) + { + SWSS_LOG_ERROR("Invalid IPv4 address %s", value.c_str()); + return false; + } + entry.mutable_vip_v4()->CopyFrom(temp_ip); + } + else if (field == "vip_v6") + { + dash::types::IpAddress temp_ip; + if (!to_pb(value, temp_ip) || !temp_ip.has_ipv6()) + { + SWSS_LOG_ERROR("Invalid IPv6 address %s", value.c_str()); + return false; + } + entry.mutable_vip_v6()->CopyFrom(temp_ip); + } + else if (field == "owner") + { + dash::types::HaOwner owner; + if (!to_pb(value, owner)) + { + return false; + } + entry.set_owner(owner); + } + else if (field == "scope") + { + dash::types::HaScope ha_scope; + if (!to_pb(value, ha_scope)) + { + return false; + } + entry.set_scope(ha_scope); + } + else if (field == "local_npu_ip") + { + if (!to_pb(value, *entry.mutable_local_npu_ip())) + { + SWSS_LOG_ERROR("Invalid IP address %s", value.c_str()); + return false; + } + } + else if (field == "local_ip") + { + if (!to_pb(value, *entry.mutable_local_ip())) + { + SWSS_LOG_ERROR("Invalid IP address %s", value.c_str()); + return false; + } + } + else if (field == "peer_ip") + { + if (!to_pb(value, *entry.mutable_peer_ip())) + { + SWSS_LOG_ERROR("Invalid IP address %s", value.c_str()); + return false; + } + } + else if (field == "cp_data_channel_port") + { + entry.set_cp_data_channel_port(to_uint(value)); + } + else if (field == "dp_channel_dst_port") + { + entry.set_dp_channel_dst_port(to_uint(value)); + } + else if (field == "dp_channel_src_port_min") + { + entry.set_dp_channel_src_port_min(to_uint(value)); + } + else if (field == "dp_channel_src_port_max") + { + entry.set_dp_channel_src_port_max(to_uint(value)); + } + else if (field == "dp_channel_probe_interval_ms") + { + entry.set_dp_channel_probe_interval_ms(to_uint(value)); + } + else if (field == "dp_channel_probe_fail_threshold") + { + entry.set_dp_channel_probe_fail_threshold(to_uint(value)); + } + else + { + SWSS_LOG_WARN("Unknown field %s in HA Set entry", field.c_str()); + } + } + return true; +} + +bool DashHaOrch::convertKfvToHaScopePb(const std::vector &kfv, dash::ha_scope::HaScope &entry) +{ + SWSS_LOG_ENTER(); + + for (const auto &fv : kfv) + { + const std::string &field = fvField(fv); + const std::string &value = fvValue(fv); + + if (field == "version") + { + entry.set_version(value); + } + else if (field == "disabled") + { + entry.set_disabled(value == "true" || value == "1"); + } + else if (field == "ha_role") + { + dash::types::HaRole ha_role; + if (!to_pb(value, ha_role)) + { + return false; + } + entry.set_ha_role(ha_role); + } + else if (field == "flow_reconcile_requested") + { + entry.set_flow_reconcile_requested(value == "true" || value == "1"); + } + else if (field == "activate_role_requested") + { + entry.set_activate_role_requested(value == "true" || value == "1"); + } + else if (field == "vip_v4") + { + dash::types::IpAddress temp_ip; + if (!to_pb(value, temp_ip) || !temp_ip.has_ipv4()) + { + SWSS_LOG_ERROR("Invalid IPv4 address %s", value.c_str()); + return false; + } + entry.mutable_vip_v4()->CopyFrom(temp_ip); + } + else if (field == "vip_v6") + { + dash::types::IpAddress temp_ip; + if (!to_pb(value, temp_ip) || !temp_ip.has_ipv6()) + { + SWSS_LOG_ERROR("Invalid IPv6 address %s", value.c_str()); + return false; + } + entry.mutable_vip_v6()->CopyFrom(temp_ip); + } + else if (field == "ha_set_id") + { + entry.set_ha_set_id(value); + } + else + { + SWSS_LOG_WARN("Unknown field %s in HA Scope entry", field.c_str()); + } + } + return true; +} + +bool DashHaOrch::has_dpu_scope() +{ + for (const auto& ha_set_entry : m_ha_set_entries) + { + if (ha_set_entry.second.metadata.scope() == dash::types::HA_SCOPE_DPU) + { + return true; + } + } + return false; +} + +bool DashHaOrch::has_eni_scope() +{ + for (const auto& ha_set_entry : m_ha_set_entries) + { + if (ha_set_entry.second.metadata.scope() == dash::types::HA_SCOPE_ENI) + { + return true; + } + } + return false; +} + +void DashHaOrch::processCachedBfdSessions() +{ + /* + Create bfd passive sessions cached when moving out of DEAD role (scope == DPU) + */ + if (has_dpu_scope() && !m_bfd_session_pending_creation.empty()) + { + for (const auto& bfd_entry : m_bfd_session_pending_creation) + { + m_bfd_orch->createSoftwareBfdSession(bfd_entry.first, bfd_entry.second); + } + } +} diff --git a/orchagent/dash/dashhaorch.h b/orchagent/dash/dashhaorch.h new file mode 100644 index 00000000000..c0732fb3455 --- /dev/null +++ b/orchagent/dash/dashhaorch.h @@ -0,0 +1,117 @@ +#ifndef DASHHAORCH_H +#define DASHHAORCH_H +#include + +#include "dbconnector.h" +#include "dashorch.h" +#include "bfdorch.h" +#include "zmqorch.h" +#include "zmqserver.h" +#include "saitypes.h" +#include "notifier.h" +#include "directory.h" +#include "sai_serialize.h" +#include "notifications.h" + +#include "dash_api/ha_set.pb.h" +#include "dash_api/ha_scope.pb.h" + +#include "pbutils.h" + +struct HaSetEntry +{ + sai_object_id_t ha_set_id; + dash::ha_set::HaSet metadata; +}; + +struct HaScopeEntry +{ + sai_object_id_t ha_scope_id; + dash::ha_scope::HaScope metadata; + std::time_t last_role_start_time; + + sai_dash_ha_state_t ha_state; + std::time_t last_state_start_time; +}; + +typedef std::map HaSetTable; +typedef std::map HaScopeTable; +typedef std::map> DashBfdSessionTable; + +template +bool in(T value, std::initializer_list list) { + return std::find(list.begin(), list.end(), value) != list.end(); +} + +class DashHaOrch : public ZmqOrch +{ +public: + DashHaOrch(swss::DBConnector *db, const std::vector &tableNames, DashOrch *dash_orch, BfdOrch *bfd_orch, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer); + +protected: + HaSetTable m_ha_set_entries; + HaScopeTable m_ha_scope_entries; + DashBfdSessionTable m_bfd_session_pending_creation; + + DashOrch *m_dash_orch; + BfdOrch *m_bfd_orch; + + void doTask(ConsumerBase &consumer); + void doTask(swss::NotificationConsumer &consumer); + void doTaskEniTable(ConsumerBase &consumer); + void doTaskHaSetTable(ConsumerBase &consumer); + void doTaskHaScopeTable(ConsumerBase &consumer); + void doTaskBfdSessionTable(ConsumerBase &consumer); + + bool addHaSetEntry(const std::string &key, const dash::ha_set::HaSet &entry); + bool removeHaSetEntry(const std::string &key); + bool addHaScopeEntry(const std::string &key, const dash::ha_scope::HaScope &entry); + bool removeHaScopeEntry(const std::string &key); + bool setHaScopeHaRole(const std::string &key, const dash::ha_scope::HaScope &entry); + bool setHaScopeFlowReconcileRequest(const std::string &key); + bool setHaScopeActivateRoleRequest(const std::string &key); + bool setHaScopeDisabled(const std::string &key, bool disabled); + bool setEniHaScopeId(const sai_object_id_t eni_id, const sai_object_id_t ha_scope_id); + bool register_ha_set_notifier(); + bool register_ha_scope_notifier(); + bool updateExistingHaSetEntry(const std::string &key, const dash::ha_set::HaSet &entry, sai_object_id_t sai_ha_set_oid); + + bool has_dpu_scope(); + bool has_eni_scope(); + + void processCachedBfdSessions(); + + bool convertKfvToHaSetPb( + const std::vector &kfv, + dash::ha_set::HaSet &entry + ); + + bool convertKfvToHaScopePb( + const std::vector &kfv, + dash::ha_scope::HaScope &entry + ); + + std::string getHaSetObjectKey(const sai_object_id_t ha_set_id); + std::string getHaScopeObjectKey(const sai_object_id_t ha_scope_id); + std::time_t getNowTime(){ + return std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()); + }; + + std::unique_ptr dash_ha_set_result_table_; + std::unique_ptr dash_ha_scope_result_table_; + + std::unique_ptr m_dpuStateDbConnector; + std::unique_ptr m_dpuStateDbHaSetTable; + std::unique_ptr m_dpuStateDbHaScopeTable; + + swss::NotificationConsumer* m_haSetNotificationConsumer; + swss::NotificationConsumer* m_haScopeNotificationConsumer; + +public: + const HaSetTable& getHaSetEntries() const { return m_ha_set_entries; }; + const HaScopeTable& getHaScopeEntries() const { return m_ha_scope_entries; }; + const DashBfdSessionTable& getBfdSessionPendingCreation() const { return m_bfd_session_pending_creation; }; + virtual HaScopeEntry getHaScopeForEni(const std::string& eni); +}; + +#endif // DASHHAORCH_H diff --git a/orchagent/dash/dashmeterorch.cpp b/orchagent/dash/dashmeterorch.cpp new file mode 100644 index 00000000000..dba2d17f2e7 --- /dev/null +++ b/orchagent/dash/dashmeterorch.cpp @@ -0,0 +1,706 @@ +#include +#include +#include +#include +#include + +#include "directory.h" +#include "dashmeterorch.h" +#include "taskworker.h" +#include "pbutils.h" +#include "crmorch.h" +#include "sai.h" +#include "saiextensions.h" +#include "saihelper.h" + +using namespace std; +using namespace swss; +using namespace dash::meter_policy; +using namespace dash::meter_rule; + +extern sai_dash_meter_api_t* sai_dash_meter_api; +extern sai_object_id_t gSwitchId; +extern size_t gMaxBulkSize; +extern CrmOrch *gCrmOrch; +extern Directory gDirectory; +extern bool gTraditionalFlexCounter; + +#define METER_FLEX_COUNTER_UPD_INTERVAL 1 + +DashMeterOrch::DashMeterOrch(DBConnector *db, const vector &tables, DashOrch *dash_orch, DBConnector *app_state_db, ZmqServer *zmqServer) : + m_meter_stat_manager(METER_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, METER_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + meter_rule_bulker_(sai_dash_meter_api, gSwitchId, gMaxBulkSize), + ZmqOrch(db, tables, zmqServer), + m_dash_orch(dash_orch) +{ + SWSS_LOG_ENTER(); + + m_counter_db = std::shared_ptr(new DBConnector("COUNTERS_DB", 0)); + m_asic_db = std::shared_ptr(new DBConnector("ASIC_DB", 0)); + + if (gTraditionalFlexCounter) + { + m_vid_to_rid_table = std::make_unique
(m_asic_db.get(), "VIDTORID"); + } + + auto intervT = timespec { .tv_sec = METER_FLEX_COUNTER_UPD_INTERVAL , .tv_nsec = 0 }; + m_meter_fc_update_timer = new SelectableTimer(intervT); + auto executorT = new ExecutableTimer(m_meter_fc_update_timer, this, "METER_FLEX_COUNTER_UPD_TIMER"); + Orch::addExecutor(executorT); + + /* Fetch the meter bucket counter Ids */ + m_meter_counter_stats.clear(); + auto stat_enum_list = queryAvailableCounterStats((sai_object_type_t)SAI_OBJECT_TYPE_METER_BUCKET_ENTRY); + for (auto &stat_enum: stat_enum_list) + { + auto counter_id = static_cast(stat_enum); + m_meter_counter_stats.emplace(sai_serialize_meter_bucket_entry_stat(counter_id)); + } +} + +sai_object_id_t DashMeterOrch::getMeterPolicyOid(const string& meter_policy) const +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it == meter_policy_entries_.end()) + { + return SAI_NULL_OBJECT_ID; + } + return it->second.meter_policy_oid; +} + +uint32_t DashMeterOrch::getMeterPolicyRuleCount(const string& meter_policy) const +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it == meter_policy_entries_.end()) + { + return 0; + } + return it->second.rule_count; +} + +sai_ip_addr_family_t DashMeterOrch::getMeterPolicyAddrFamily(const string& meter_policy) const +{ + SWSS_LOG_ENTER(); + sai_ip_addr_family_t addr_family = SAI_IP_ADDR_FAMILY_IPV4; + auto it = meter_policy_entries_.find(meter_policy); + + if (it != meter_policy_entries_.end()) + { + to_sai(it->second.metadata.ip_version(), addr_family); + } + return addr_family; +} + +bool DashMeterOrch::isV4(const string& meter_policy) const +{ + return (getMeterPolicyAddrFamily(meter_policy) == SAI_IP_ADDR_FAMILY_IPV4) ? true : false; +} + +void DashMeterOrch::incrMeterPolicyRuleCount(const string& meter_policy) +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it != meter_policy_entries_.end()) + { + it->second.rule_count += +1; + } + else + { + SWSS_LOG_WARN("Meter policy %s not found during rule count incr", meter_policy.c_str()); + } +} + +void DashMeterOrch::decrMeterPolicyRuleCount(const string& meter_policy) +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it != meter_policy_entries_.end()) + { + if (it->second.rule_count > 0) + { + it->second.rule_count += -1; + } + else + { + SWSS_LOG_WARN("Meter policy %s invalid rule count %d before decr", + meter_policy.c_str(), it->second.rule_count); + } + } + else + { + SWSS_LOG_WARN("Meter policy %s not found during rule count decr", meter_policy.c_str()); + } +} + +int32_t DashMeterOrch::getMeterPolicyEniBindCount(const string& meter_policy) const +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it == meter_policy_entries_.end()) + { + return 0; + } + return it->second.eni_bind_count; +} + +void DashMeterOrch::incrMeterPolicyEniBindCount(const string& meter_policy) +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it != meter_policy_entries_.end()) + { + it->second.eni_bind_count += 1; + SWSS_LOG_INFO("Meter policy %s updated ENI bind count is %d", meter_policy.c_str(), + getMeterPolicyEniBindCount(meter_policy)); + } + else + { + SWSS_LOG_WARN("Meter policy %s not found during bind count incr", meter_policy.c_str()); + } +} + +void DashMeterOrch::decrMeterPolicyEniBindCount(const string& meter_policy) +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it != meter_policy_entries_.end()) + { + if (it->second.eni_bind_count > 0) + { + it->second.eni_bind_count += -1; + } + else + { + SWSS_LOG_WARN("Meter policy %s invalid bind count %d before decr", + meter_policy.c_str(), it->second.eni_bind_count); + } + SWSS_LOG_INFO("Meter policy %s updated ENI bind count is %d", meter_policy.c_str(), + getMeterPolicyEniBindCount(meter_policy)); + } + else + { + SWSS_LOG_WARN("Meter policy %s not found during bind count decr", meter_policy.c_str()); + } +} + +bool DashMeterOrch::isMeterPolicyBound(const std::string& meter_policy) const +{ + SWSS_LOG_ENTER(); + auto it = meter_policy_entries_.find(meter_policy); + if (it == meter_policy_entries_.end()) + { + return false; + } + return it->second.eni_bind_count > 0; +} + +bool DashMeterOrch::addMeterPolicy(const string& meter_policy, MeterPolicyContext& ctxt) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t meter_policy_oid = getMeterPolicyOid(meter_policy); + if (meter_policy_oid != SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Meter policy %s already exists", meter_policy.c_str()); + return true; + } + + sai_ip_addr_family_t sai_addr_family = SAI_IP_ADDR_FAMILY_IPV4; + vector meter_policy_attrs; + sai_attribute_t meter_policy_attr; + + meter_policy_attr.id = SAI_METER_POLICY_ATTR_IP_ADDR_FAMILY; + to_sai(ctxt.metadata.ip_version(), sai_addr_family); + meter_policy_attr.value.u32 = sai_addr_family; + meter_policy_attrs.push_back(meter_policy_attr); + + sai_status_t status = sai_dash_meter_api->create_meter_policy(&meter_policy_oid, gSwitchId, (uint32_t)meter_policy_attrs.size(), meter_policy_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create Meter policy %s", meter_policy.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_METER, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + meter_policy_entries_[meter_policy] = { meter_policy_oid, ctxt.metadata, 0, 0}; + gCrmOrch->incCrmResUsedCounter(isV4(meter_policy) ? CrmResourceType::CRM_DASH_IPV4_METER_POLICY : CrmResourceType::CRM_DASH_IPV6_METER_POLICY); + SWSS_LOG_INFO("Meter policy %s added", meter_policy.c_str()); + + return true; +} + +bool DashMeterOrch::removeMeterPolicy(const string& meter_policy) +{ + SWSS_LOG_ENTER(); + + if (isMeterPolicyBound(meter_policy)) + { + SWSS_LOG_WARN("Cannot remove bound meter policy %s", meter_policy.c_str()); + return false; + } + + sai_object_id_t meter_policy_oid = getMeterPolicyOid(meter_policy); + if (meter_policy_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to find meter policy %s to remove", meter_policy.c_str()); + return true; + } + + uint32_t rule_count = getMeterPolicyRuleCount(meter_policy); + if (rule_count != 0) + { + SWSS_LOG_INFO("Failed to remove meter policy %s due to rule count %d ", meter_policy.c_str(), rule_count); + return true; + } + + sai_status_t status = sai_dash_meter_api->remove_meter_policy(meter_policy_oid); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove meter policy %s", meter_policy.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_METER, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + meter_policy_entries_.erase(meter_policy); + gCrmOrch->decCrmResUsedCounter(isV4(meter_policy) ? CrmResourceType::CRM_DASH_IPV4_METER_POLICY : CrmResourceType::CRM_DASH_IPV6_METER_POLICY); + SWSS_LOG_INFO("Meter policy %s removed", meter_policy.c_str()); + + return true; +} + +void DashMeterOrch::doTaskMeterPolicyTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + auto tuple = it->second; + auto op = kfvOp(tuple); + const string& key = kfvKey(tuple); + + if (op == SET_COMMAND) + { + MeterPolicyContext ctxt; + ctxt.meter_policy = key; + + if (!parsePbMessage(kfvFieldsValues(tuple), ctxt.metadata)) + { + SWSS_LOG_WARN("Requires protobuff at MeterPolicy :%s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + if (addMeterPolicy(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeMeterPolicy(key)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + + +bool DashMeterOrch::addMeterRule(const string& key, MeterRuleBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (meter_rule_entries_.find(key) != meter_rule_entries_.end()); + if (exists) + { + SWSS_LOG_WARN("Meter rule entry already exists for %s", key.c_str()); + return true; + } + + if (isMeterPolicyBound(ctxt.meter_policy)) + { + SWSS_LOG_WARN("Cannot add new rule %s to Meter policy %s as it is already bound", key.c_str(), ctxt.meter_policy.c_str()); + return true; + } + + sai_object_id_t meter_policy_oid = getMeterPolicyOid(ctxt.meter_policy); + if (meter_policy_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Retry for rule %s as meter policy %s not found", key.c_str(), ctxt.meter_policy.c_str()); + return false; + } + + auto& object_ids = ctxt.object_ids; + vector meter_rule_attrs; + sai_attribute_t meter_rule_attr; + + meter_rule_attr.id = SAI_METER_RULE_ATTR_DIP; + to_sai(ctxt.metadata.ip_prefix().ip(), meter_rule_attr.value.ipaddr); + meter_rule_attrs.push_back(meter_rule_attr); + + meter_rule_attr.id = SAI_METER_RULE_ATTR_DIP_MASK; + to_sai(ctxt.metadata.ip_prefix().mask(), meter_rule_attr.value.ipaddr); + meter_rule_attrs.push_back(meter_rule_attr); + + meter_rule_attr.id = SAI_METER_RULE_ATTR_METER_POLICY_ID; + meter_rule_attr.value.oid = meter_policy_oid; + meter_rule_attrs.push_back(meter_rule_attr); + + meter_rule_attr.id = SAI_METER_RULE_ATTR_METER_CLASS; + meter_rule_attr.value.u32 = (uint32_t) ctxt.metadata.metering_class(); // TBD 64 to 32 bit conversion + meter_rule_attrs.push_back(meter_rule_attr); + + meter_rule_attr.id = SAI_METER_RULE_ATTR_PRIORITY; + meter_rule_attr.value.u32 = ctxt.metadata.priority(); + meter_rule_attrs.push_back(meter_rule_attr); + + object_ids.emplace_back(); + meter_rule_bulker_.create_entry(&object_ids.back(), (uint32_t)meter_rule_attrs.size(), meter_rule_attrs.data()); + + return false; +} + +bool DashMeterOrch::addMeterRulePost(const string& key, const MeterRuleBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_ids = ctxt.object_ids; + if (object_ids.empty()) + { + return false; + } + + auto it_id = object_ids.begin(); + sai_object_id_t id = *it_id++; + if (id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to create meter rule entry for %s", key.c_str()); + return false; + } + + meter_rule_entries_[key] = { id, ctxt.metadata, ctxt.meter_policy, ctxt.rule_num }; + incrMeterPolicyRuleCount(ctxt.meter_policy); + + gCrmOrch->incCrmResUsedCounter(isV4(ctxt.meter_policy) ? CrmResourceType::CRM_DASH_IPV4_METER_RULE : CrmResourceType::CRM_DASH_IPV6_METER_RULE); + SWSS_LOG_INFO("Meter Rule entry for %s added", key.c_str()); + + return true; +} + +bool DashMeterOrch::removeMeterRule(const string& key, MeterRuleBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool exists = (meter_rule_entries_.find(key) != meter_rule_entries_.end()); + if (!exists) + { + SWSS_LOG_WARN("Failed to find meter rule entry %s to remove", key.c_str()); + return true; + } + if (isMeterPolicyBound(ctxt.meter_policy)) + { + SWSS_LOG_WARN("Cannot remove rule from meter policy %s as it is already bound", ctxt.meter_policy.c_str()); + return true; + } + + auto& object_statuses = ctxt.object_statuses; + object_statuses.emplace_back(); + meter_rule_bulker_.remove_entry(&object_statuses.back(), + meter_rule_entries_[key].meter_rule_oid); + + return false; +} + +bool DashMeterOrch::removeMeterRulePost(const string& key, const MeterRuleBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctxt.object_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + // Retry later if object has non-zero reference to it + if (status == SAI_STATUS_NOT_EXECUTED) + { + return false; + } + SWSS_LOG_ERROR("Failed to remove meter rule entry for %s", key.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_METER, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + gCrmOrch->decCrmResUsedCounter(isV4(ctxt.meter_policy) ? CrmResourceType::CRM_DASH_IPV4_METER_RULE : CrmResourceType::CRM_DASH_IPV6_METER_RULE); + meter_rule_entries_.erase(key); + decrMeterPolicyRuleCount(ctxt.meter_policy); + SWSS_LOG_INFO("Meter rule entry removed for %s", key.c_str()); + + return true; +} + + +void DashMeterOrch::doTaskMeterRuleTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + std::map, + MeterRuleBulkContext> toBulk; + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + const string& key = kfvKey(tuple); + auto op = kfvOp(tuple); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(key, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto &ctxt = rc.first->second; + + if (!inserted) + { + ctxt.clear(); + } + + string& meter_policy = ctxt.meter_policy; + uint32_t& rule_num = ctxt.rule_num; + + vector keys = tokenize(key, ':'); + meter_policy = keys[0]; + string rule_num_str; + size_t pos = key.find(":", meter_policy.length()); + rule_num_str = key.substr(pos + 1); + rule_num = stoi(rule_num_str); + + if (op == SET_COMMAND) + { + if (!parsePbMessage(kfvFieldsValues(tuple), ctxt.metadata)) + { + SWSS_LOG_WARN("Requires protobuff at MeterRule :%s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + if (addMeterRule(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeMeterRule(key, ctxt)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } + + meter_rule_bulker_.flush(); + + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + KeyOpFieldsValuesTuple t = it_prev->second; + string key = kfvKey(t); + string op = kfvOp(t); + auto found = toBulk.find(make_pair(key, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + + const auto& ctxt = found->second; + const auto& object_statuses = ctxt.object_statuses; + const auto& object_ids = ctxt.object_ids; + + if (op == SET_COMMAND) + { + if (object_ids.empty()) + { + it_prev++; + continue; + } + + if (addMeterRulePost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + else if (op == DEL_COMMAND) + { + if (object_statuses.empty()) + { + it_prev++; + continue; + } + + if (removeMeterRulePost(key, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + it_prev++; + } + } + } + } +} + +void DashMeterOrch::doTask(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + const auto& tn = consumer.getTableName(); + + SWSS_LOG_INFO("Table name: %s", tn.c_str()); + + if (tn == APP_DASH_METER_POLICY_TABLE_NAME) + { + doTaskMeterPolicyTable(consumer); + } + else if (tn == APP_DASH_METER_RULE_TABLE_NAME) + { + doTaskMeterRuleTable(consumer); + } + else + { + SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); + } +} + +void DashMeterOrch::addEniToMeterFC(sai_object_id_t oid, const string &name) +{ + if (!m_meter_fc_status) + { + return; + } + auto was_empty = m_meter_stat_work_queue.empty(); + m_meter_stat_work_queue[oid] = name; + if (was_empty) + { + m_meter_fc_update_timer->start(); + } +} + +void DashMeterOrch::removeEniFromMeterFC(sai_object_id_t oid, const string &name) +{ + SWSS_LOG_ENTER(); + + if (oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Cannot remove meter counter on NULL OID for eni %s", name.c_str()); + return; + } + if (m_meter_stat_work_queue.find(oid) != m_meter_stat_work_queue.end()) + { + m_meter_stat_work_queue.erase(oid); + return; + } + + m_meter_stat_manager.clearCounterIdList(oid); + SWSS_LOG_INFO("Unregistering FC for ENI %s, oid %s", name.c_str(), sai_serialize_object_id(oid).c_str()); +} + +void DashMeterOrch::handleMeterFCStatusUpdate(bool enabled) +{ + DashOrch *dash_orch = gDirectory.get(); + bool prev_enabled = m_meter_fc_status; + m_meter_fc_status = enabled; /* Update the status */ + if (!enabled && prev_enabled) + { + m_meter_fc_update_timer->stop(); + dash_orch->refreshMeterFCStats(false); /* Clear any existing FC entries */ + } + else if (enabled && !prev_enabled) + { + dash_orch->refreshMeterFCStats(true); + m_meter_fc_update_timer->start(); + } +} + +void DashMeterOrch::doTask(SelectableTimer &timer) +{ + SWSS_LOG_ENTER(); + + if (!m_meter_fc_status) + { + m_meter_fc_update_timer->stop(); + return ; + } + + for (auto it = m_meter_stat_work_queue.begin(); it != m_meter_stat_work_queue.end(); ) + { + string value; + const auto id = sai_serialize_object_id(it->first); + if (!gTraditionalFlexCounter || m_vid_to_rid_table->hget("", id, value)) + { + SWSS_LOG_INFO("Registering FC for ENI %s, oid %s", it->second.c_str(), id.c_str()); + std::vector eniNameFvs; + eniNameFvs.emplace_back(it->second, id); + + m_meter_stat_manager.setCounterIdList(it->first, CounterType::DASH_METER, m_meter_counter_stats); + it = m_meter_stat_work_queue.erase(it); + } + else + { + ++it; + } + } + + if (m_meter_stat_work_queue.empty()) + { + m_meter_fc_update_timer->stop(); + } +} diff --git a/orchagent/dash/dashmeterorch.h b/orchagent/dash/dashmeterorch.h new file mode 100644 index 00000000000..ac38f15457f --- /dev/null +++ b/orchagent/dash/dashmeterorch.h @@ -0,0 +1,116 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "zmqorch.h" +#include "zmqserver.h" + +#include "dashorch.h" +#include "dash_api/meter_policy.pb.h" +#include "dash_api/meter_rule.pb.h" + +#define METER_STAT_COUNTER_FLEX_COUNTER_GROUP "METER_STAT_COUNTER" +#define METER_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 + +struct MeterPolicyContext +{ + std::string meter_policy; + dash::meter_policy::MeterPolicy metadata; +}; +struct MeterPolicyEntry +{ + sai_object_id_t meter_policy_oid; + dash::meter_policy::MeterPolicy metadata; + int32_t rule_count; + int32_t eni_bind_count; +}; +typedef std::map MeterPolicyTable; + +struct MeterRuleBulkContext +{ + std::string meter_policy; + uint32_t rule_num; + dash::meter_rule::MeterRule metadata; + std::deque object_ids; + std::deque object_statuses; + MeterRuleBulkContext() {} + MeterRuleBulkContext(const MeterRuleBulkContext&) = delete; + MeterRuleBulkContext(MeterRuleBulkContext&&) = delete; + + void clear() + { + object_statuses.clear(); + } +}; + +struct MeterRuleEntry +{ + sai_object_id_t meter_rule_oid; + dash::meter_rule::MeterRule metadata; + std::string meter_policy; + uint32_t rule_num; +}; +typedef std::map MeterRuleTable; + + +class DashMeterOrch : public ZmqOrch +{ +public: + using TaskArgs = std::vector; + + DashMeterOrch(swss::DBConnector *db, const std::vector &tables, DashOrch *dash_orch, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer); + sai_object_id_t getMeterPolicyOid(const std::string& meter_policy) const; + int32_t getMeterPolicyEniBindCount(const std::string& meter_policy) const; + void incrMeterPolicyEniBindCount(const std::string& meter_policy); + void decrMeterPolicyEniBindCount(const std::string& meter_policy); + void addEniToMeterFC(sai_object_id_t oid, const std::string& name); + void removeEniFromMeterFC(sai_object_id_t oid, const std::string& name); + void handleMeterFCStatusUpdate(bool is_enabled); + +private: + + void doTask(swss::SelectableTimer&); + void doTask(ConsumerBase &consumer); + void doTaskMeterPolicyTable(ConsumerBase &consumer); + void doTaskMeterRuleTable(ConsumerBase &consumer); + + bool addMeterPolicy(const std::string& meter_policy, MeterPolicyContext& ctxt); + bool removeMeterPolicy(const std::string& meter_policy); + + uint32_t getMeterPolicyRuleCount(const std::string& meter_policy) const; + sai_ip_addr_family_t getMeterPolicyAddrFamily(const std::string& meter_policy) const; + bool isV4(const std::string& meter_policy) const; + + bool addMeterRule(const std::string& key, MeterRuleBulkContext& ctxt); + bool addMeterRulePost(const std::string& key, const MeterRuleBulkContext& ctxt); + bool removeMeterRule(const std::string& key, MeterRuleBulkContext& ctxt); + bool removeMeterRulePost(const std::string& key, const MeterRuleBulkContext& ctxt); + + bool isMeterPolicyBound(const std::string& meter_policy) const; + void incrMeterPolicyRuleCount(const std::string& meter_policy); + void decrMeterPolicyRuleCount(const std::string& meter_policy); + + DashOrch *m_dash_orch; + MeterPolicyTable meter_policy_entries_; + MeterRuleTable meter_rule_entries_; + ObjectBulker meter_rule_bulker_; + bool m_meter_fc_status = false; + FlexCounterManager m_meter_stat_manager; + std::unordered_set m_meter_counter_stats; + std::map m_meter_stat_work_queue; + std::unique_ptr m_vid_to_rid_table; + std::shared_ptr m_counter_db; + std::shared_ptr m_asic_db; + swss::SelectableTimer* m_meter_fc_update_timer = nullptr; +}; diff --git a/orchagent/dash/dashorch.cpp b/orchagent/dash/dashorch.cpp index 95dde9f8882..09af858395c 100644 --- a/orchagent/dash/dashorch.cpp +++ b/orchagent/dash/dashorch.cpp @@ -9,6 +9,7 @@ #include "converter.h" #include "dashorch.h" +#include "dashhaorch.h" #include "macaddress.h" #include "orch.h" #include "sai.h" @@ -17,46 +18,163 @@ #include "tokenize.h" #include "crmorch.h" #include "saihelper.h" +#include "directory.h" +#include "flex_counter_manager.h" #include "taskworker.h" #include "pbutils.h" +#include "dashrouteorch.h" +#include "dashmeterorch.h" +#include using namespace std; using namespace swss; +using namespace google::protobuf::util; +extern Directory gDirectory; extern std::unordered_map gVnetNameToId; +extern sai_dash_appliance_api_t* sai_dash_appliance_api; extern sai_dash_vip_api_t* sai_dash_vip_api; extern sai_dash_direction_lookup_api_t* sai_dash_direction_lookup_api; extern sai_dash_eni_api_t* sai_dash_eni_api; +extern sai_dash_trusted_vni_api_t* sai_dash_trusted_vni_api; extern sai_object_id_t gSwitchId; extern size_t gMaxBulkSize; extern CrmOrch *gCrmOrch; +extern bool gTraditionalFlexCounter; -DashOrch::DashOrch(DBConnector *db, vector &tableName, ZmqServer *zmqServer) : ZmqOrch(db, tableName, zmqServer) +#define FLEX_COUNTER_UPD_INTERVAL 1 + +static const std::unordered_map eniModeMap = +{ + { dash::eni::MODE_VM, SAI_DASH_ENI_MODE_VM }, + { dash::eni::MODE_FNIC, SAI_DASH_ENI_MODE_FNIC } +}; + +static const std::unordered_map directionLookupActionMap = +{ + { "src_mac", SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION }, + { "dst_mac", SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_INBOUND_DIRECTION } +}; + +DashOrch::DashOrch(DBConnector *db, vector &tableName, DBConnector *app_state_db, ZmqServer *zmqServer) : + ZmqOrch(db, tableName, zmqServer), + m_eni_stat_manager(ENI_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, ENI_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false) +{ + SWSS_LOG_ENTER(); + + m_asic_db = std::shared_ptr(new DBConnector("ASIC_DB", 0)); + m_counter_db = std::shared_ptr(new DBConnector("COUNTERS_DB", 0)); + m_eni_name_table = make_unique
(m_counter_db.get(), COUNTERS_ENI_NAME_MAP); + dash_eni_result_table_ = make_unique
(app_state_db, APP_DASH_ENI_TABLE_NAME); + dash_eni_route_result_table_ = make_unique
(app_state_db, APP_DASH_ENI_ROUTE_TABLE_NAME); + dash_qos_result_table_ = make_unique
(app_state_db, APP_DASH_QOS_TABLE_NAME); + dash_appliance_result_table_ = make_unique
(app_state_db, APP_DASH_APPLIANCE_TABLE_NAME); + dash_routing_type_result_table_ = make_unique
(app_state_db, APP_DASH_ROUTING_TYPE_TABLE_NAME); + + if (gTraditionalFlexCounter) + { + m_vid_to_rid_table = std::make_unique
(m_asic_db.get(), "VIDTORID"); + } + + auto intervT = timespec { .tv_sec = FLEX_COUNTER_UPD_INTERVAL , .tv_nsec = 0 }; + m_fc_update_timer = new SelectableTimer(intervT); + auto executorT = new ExecutableTimer(m_fc_update_timer, this, "FLEX_COUNTER_UPD_TIMER"); + Orch::addExecutor(executorT); + + /* Fetch the available counter Ids */ + m_counter_stats.clear(); + auto stat_enum_list = queryAvailableCounterStats((sai_object_type_t)SAI_OBJECT_TYPE_ENI); + for (auto &stat_enum: stat_enum_list) + { + auto counter_id = static_cast(stat_enum); + m_counter_stats.insert(sai_serialize_eni_stat(counter_id)); + } +} + +void DashOrch::setDashHaOrch(DashHaOrch *dash_ha_orch) { SWSS_LOG_ENTER(); + m_dash_ha_orch = dash_ha_orch; +} + +bool DashOrch::getRouteTypeActions(dash::route_type::RoutingType routing_type, dash::route_type::RouteType& route_type) +{ + SWSS_LOG_ENTER(); + + auto it = routing_type_entries_.find(routing_type); + if (it == routing_type_entries_.end()) + { + SWSS_LOG_WARN("Routing type %s not found", dash::route_type::RoutingType_Name(routing_type).c_str()); + return false; + } + + route_type = it->second; + return true; +} + +bool DashOrch::hasApplianceEntry() +{ + return !appliance_entries_.empty(); } bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::appliance::Appliance &entry) { SWSS_LOG_ENTER(); - if (appliance_entries_.find(appliance_id) != appliance_entries_.end()) + auto it = appliance_entries_.find(appliance_id); + + if (it != appliance_entries_.end()) { - SWSS_LOG_WARN("Appliance Entry already exists for %s", appliance_id.c_str()); + if (!MessageDifferencer::Equivalent(it->second.metadata.trusted_vnis(), entry.trusted_vnis())) + { + SWSS_LOG_INFO("Appliance Entry %s already exists with different trusted vnis", appliance_id.c_str()); + removeApplianceTrustedVni(appliance_id, it->second.metadata); + addApplianceTrustedVni(appliance_id, entry); + } + else + { + SWSS_LOG_WARN("Appliance Entry already exists for %s", appliance_id.c_str()); + } return true; } + if (!appliance_entries_.empty()) + { + SWSS_LOG_ERROR("Appliance entry is a singleton and already exists"); + return false; + } uint32_t attr_count = 1; + sai_attribute_t appliance_attr; + sai_status_t status; + + // NOTE: DASH Appliance object should be the first object pushed to SAI + sai_object_id_t sai_appliance_id = 0UL; + appliance_attr.id = SAI_DASH_APPLIANCE_ATTR_LOCAL_REGION_ID; + appliance_attr.value.u32 = entry.local_region_id(); + status = sai_dash_appliance_api->create_dash_appliance(&sai_appliance_id, gSwitchId, + attr_count, &appliance_attr); + if (status != SAI_STATUS_SUCCESS) + { + if (status != SAI_STATUS_NOT_IMPLEMENTED) + { + SWSS_LOG_ERROR("Failed to create dash appliance object in SAI for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_APPLIANCE, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + // ignore if not implemented in SAI + sai_appliance_id = 0; + } + sai_vip_entry_t vip_entry; vip_entry.switch_id = gSwitchId; if (!to_sai(entry.sip(), vip_entry.vip)) { return false; } - sai_attribute_t appliance_attr; - vector appliance_attrs; - sai_status_t status; appliance_attr.id = SAI_VIP_ENTRY_ATTR_ACTION; appliance_attr.value.u32 = SAI_VIP_ENTRY_ACTION_ACCEPT; status = sai_dash_vip_api->create_vip_entry(&vip_entry, attr_count, &appliance_attr); @@ -71,11 +189,30 @@ bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::applian } sai_direction_lookup_entry_t direction_lookup_entry; + vector direction_lookup_attrs; direction_lookup_entry.switch_id = gSwitchId; direction_lookup_entry.vni = entry.vm_vni(); appliance_attr.id = SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION; - appliance_attr.value.u32 = SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION; - status = sai_dash_direction_lookup_api->create_direction_lookup_entry(&direction_lookup_entry, attr_count, &appliance_attr); + if (entry.has_outbound_direction_lookup()) + { + appliance_attr.value.u32 = directionLookupActionMap.at(entry.outbound_direction_lookup()); + } + else + { + appliance_attr.value.u32 = SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION; + } + direction_lookup_attrs.push_back(appliance_attr); + + // Don't set up this entry for dst_mac i.e. for Floating NIC. + if (!entry.has_outbound_direction_lookup() || (entry.outbound_direction_lookup() != "dst_mac")) + { + appliance_attr.id = SAI_DIRECTION_LOOKUP_ENTRY_ATTR_DASH_ENI_MAC_OVERRIDE_TYPE; + appliance_attr.value.u32 = SAI_DASH_ENI_MAC_OVERRIDE_TYPE_SRC_MAC; + direction_lookup_attrs.push_back(appliance_attr); + } + + status = sai_dash_direction_lookup_api->create_direction_lookup_entry(&direction_lookup_entry, + (uint32_t)direction_lookup_attrs.size(), direction_lookup_attrs.data()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create direction lookup entry for %s", appliance_id.c_str()); @@ -85,18 +222,50 @@ bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::applian return parseHandleSaiStatusFailure(handle_status); } } - appliance_entries_[appliance_id] = entry; - SWSS_LOG_NOTICE("Created vip and direction lookup entries for %s", appliance_id.c_str()); + appliance_entries_[appliance_id] = ApplianceEntry { sai_appliance_id, entry }; + SWSS_LOG_NOTICE("Created appliance, vip and direction lookup entries for %s", appliance_id.c_str()); + + if (entry.has_trusted_vnis()) + { + addApplianceTrustedVni(appliance_id, entry); + } return true; } +void DashOrch::addApplianceTrustedVni(const std::string& appliance_id, const dash::appliance::Appliance& entry) +{ + SWSS_LOG_ENTER(); + sai_global_trusted_vni_entry_t trusted_vni_entry; + trusted_vni_entry.switch_id = gSwitchId; + sai_u32_range_t vni_range; + if (!to_sai(entry.trusted_vnis(), vni_range)) + { + SWSS_LOG_ERROR("Failed to convert trusted vni range for appliance"); + return; + } + + trusted_vni_entry.vni_range = vni_range; + sai_status_t status = sai_dash_trusted_vni_api->create_global_trusted_vni_entry(&trusted_vni_entry, 0, NULL); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create global trusted vni entry with range %u-%u for appliance", vni_range.min, vni_range.max); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t)SAI_API_DASH_TRUSTED_VNI, status); + if (handle_status != task_success) + { + parseHandleSaiStatusFailure(handle_status); + } + } + appliance_entries_[appliance_id].metadata.mutable_trusted_vnis()->CopyFrom(entry.trusted_vnis()); + SWSS_LOG_NOTICE("Created global trusted vni entry for appliance with range %u-%u", + vni_range.min, vni_range.max); +} + bool DashOrch::removeApplianceEntry(const string& appliance_id) { SWSS_LOG_ENTER(); sai_status_t status; - dash::appliance::Appliance entry; if (appliance_entries_.find(appliance_id) == appliance_entries_.end()) { @@ -104,7 +273,7 @@ bool DashOrch::removeApplianceEntry(const string& appliance_id) return true; } - entry = appliance_entries_[appliance_id]; + const auto& entry = appliance_entries_[appliance_id].metadata; sai_vip_entry_t vip_entry; vip_entry.switch_id = gSwitchId; if (!to_sai(entry.sip(), vip_entry.vip)) @@ -135,22 +304,76 @@ bool DashOrch::removeApplianceEntry(const string& appliance_id) return parseHandleSaiStatusFailure(handle_status); } } + + auto sai_appliance_id = appliance_entries_[appliance_id].appliance_id; + if (sai_appliance_id != 0UL) + { + status = sai_dash_appliance_api->remove_dash_appliance(sai_appliance_id); + if (status != SAI_STATUS_SUCCESS && status != SAI_STATUS_NOT_IMPLEMENTED) + { + SWSS_LOG_ERROR("Failed to remove dash appliance object in SAI for %s", appliance_id.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_APPLIANCE, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + if (entry.has_trusted_vnis()) + { + removeApplianceTrustedVni(appliance_id, entry); + } + appliance_entries_.erase(appliance_id); - SWSS_LOG_NOTICE("Removed vip and direction lookup entries for %s", appliance_id.c_str()); + SWSS_LOG_NOTICE("Removed appliance, vip and direction lookup entries for %s", appliance_id.c_str()); + return true; } +void DashOrch::removeApplianceTrustedVni(const std::string& appliance_id, const dash::appliance::Appliance& entry) +{ + SWSS_LOG_ENTER(); + sai_global_trusted_vni_entry_t trusted_vni_entry; + trusted_vni_entry.switch_id = gSwitchId; + sai_u32_range_t vni_range; + + if (!to_sai(entry.trusted_vnis(), vni_range)) + { + SWSS_LOG_ERROR("Failed to convert trusted vni range for appliance"); + return; + } + + trusted_vni_entry.vni_range = vni_range; + sai_status_t status = sai_dash_trusted_vni_api->remove_global_trusted_vni_entry(&trusted_vni_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove global trusted vni entry with range %u-%u for appliance", vni_range.min, vni_range.max); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t)SAI_API_DASH_TRUSTED_VNI, status); + if (handle_status != task_success) + { + parseHandleSaiStatusFailure(handle_status); + } + } + + appliance_entries_[appliance_id].metadata.clear_trusted_vnis(); + SWSS_LOG_NOTICE("Removed global trusted vni entry for appliance with range %u-%u", + vni_range.min, vni_range.max); +} + void DashOrch::doTaskApplianceTable(ConsumerBase& consumer) { SWSS_LOG_ENTER(); auto it = consumer.m_toSync.begin(); + uint32_t result; while (it != consumer.m_toSync.end()) { KeyOpFieldsValuesTuple t = it->second; string appliance_id = kfvKey(t); string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; if (op == SET_COMMAND) { @@ -169,14 +392,17 @@ void DashOrch::doTaskApplianceTable(ConsumerBase& consumer) } else { + result = DASH_RESULT_FAILURE; it++; } + writeResultToDB(dash_appliance_result_table_, appliance_id, result); } else if (op == DEL_COMMAND) { if (removeApplianceEntry(appliance_id)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_appliance_result_table_, appliance_id); } else { @@ -191,34 +417,34 @@ void DashOrch::doTaskApplianceTable(ConsumerBase& consumer) } } -bool DashOrch::addRoutingTypeEntry(const string& routing_type, const dash::route_type::RouteType &entry) +bool DashOrch::addRoutingTypeEntry(const dash::route_type::RoutingType& routing_type, const dash::route_type::RouteType &entry) { SWSS_LOG_ENTER(); if (routing_type_entries_.find(routing_type) != routing_type_entries_.end()) { - SWSS_LOG_WARN("Routing type entry already exists for %s", routing_type.c_str()); + SWSS_LOG_WARN("Routing type entry already exists for %s", dash::route_type::RoutingType_Name(routing_type).c_str()); return true; } routing_type_entries_[routing_type] = entry; - SWSS_LOG_NOTICE("Routing type entry added %s", routing_type.c_str()); + SWSS_LOG_NOTICE("Routing type entry added %s", dash::route_type::RoutingType_Name(routing_type).c_str()); return true; } -bool DashOrch::removeRoutingTypeEntry(const string& routing_type) +bool DashOrch::removeRoutingTypeEntry(const dash::route_type::RoutingType& routing_type) { SWSS_LOG_ENTER(); if (routing_type_entries_.find(routing_type) == routing_type_entries_.end()) { - SWSS_LOG_WARN("Routing type entry does not exist for %s", routing_type.c_str()); + SWSS_LOG_WARN("Routing type entry does not exist for %s", dash::route_type::RoutingType_Name(routing_type).c_str()); return true; } routing_type_entries_.erase(routing_type); - SWSS_LOG_NOTICE("Routing type entry removed for %s", routing_type.c_str()); + SWSS_LOG_NOTICE("Routing type entry removed for %s", dash::route_type::RoutingType_Name(routing_type).c_str()); return true; } @@ -228,11 +454,24 @@ void DashOrch::doTaskRoutingTypeTable(ConsumerBase& consumer) SWSS_LOG_ENTER(); auto it = consumer.m_toSync.begin(); + uint32_t result; while (it != consumer.m_toSync.end()) { KeyOpFieldsValuesTuple t = it->second; - string routing_type = kfvKey(t); + string routing_type_str = kfvKey(t); string op = kfvOp(t); + dash::route_type::RoutingType routing_type; + result = DASH_RESULT_SUCCESS; + + std::transform(routing_type_str.begin(), routing_type_str.end(), routing_type_str.begin(), ::toupper); + routing_type_str = "ROUTING_TYPE_" + routing_type_str; + + if (!dash::route_type::RoutingType_Parse(routing_type_str, &routing_type)) + { + SWSS_LOG_WARN("Invalid routing type %s", routing_type_str.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } if (op == SET_COMMAND) { @@ -240,7 +479,7 @@ void DashOrch::doTaskRoutingTypeTable(ConsumerBase& consumer) if (!parsePbMessage(kfvFieldsValues(t), entry)) { - SWSS_LOG_WARN("Requires protobuff at routing type :%s", routing_type.c_str()); + SWSS_LOG_WARN("Requires protobuff at routing type :%s", routing_type_str.c_str()); it = consumer.m_toSync.erase(it); continue; } @@ -251,14 +490,17 @@ void DashOrch::doTaskRoutingTypeTable(ConsumerBase& consumer) } else { + result = DASH_RESULT_FAILURE; it++; } + writeResultToDB(dash_routing_type_result_table_, routing_type_str, result); } else if (op == DEL_COMMAND) { if (removeRoutingTypeEntry(routing_type)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_routing_type_result_table_, routing_type_str); } else { @@ -277,9 +519,11 @@ bool DashOrch::setEniAdminState(const string& eni, const EniEntry& entry) { SWSS_LOG_ENTER(); + bool eni_enable = entry.metadata.admin_state() == dash::eni::State::STATE_ENABLED; + sai_attribute_t eni_attr; eni_attr.id = SAI_ENI_ATTR_ADMIN_STATE; - eni_attr.value.booldata = entry.metadata.admin_state(); + eni_attr.value.booldata = eni_enable; sai_status_t status = sai_dash_eni_api->set_eni_attribute(eni_entries_[eni].eni_id, &eni_attr); @@ -293,7 +537,7 @@ bool DashOrch::setEniAdminState(const string& eni, const EniEntry& entry) } } eni_entries_[eni].metadata.set_admin_state(entry.metadata.admin_state()); - SWSS_LOG_NOTICE("Set ENI %s admin state to %s", eni.c_str(), entry.metadata.admin_state() ? "UP" : "DOWN"); + SWSS_LOG_NOTICE("Set ENI %s admin state to %s", eni.c_str(), eni_enable ? "UP" : "DOWN"); return true; } @@ -316,6 +560,31 @@ bool DashOrch::addEniObject(const string& eni, EniEntry& entry) return false; } + DashMeterOrch *dash_meter_orch = gDirectory.get(); + const string &v4_meter_policy = entry.metadata.has_v4_meter_policy_id() ? + entry.metadata.v4_meter_policy_id() : ""; + const string &v6_meter_policy = entry.metadata.has_v6_meter_policy_id() ? + entry.metadata.v6_meter_policy_id() : ""; + + if (!v4_meter_policy.empty()) + { + sai_object_id_t meter_policy_oid = dash_meter_orch->getMeterPolicyOid(v4_meter_policy); + if (meter_policy_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Retry as v4 meter_policy %s not found", v4_meter_policy.c_str()); + return false; + } + } + if (!v6_meter_policy.empty()) + { + sai_object_id_t meter_policy_oid = dash_meter_orch->getMeterPolicyOid(v6_meter_policy); + if (meter_policy_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Retry as v6 meter_policy %s not found", v6_meter_policy.c_str()); + return false; + } + } + sai_object_id_t &eni_id = entry.eni_id; sai_attribute_t eni_attr; vector eni_attrs; @@ -352,10 +621,93 @@ bool DashOrch::addEniObject(const string& eni, EniEntry& entry) eni_attrs.push_back(eni_attr); eni_attr.id = SAI_ENI_ATTR_VM_VNI; - auto app_entry = appliance_entries_.begin()->second; + auto& app_entry = appliance_entries_.begin()->second.metadata; eni_attr.value.u32 = app_entry.vm_vni(); eni_attrs.push_back(eni_attr); + if (entry.metadata.has_pl_underlay_sip()) + { + eni_attr.id = SAI_ENI_ATTR_PL_UNDERLAY_SIP; + to_sai(entry.metadata.pl_underlay_sip(), eni_attr.value.ipaddr); + eni_attrs.push_back(eni_attr); + } + + if (entry.metadata.has_pl_sip_encoding()) + { + eni_attr.id = SAI_ENI_ATTR_PL_SIP; + to_sai(entry.metadata.pl_sip_encoding().ip(), eni_attr.value.ipaddr); + eni_attrs.push_back(eni_attr); + + eni_attr.id = SAI_ENI_ATTR_PL_SIP_MASK; + to_sai(entry.metadata.pl_sip_encoding().mask(), eni_attr.value.ipaddr); + eni_attrs.push_back(eni_attr); + } + + if (!v4_meter_policy.empty()) + { + eni_attr.id = SAI_ENI_ATTR_V4_METER_POLICY_ID; + eni_attr.value.oid = dash_meter_orch->getMeterPolicyOid(v4_meter_policy); + eni_attrs.push_back(eni_attr); + } + + if (!v6_meter_policy.empty()) + { + eni_attr.id = SAI_ENI_ATTR_V6_METER_POLICY_ID; + eni_attr.value.oid = dash_meter_orch->getMeterPolicyOid(v6_meter_policy); + eni_attrs.push_back(eni_attr); + } + + // Set HA Scope ID if DashHaOrch is available and has HA scopes configured + if (m_dash_ha_orch != nullptr) + { + HaScopeEntry ha_scope_entry = m_dash_ha_orch->getHaScopeForEni(eni); + if (ha_scope_entry.ha_scope_id != SAI_NULL_OBJECT_ID) + { + eni_attr.id = SAI_ENI_ATTR_HA_SCOPE_ID; + eni_attr.value.oid = ha_scope_entry.ha_scope_id; + eni_attrs.push_back(eni_attr); + SWSS_LOG_INFO("Setting HA Scope ID %" PRIx64 " for ENI %s", ha_scope_entry.ha_scope_id, eni.c_str()); + + // Set HA flow owner based on HA role + eni_attr.id = SAI_ENI_ATTR_IS_HA_FLOW_OWNER; + if (ha_scope_entry.metadata.ha_role() == dash::types::HA_ROLE_ACTIVE || ha_scope_entry.metadata.ha_role() == dash::types::HA_ROLE_STANDALONE) + { + eni_attr.value.booldata = true; + SWSS_LOG_INFO("Setting HA flow owner to true (ACTIVE) for ENI %s", eni.c_str()); + } + else if (ha_scope_entry.metadata.ha_role() == dash::types::HA_ROLE_STANDBY) + { + eni_attr.value.booldata = false; + SWSS_LOG_INFO("Setting HA flow owner to false (STANDBY) for ENI %s", eni.c_str()); + } + else + { + // For other roles (DEAD, SWITCHING_TO_ACTIVE), default to false + eni_attr.value.booldata = false; + SWSS_LOG_INFO("Setting HA flow owner to false (role: %s) for ENI %s", dash::types::HaRole_Name(ha_scope_entry.metadata.ha_role()).c_str(), eni.c_str()); + } + eni_attrs.push_back(eni_attr); + } + else + { + SWSS_LOG_INFO("No HA Scope ID set for ENI %s", eni.c_str()); + } + } + + if (entry.metadata.has_eni_mode()) { + auto it = eniModeMap.find(entry.metadata.eni_mode()); + eni_attr.id = SAI_ENI_ATTR_DASH_ENI_MODE; + if (it != eniModeMap.end()) + { + eni_attr.value.u32 = it->second; + } else { + // Default to VM mode if not specified + eni_attr.value.u32 = SAI_DASH_ENI_MODE_VM; + SWSS_LOG_ERROR("Invalid ENI mode %s for ENI %s, defaulting to VM mode", dash::eni::EniMode_Name(entry.metadata.eni_mode()).c_str(), eni.c_str()); + } + eni_attrs.push_back(eni_attr); + } + sai_status_t status = sai_dash_eni_api->create_eni(&eni_id, gSwitchId, (uint32_t)eni_attrs.size(), eni_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -368,8 +720,21 @@ bool DashOrch::addEniObject(const string& eni, EniEntry& entry) } } + addEniMapEntry(eni_id, eni); + addEniToFC(eni_id, eni); + dash_meter_orch->addEniToMeterFC(eni_id, eni); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_DASH_ENI); + if (!v4_meter_policy.empty()) + { + dash_meter_orch->incrMeterPolicyEniBindCount(v4_meter_policy); + } + if (!v6_meter_policy.empty()) + { + dash_meter_orch->incrMeterPolicyEniBindCount(v6_meter_policy); + } + SWSS_LOG_NOTICE("Created ENI object for %s", eni.c_str()); return true; @@ -407,19 +772,59 @@ bool DashOrch::addEniAddrMapEntry(const string& eni, const EniEntry& entry) return true; } -bool DashOrch::addEni(const string& eni, EniEntry &entry) +void DashOrch::addEniTrustedVnis(const std::string& eni, const EniEntry& entry) { SWSS_LOG_ENTER(); + sai_eni_trusted_vni_entry_t trusted_vni_entry; + trusted_vni_entry.switch_id = gSwitchId; + trusted_vni_entry.eni_id = entry.eni_id; + sai_u32_range_t vni_range; + if (!to_sai(entry.metadata.trusted_vnis(), vni_range)) + { + SWSS_LOG_ERROR("Failed to convert trusted vni range for ENI %s", entry.metadata.eni_id().c_str()); + return; + } + trusted_vni_entry.vni_range = vni_range; - auto it = eni_entries_.find(eni); - if (it != eni_entries_.end() && it->second.metadata.admin_state() != entry.metadata.admin_state()) + sai_status_t status = sai_dash_trusted_vni_api->create_eni_trusted_vni_entry(&trusted_vni_entry, 0, NULL); + if (status != SAI_STATUS_SUCCESS) { - return setEniAdminState(eni, entry); + SWSS_LOG_ERROR("Failed to create ENI trusted vni entry with range %u-%u for ENI %s", vni_range.min, vni_range.max, entry.metadata.eni_id().c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t)SAI_API_DASH_TRUSTED_VNI, status); + if (handle_status != task_success) + { + parseHandleSaiStatusFailure(handle_status); + } } + eni_entries_[eni].metadata.mutable_trusted_vnis()->CopyFrom(entry.metadata.trusted_vnis()); + SWSS_LOG_NOTICE("Created ENI trusted vni entry for ENI %s with range %u-%u", + entry.metadata.eni_id().c_str(), vni_range.min, vni_range.max); +} - else if (it != eni_entries_.end()) +bool DashOrch::addEni(const string& eni, EniEntry &entry) +{ + SWSS_LOG_ENTER(); + + auto it = eni_entries_.find(eni); + if (it != eni_entries_.end()) { - SWSS_LOG_WARN("ENI %s already exists", eni.c_str()); + bool changed = false; + if (!MessageDifferencer::Equivalent(it->second.metadata.trusted_vnis(), entry.metadata.trusted_vnis())) + { + SWSS_LOG_INFO("ENI %s trusted vnis have changed", eni.c_str()); + removeEniTrustedVnis(eni, it->second); + addEniTrustedVnis(eni, entry); + changed = true; + } + if (it->second.metadata.admin_state() != entry.metadata.admin_state()) + { + SWSS_LOG_INFO("ENI %s already exists, updating admin state", eni.c_str()); + return setEniAdminState(eni, entry); + } + if (!changed) + { + SWSS_LOG_WARN("ENI %s already exists", eni.c_str()); + } return true; } @@ -429,6 +834,11 @@ bool DashOrch::addEni(const string& eni, EniEntry &entry) } eni_entries_[eni] = entry; + if (entry.metadata.has_trusted_vnis()) + { + addEniTrustedVnis(eni, entry); + } + return true; } @@ -450,6 +860,12 @@ bool DashOrch::removeEniObject(const string& eni) SWSS_LOG_ENTER(); EniEntry entry = eni_entries_[eni]; + DashMeterOrch *dash_meter_orch = gDirectory.get(); + + dash_meter_orch->removeEniFromMeterFC(entry.eni_id, eni); + removeEniFromFC(entry.eni_id, eni); + removeEniMapEntry(entry.eni_id, eni); + sai_status_t status = sai_dash_eni_api->remove_eni(entry.eni_id); if (status != SAI_STATUS_SUCCESS) { @@ -466,6 +882,20 @@ bool DashOrch::removeEniObject(const string& eni) } } + const string &v4_meter_policy = entry.metadata.has_v4_meter_policy_id() ? + entry.metadata.v4_meter_policy_id() : ""; + const string &v6_meter_policy = entry.metadata.has_v6_meter_policy_id() ? + entry.metadata.v6_meter_policy_id() : ""; + + if (!v4_meter_policy.empty()) + { + dash_meter_orch->decrMeterPolicyEniBindCount(v4_meter_policy); + } + if (!v6_meter_policy.empty()) + { + dash_meter_orch->decrMeterPolicyEniBindCount(v6_meter_policy); + } + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_DASH_ENI); SWSS_LOG_NOTICE("Removed ENI object for %s", eni.c_str()); @@ -505,6 +935,36 @@ bool DashOrch::removeEniAddrMapEntry(const string& eni) return true; } +void DashOrch::removeEniTrustedVnis(const std::string& eni, const EniEntry& entry) +{ + SWSS_LOG_ENTER(); + sai_eni_trusted_vni_entry_t trusted_vni_entry; + trusted_vni_entry.switch_id = gSwitchId; + trusted_vni_entry.eni_id = entry.eni_id; + sai_u32_range_t vni_range; + + if (!to_sai(entry.metadata.trusted_vnis(), vni_range)) + { + SWSS_LOG_ERROR("Failed to convert trusted vni range for ENI %s", entry.metadata.eni_id().c_str()); + return; + } + + trusted_vni_entry.vni_range = vni_range; + sai_status_t status = sai_dash_trusted_vni_api->remove_eni_trusted_vni_entry(&trusted_vni_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ENI trusted vni entry with range %u-%u for ENI %s", vni_range.min, vni_range.max, entry.metadata.eni_id().c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t)SAI_API_DASH_TRUSTED_VNI, status); + if (handle_status != task_success) + { + parseHandleSaiStatusFailure(handle_status); + } + } + eni_entries_[eni].metadata.clear_trusted_vnis(); + SWSS_LOG_NOTICE("Removed ENI trusted vni entry for ENI %s with range %u-%u", + entry.metadata.eni_id().c_str(), vni_range.min, vni_range.max); +} + bool DashOrch::removeEni(const string& eni) { SWSS_LOG_ENTER(); @@ -514,10 +974,17 @@ bool DashOrch::removeEni(const string& eni) SWSS_LOG_WARN("ENI %s does not exist", eni.c_str()); return true; } + + if (eni_entries_[eni].metadata.has_trusted_vnis()) + { + removeEniTrustedVnis(eni, eni_entries_[eni]); + } + if (!removeEniAddrMapEntry(eni) || !removeEniObject(eni)) { return false; } + eni_entries_.erase(eni); return true; @@ -527,14 +994,14 @@ void DashOrch::doTaskEniTable(ConsumerBase& consumer) { SWSS_LOG_ENTER(); - const auto& tn = consumer.getTableName(); - auto it = consumer.m_toSync.begin(); + uint32_t result; while (it != consumer.m_toSync.end()) { auto t = it->second; string eni = kfvKey(t); string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; if (op == SET_COMMAND) { EniEntry entry; @@ -552,14 +1019,17 @@ void DashOrch::doTaskEniTable(ConsumerBase& consumer) } else { + result = DASH_RESULT_FAILURE; it++; } + writeResultToDB(dash_eni_result_table_, eni, result); } else if (op == DEL_COMMAND) { if (removeEni(eni)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_eni_result_table_, eni); } else { @@ -606,11 +1076,13 @@ bool DashOrch::removeQosEntry(const string& qos_name) void DashOrch::doTaskQosTable(ConsumerBase& consumer) { auto it = consumer.m_toSync.begin(); + uint32_t result; while (it != consumer.m_toSync.end()) { KeyOpFieldsValuesTuple t = it->second; string qos_name = kfvKey(t); string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; if (op == SET_COMMAND) { @@ -629,14 +1101,170 @@ void DashOrch::doTaskQosTable(ConsumerBase& consumer) } else { + result = DASH_RESULT_FAILURE; it++; } + writeResultToDB(dash_qos_result_table_, qos_name, result); } else if (op == DEL_COMMAND) { if (removeQosEntry(qos_name)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_qos_result_table_, qos_name); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + +bool DashOrch::setEniRoute(const std::string& eni, const dash::eni_route::EniRoute& entry) +{ + SWSS_LOG_ENTER(); + + + if (eni_entries_.find(eni) == eni_entries_.end()) + { + SWSS_LOG_INFO("ENI %s not yet created, not programming ENI route entry", eni.c_str()); + return false; + } + + DashRouteOrch *dash_route_orch = gDirectory.get(); + sai_object_id_t route_group_oid = dash_route_orch->getRouteGroupOid(entry.group_id()); + if (route_group_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Route group not yet created, skipping route entry for ENI %s", entry.group_id().c_str()); + return false; + } + + std::string old_group_id; + if (eni_route_entries_.find(eni) != eni_route_entries_.end()) + { + if (eni_route_entries_[eni].group_id() != entry.group_id()) + { + old_group_id = eni_route_entries_[eni].group_id(); + SWSS_LOG_INFO("Updating route entry from %s to %s for ENI %s", eni_route_entries_[eni].group_id().c_str(), entry.group_id().c_str(), eni.c_str()); + } + else + { + SWSS_LOG_WARN("Duplicate ENI route entry already exists for %s", eni.c_str()); + return true; + } + } + + sai_attribute_t eni_attr; + eni_attr.id = SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID; + eni_attr.value.oid = route_group_oid; + + sai_status_t status = sai_dash_eni_api->set_eni_attribute(eni_entries_[eni].eni_id, + &eni_attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set ENI route group for %s", eni.c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + eni_route_entries_[eni] = entry; + dash_route_orch->bindRouteGroup(entry.group_id()); + + if (!old_group_id.empty()) + { + dash_route_orch->unbindRouteGroup(old_group_id); + } + + SWSS_LOG_NOTICE("Updated ENI route group for %s to route group %s", eni.c_str(), entry.group_id().c_str()); + return true; +} + +bool DashOrch::removeEniRoute(const std::string& eni) +{ + SWSS_LOG_ENTER(); + + if (eni_route_entries_.find(eni) == eni_route_entries_.end()) + { + SWSS_LOG_WARN("ENI route entry does not exist for %s", eni.c_str()); + return true; + } + + if (eni_entries_.find(eni) != eni_entries_.end()) + { + sai_attribute_t eni_attr; + eni_attr.id = SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID; + eni_attr.value.oid = SAI_NULL_OBJECT_ID; + + sai_status_t status = sai_dash_eni_api->set_eni_attribute(eni_entries_[eni].eni_id, + &eni_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ENI route for %s", eni.c_str()); + task_process_status handle_status = handleSaiSetStatus((sai_api_t) SAI_API_DASH_ENI, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + DashRouteOrch *dash_route_orch = gDirectory.get(); + dash_route_orch->unbindRouteGroup(eni_route_entries_[eni].group_id()); + eni_route_entries_.erase(eni); + + SWSS_LOG_NOTICE("Removed ENI route entry for %s", eni.c_str()); + + return true; +} + +void DashOrch::doTaskEniRouteTable(ConsumerBase& consumer) +{ + auto it = consumer.m_toSync.begin(); + uint32_t result; + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string eni = kfvKey(t); + string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; + + if (op == SET_COMMAND) + { + dash::eni_route::EniRoute entry; + + if (!parsePbMessage(kfvFieldsValues(t), entry)) + { + SWSS_LOG_WARN("Requires protobuf at ENI route:%s", eni.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (setEniRoute(eni, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + result = DASH_RESULT_FAILURE; + it++; + } + writeResultToDB(dash_eni_route_result_table_, eni, result); + } + else if (op == DEL_COMMAND) + { + if (removeEniRoute(eni)) + { + it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_eni_route_result_table_, eni); } else { @@ -675,8 +1303,168 @@ void DashOrch::doTask(ConsumerBase& consumer) { doTaskQosTable(consumer); } + else if (tn == APP_DASH_ENI_ROUTE_TABLE_NAME) + { + doTaskEniRouteTable(consumer); + } else { SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); } } + +void DashOrch::removeEniFromFC(sai_object_id_t oid, const string &name) +{ + SWSS_LOG_ENTER(); + + if (oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Cannot remove counter on NULL OID for eni %s", name.c_str()); + return; + } + + if (m_eni_stat_work_queue.find(oid) != m_eni_stat_work_queue.end()) + { + m_eni_stat_work_queue.erase(oid); + return; + } + + m_eni_stat_manager.clearCounterIdList(oid); + SWSS_LOG_INFO("Unregistering FC for %s, id: %s", name.c_str(), sai_serialize_object_id(oid).c_str()); +} + +void DashOrch::refreshEniFCStats(bool install) +{ + for (auto it = eni_entries_.begin(); it != eni_entries_.end(); it++) + { + if (install) + { + addEniToFC(it->second.eni_id, it->first); + } + else + { + removeEniFromFC(it->second.eni_id, it->first); + } + } +} + +void DashOrch::refreshMeterFCStats(bool install) +{ + DashMeterOrch *dash_meter_orch = gDirectory.get(); + for (auto it = eni_entries_.begin(); it != eni_entries_.end(); it++) + { + if (install) + { + dash_meter_orch->addEniToMeterFC(it->second.eni_id, it->first); + } + else + { + dash_meter_orch->removeEniFromMeterFC(it->second.eni_id, it->first); + } + } +} + +void DashOrch::handleFCStatusUpdate(bool enabled) +{ + bool prev_enabled = m_eni_fc_status; + m_eni_fc_status = enabled; /* Update the status */ + if (!enabled && prev_enabled) + { + m_fc_update_timer->stop(); + refreshEniFCStats(false); /* Clear any existing FC entries */ + } + else if (enabled && !prev_enabled) + { + refreshEniFCStats(true); + m_fc_update_timer->start(); + } +} + +void DashOrch::addEniMapEntry(sai_object_id_t oid, const string &name) { + SWSS_LOG_ENTER(); + + if (oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Cannot add ENI map entry with NULL OID for eni %s", name.c_str()); + return; + } + + const auto id = sai_serialize_object_id(oid); + SWSS_LOG_INFO("Adding ENI map entry for %s, id: %s", name.c_str(), id.c_str()); + std::vector eniNameFvs; + eniNameFvs.emplace_back(name, id); + m_eni_name_table->set("", eniNameFvs); +} + +void DashOrch::removeEniMapEntry(sai_object_id_t oid, const string &name) { + SWSS_LOG_ENTER(); + + if (oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Cannot remove ENI map entry on NULL OID for eni %s", name.c_str()); + return; + } + + m_eni_name_table->hdel("", name); + SWSS_LOG_INFO("Removing ENI map entry for %s, id: %s", name.c_str(), sai_serialize_object_id(oid).c_str()); +} + +void DashOrch::addEniToFC(sai_object_id_t oid, const string &name) +{ + if (!m_eni_fc_status) + { + return ; + } + auto was_empty = m_eni_stat_work_queue.empty(); + m_eni_stat_work_queue[oid] = name; + if (was_empty) + { + m_fc_update_timer->start(); + } +} + +void DashOrch::doTask(SelectableTimer &timer) +{ + SWSS_LOG_ENTER(); + + if (!m_eni_fc_status) + { + m_fc_update_timer->stop(); + return ; + } + + for (auto it = m_eni_stat_work_queue.begin(); it != m_eni_stat_work_queue.end(); ) + { + string value; + const auto id = sai_serialize_object_id(it->first); + + if (!gTraditionalFlexCounter || m_vid_to_rid_table->hget("", id, value)) + { + SWSS_LOG_INFO("Registering FC for ENI: %s, id %s", it->second.c_str(), id.c_str()); + + m_eni_stat_manager.setCounterIdList(it->first, CounterType::ENI, m_counter_stats); + it = m_eni_stat_work_queue.erase(it); + } + else + { + ++it; + } + } + + if (m_eni_stat_work_queue.empty()) + { + m_fc_update_timer->stop(); + } +} + +dash::types::IpAddress DashOrch::getApplianceVip() +{ + SWSS_LOG_ENTER(); + + if (appliance_entries_.empty()) + { + return dash::types::IpAddress(); + } + // we only expect one appliance per DPU, so always take the first entry in the cache + return appliance_entries_.begin()->second.metadata.sip(); +} diff --git a/orchagent/dash/dashorch.h b/orchagent/dash/dashorch.h index eca365225c7..7ed8c7280e7 100644 --- a/orchagent/dash/dashorch.h +++ b/orchagent/dash/dashorch.h @@ -17,11 +17,21 @@ #include "timer.h" #include "zmqorch.h" #include "zmqserver.h" +#include "flex_counter_manager.h" #include "dash_api/appliance.pb.h" #include "dash_api/route_type.pb.h" #include "dash_api/eni.pb.h" #include "dash_api/qos.pb.h" +#include "dash_api/eni_route.pb.h" + +#define ENI_STAT_COUNTER_FLEX_COUNTER_GROUP "ENI_STAT_COUNTER" +#define ENI_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 + +#define DASH_RESULT_SUCCESS 0 +#define DASH_RESULT_FAILURE 1 + +class DashHaOrch; struct EniEntry { @@ -29,38 +39,86 @@ struct EniEntry dash::eni::Eni metadata; }; -typedef std::map ApplianceTable; -typedef std::map RoutingTypeTable; +struct ApplianceEntry +{ + sai_object_id_t appliance_id; + dash::appliance::Appliance metadata; +}; + +typedef std::map ApplianceTable; +typedef std::map RoutingTypeTable; typedef std::map EniTable; typedef std::map QosTable; +typedef std::map EniRouteTable; class DashOrch : public ZmqOrch { public: - DashOrch(swss::DBConnector *db, std::vector &tables, swss::ZmqServer *zmqServer); + DashOrch(swss::DBConnector *db, std::vector &tables, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer); + void setDashHaOrch(DashHaOrch *dash_ha_orch); const EniEntry *getEni(const std::string &eni) const; + const EniTable *getEniTable() const { return &eni_entries_; }; + bool getRouteTypeActions(dash::route_type::RoutingType routing_type, dash::route_type::RouteType& route_type); + void handleFCStatusUpdate(bool is_enabled); + dash::types::IpAddress getApplianceVip(); + bool hasApplianceEntry(); + void clearMeterFCStats(); + void refreshMeterFCStats(bool); private: ApplianceTable appliance_entries_; RoutingTypeTable routing_type_entries_; EniTable eni_entries_; QosTable qos_entries_; + EniRouteTable eni_route_entries_; + std::unique_ptr dash_eni_result_table_; + std::unique_ptr dash_qos_result_table_; + std::unique_ptr dash_appliance_result_table_; + std::unique_ptr dash_eni_route_result_table_; + std::unique_ptr dash_routing_type_result_table_; void doTask(ConsumerBase &consumer); void doTaskApplianceTable(ConsumerBase &consumer); void doTaskRoutingTypeTable(ConsumerBase &consumer); void doTaskEniTable(ConsumerBase &consumer); void doTaskQosTable(ConsumerBase &consumer); + void doTaskEniRouteTable(ConsumerBase &consumer); + void doTaskRouteGroupTable(ConsumerBase &consumer); bool addApplianceEntry(const std::string& appliance_id, const dash::appliance::Appliance &entry); + void addApplianceTrustedVni(const std::string& appliance_id, const dash::appliance::Appliance& entry); bool removeApplianceEntry(const std::string& appliance_id); - bool addRoutingTypeEntry(const std::string& routing_type, const dash::route_type::RouteType &entry); - bool removeRoutingTypeEntry(const std::string& routing_type); + void removeApplianceTrustedVni(const std::string& appliance_id, const dash::appliance::Appliance& entry); + bool addRoutingTypeEntry(const dash::route_type::RoutingType &routing_type, const dash::route_type::RouteType &entry); + bool removeRoutingTypeEntry(const dash::route_type::RoutingType &routing_type); bool addEniObject(const std::string& eni, EniEntry& entry); bool addEniAddrMapEntry(const std::string& eni, const EniEntry& entry); + void addEniTrustedVnis(const std::string& eni, const EniEntry& entry); bool addEni(const std::string& eni, EniEntry &entry); bool removeEniObject(const std::string& eni); bool removeEniAddrMapEntry(const std::string& eni); + void removeEniTrustedVnis(const std::string& eni, const EniEntry& entry); bool removeEni(const std::string& eni); bool setEniAdminState(const std::string& eni, const EniEntry& entry); bool addQosEntry(const std::string& qos_name, const dash::qos::Qos &entry); bool removeQosEntry(const std::string& qos_name); + bool setEniRoute(const std::string& eni, const dash::eni_route::EniRoute& entry); + bool removeEniRoute(const std::string& eni); + +private: + std::map m_eni_stat_work_queue; + FlexCounterManager m_eni_stat_manager; + bool m_eni_fc_status = false; + std::unordered_set m_counter_stats; + std::unique_ptr m_eni_name_table; + std::unique_ptr m_vid_to_rid_table; + std::shared_ptr m_counter_db; + std::shared_ptr m_asic_db; + swss::SelectableTimer* m_fc_update_timer = nullptr; + DashHaOrch* m_dash_ha_orch = nullptr; + + void doTask(swss::SelectableTimer&); + void addEniMapEntry(sai_object_id_t oid, const std::string& name); + void removeEniMapEntry(sai_object_id_t oid, const std::string& name); + void addEniToFC(sai_object_id_t oid, const std::string& name); + void removeEniFromFC(sai_object_id_t oid, const std::string& name); + void refreshEniFCStats(bool); }; diff --git a/orchagent/dash/dashportmaporch.cpp b/orchagent/dash/dashportmaporch.cpp new file mode 100644 index 00000000000..facc93d516d --- /dev/null +++ b/orchagent/dash/dashportmaporch.cpp @@ -0,0 +1,590 @@ +#include "dashportmaporch.h" +#include "orch.h" +#include "dashorch.h" +#include "taskworker.h" +#include "bulker.h" +#include "pbutils.h" + +extern size_t gMaxBulkSize; +extern sai_dash_outbound_port_map_api_t *sai_dash_outbound_port_map_api; +extern sai_object_id_t gSwitchId; + +static const std::unordered_map + gPortMapRangeActionMap = { + {dash::outbound_port_map_range::PortMapRangeAction::ACTION_SKIP_MAPPING, + SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ACTION_SKIP_MAPPING}, + {dash::outbound_port_map_range::PortMapRangeAction::ACTION_MAP_PRIVATE_LINK_SERVICE, + SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ACTION_MAP_TO_PRIVATE_LINK_SERVICE}}; + +DashPortMapOrch::DashPortMapOrch(swss::DBConnector *db, std::vector &tables, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer) : ZmqOrch(db, tables, zmqServer), + port_map_bulker_(sai_dash_outbound_port_map_api, gSwitchId, gMaxBulkSize), + port_map_range_bulker_(sai_dash_outbound_port_map_api, gMaxBulkSize) +{ + SWSS_LOG_ENTER(); + dash_port_map_result_table_ = std::make_unique(app_state_db, APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME); + dash_port_map_range_result_table_ = std::make_unique(app_state_db, APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME); +} + +void DashPortMapOrch::doTask(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + const auto &tn = consumer.getTableName(); + + SWSS_LOG_INFO("Table name: %s", tn.c_str()); + + if (tn == APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME) + { + doTaskPortMapTable(consumer); + } + else if (tn == APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME) + { + doTaskPortMapRangeTable(consumer); + } + else + { + SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); + } +} + +void DashPortMapOrch::doTaskPortMapTable(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + uint32_t result; + + std::map, + DashPortMapBulkContext> + toBulk; + while (it != consumer.m_toSync.end()) + { + swss::KeyOpFieldsValuesTuple tuple = it->second; + std::string port_map_id = kfvKey(tuple); + std::string op = kfvOp(tuple); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(port_map_id, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto &ctxt = rc.first->second; + result = DASH_RESULT_SUCCESS; + SWSS_LOG_INFO("Processing port map entry: %s, operation: %s", port_map_id.c_str(), op.c_str()); + + if (!inserted) + { + ctxt.clear(); + } + + if (op == SET_COMMAND) + { + // the only info we need is the port map ID which is provided in the key + // no need to parse protobuf message here + + if (addPortMap(port_map_id, ctxt)) + { + it = consumer.m_toSync.erase(it); + // the only reason to remove from consumer prior to flush is if the port map already exists, + // so treat it like a success + writeResultToDB(dash_port_map_result_table_, port_map_id, result); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removePortMap(port_map_id, ctxt)) + { + it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_port_map_result_table_, port_map_id); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } + + port_map_bulker_.flush(); + + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + swss::KeyOpFieldsValuesTuple tuple = it_prev->second; + std::string port_map_id = kfvKey(tuple); + std::string op = kfvOp(tuple); + result = DASH_RESULT_SUCCESS; + auto found = toBulk.find(std::make_pair(port_map_id, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + + auto &ctxt = found->second; + if (ctxt.port_map_oids.empty() && ctxt.port_map_statuses.empty()) + { + it_prev++; + continue; + } + + if (op == SET_COMMAND) + { + if (addPortMapPost(port_map_id, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + result = DASH_RESULT_FAILURE; + it_prev++; + } + writeResultToDB(dash_port_map_result_table_, port_map_id, result); + } + else if (op == DEL_COMMAND) + { + if (removePortMapPost(port_map_id, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + removeResultFromDB(dash_port_map_result_table_, port_map_id); + } + else + { + it_prev++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it_prev = consumer.m_toSync.erase(it_prev); + } + } +} + +sai_object_id_t DashPortMapOrch::getPortMapOid(const std::string& port_map_name) +{ + SWSS_LOG_ENTER(); + + auto it = port_map_table_.find(port_map_name); + if (it == port_map_table_.end()) + { + return SAI_NULL_OBJECT_ID; + } + return it->second; +} + +bool DashPortMapOrch::addPortMap(const std::string &port_map_id, DashPortMapBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + if (port_map_table_.find(port_map_id) != port_map_table_.end()) + { + SWSS_LOG_WARN("Port map %s already exists", port_map_id.c_str()); + return true; + } + + std::vector attrs; + sai_attribute_t attr; + attr.id = SAI_OUTBOUND_PORT_MAP_ATTR_COUNTER_ID; + attr.value.oid = SAI_NULL_OBJECT_ID; + attrs.push_back(attr); + auto &object_ids = ctxt.port_map_oids; + object_ids.emplace_back(); + port_map_bulker_.create_entry(&object_ids.back(), (uint32_t)attrs.size(), attrs.data()); + SWSS_LOG_INFO("Adding port map %s to bulker", port_map_id.c_str()); + return false; +} + +bool DashPortMapOrch::addPortMapPost(const std::string &port_map_id, DashPortMapBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + auto &object_ids = ctxt.port_map_oids; + if (object_ids.empty()) + { + return false; + } + + auto it_status = object_ids.begin(); + sai_object_id_t port_map_oid = *it_status++; + if (port_map_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to create port map %s", port_map_id.c_str()); + return false; + } + + port_map_table_[port_map_id] = port_map_oid; + SWSS_LOG_NOTICE("Created port map %s with OID 0x%" PRIx64, port_map_id.c_str(), port_map_oid); + return true; +} + +bool DashPortMapOrch::removePortMap(const std::string &port_map_id, DashPortMapBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + auto it = port_map_table_.find(port_map_id); + if (it == port_map_table_.end()) + { + SWSS_LOG_WARN("Port map %s not found for removal", port_map_id.c_str()); + return true; + } + + auto &object_statuses = ctxt.port_map_statuses; + object_statuses.emplace_back(); + sai_object_id_t port_map_oid = port_map_table_[port_map_id]; + port_map_bulker_.remove_entry(&object_statuses.back(), port_map_oid); + SWSS_LOG_NOTICE("Removing port map %s with OID 0x%" PRIx64, port_map_id.c_str(), port_map_oid); + + return false; +} + +bool DashPortMapOrch::removePortMapPost(const std::string &port_map_id, DashPortMapBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + auto &object_statuses = ctxt.port_map_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_NOT_EXECUTED) + { + SWSS_LOG_INFO("Port map %s not removed, will retry later", port_map_id.c_str()); + return false; + } + SWSS_LOG_ERROR("Failed to remove port map %s, status: %s", port_map_id.c_str(), sai_serialize_status(status).c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t)SAI_API_DASH_OUTBOUND_PORT_MAP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + port_map_table_.erase(port_map_id); + SWSS_LOG_NOTICE("Removed port map %s", port_map_id.c_str()); + return true; +} + +void DashPortMapOrch::doTaskPortMapRangeTable(ConsumerBase &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + uint32_t result; + + std::map, + DashPortMapRangeBulkContext> + toBulk; + while (it != consumer.m_toSync.end()) + { + swss::KeyOpFieldsValuesTuple tuple = it->second; + std::string key = kfvKey(tuple); + std::string op = kfvOp(tuple); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(key, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto &ctxt = rc.first->second; + result = DASH_RESULT_FAILURE; + SWSS_LOG_INFO("Processing port map range entry: %s, operation: %s", key.c_str(), op.c_str()); + + if (!inserted) + { + ctxt.clear(); + } + + if (!parsePortMapRange(key, ctxt)) + { + SWSS_LOG_ERROR("Failed to parse port map range key: %s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (op == SET_COMMAND) + { + if (!parsePbMessage(kfvFieldsValues(tuple), ctxt.metadata)) + { + SWSS_LOG_ERROR("Failed to parse protobuf message for port map range %s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addPortMapRange(ctxt)) + { + it = consumer.m_toSync.erase(it); + // if we ever remove from consumer early, that means parsing was unsuccessful and a retry will not help, + // so treat it as a failure + writeResultToDB(dash_port_map_range_result_table_, key, result); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removePortMapRange(ctxt)) + { + it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_port_map_range_result_table_, key); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } + + port_map_range_bulker_.flush(); + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + swss::KeyOpFieldsValuesTuple tuple = it_prev->second; + std::string key = kfvKey(tuple); + std::string op = kfvOp(tuple); + result = DASH_RESULT_SUCCESS; + auto found = toBulk.find(std::make_pair(key, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + auto &ctxt = found->second; + if (ctxt.port_map_range_statuses.empty()) + { + it_prev++; + continue; + } + + if (op == SET_COMMAND) + { + if (addPortMapRangePost(ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + result = DASH_RESULT_FAILURE; + it_prev++; + } + writeResultToDB(dash_port_map_range_result_table_, key, result); + } + else if (op == DEL_COMMAND) + { + if (removePortMapRangePost(ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + removeResultFromDB(dash_port_map_range_result_table_, key); + } + else + { + it_prev++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it_prev = consumer.m_toSync.erase(it_prev); + } + } +} + +bool DashPortMapOrch::addPortMapRange(DashPortMapRangeBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + auto parent_it = port_map_table_.find(ctxt.parent_map_id); + if (parent_it == port_map_table_.end()) + { + SWSS_LOG_INFO("Parent port map %s does not exist for port map range", ctxt.parent_map_id.c_str()); + return false; + } + + sai_outbound_port_map_port_range_entry_t entry; + entry.switch_id = gSwitchId; + entry.outbound_port_map_id = parent_it->second; + sai_u32_range_t port_range; + port_range.min = ctxt.start_port; + port_range.max = ctxt.end_port; + entry.dst_port_range = port_range; + + std::vector attrs; + sai_attribute_t attr; + + auto action_it = gPortMapRangeActionMap.find(ctxt.metadata.action()); + if (action_it == gPortMapRangeActionMap.end()) + { + SWSS_LOG_ERROR("Unknown port map range action: %s", dash::outbound_port_map_range::PortMapRangeAction_Name(ctxt.metadata.action()).c_str()); + return true; + } + + attr.id = SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_ACTION; + attr.value.s32 = action_it->second; + attrs.push_back(attr); + + attr.id = SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_BACKEND_IP; + if (!to_sai(ctxt.metadata.backend_ip(), attr.value.ipaddr)) + { + SWSS_LOG_ERROR("Failed to convert backend IP %s to SAI format", ctxt.metadata.backend_ip().DebugString().c_str()); + return true; + } + attrs.push_back(attr); + + attr.id = SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_MATCH_PORT_BASE; + attr.value.u32 = ctxt.start_port; + attrs.push_back(attr); + + attr.id = SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_BACKEND_PORT_BASE; + attr.value.u32 = ctxt.metadata.backend_port_base(); + attrs.push_back(attr); + + auto &object_statuses = ctxt.port_map_range_statuses; + object_statuses.emplace_back(); + port_map_range_bulker_.create_entry(&object_statuses.back(), &entry, (uint32_t)attrs.size(), attrs.data()); + SWSS_LOG_INFO("Adding port map range for %s: start=%d, end=%d", ctxt.parent_map_id.c_str(), ctxt.start_port, ctxt.end_port); + return false; +} + +bool DashPortMapOrch::addPortMapRangePost(DashPortMapRangeBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + auto &object_statuses = ctxt.port_map_range_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_INFO("Port map range for %s already exists", ctxt.parent_map_id.c_str()); + return true; + } + SWSS_LOG_ERROR("Failed to create port map range for %s, status: %s", ctxt.parent_map_id.c_str(), sai_serialize_status(status).c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t)SAI_API_DASH_OUTBOUND_PORT_MAP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + SWSS_LOG_INFO("Created port map range for %s: start=%d, end=%d", ctxt.parent_map_id.c_str(), ctxt.start_port, ctxt.end_port); + return true; +} + +bool DashPortMapOrch::removePortMapRange(DashPortMapRangeBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + auto parent_it = port_map_table_.find(ctxt.parent_map_id); + if (parent_it == port_map_table_.end()) + { + // this should never happen - it's not possible to create a port map range w/o first creating the parent port map, + // and it's not possible to delete a port map while it still has child port map ranges + SWSS_LOG_ERROR("Parent port map %s not found for port map range removal", ctxt.parent_map_id.c_str()); + return true; + } + + sai_outbound_port_map_port_range_entry_t entry; + entry.switch_id = gSwitchId; + entry.outbound_port_map_id = parent_it->second; + sai_u32_range_t port_range; + port_range.min = ctxt.start_port; + port_range.max = ctxt.end_port; + entry.dst_port_range = port_range; + + auto &object_statuses = ctxt.port_map_range_statuses; + object_statuses.emplace_back(); + port_map_range_bulker_.remove_entry(&object_statuses.back(), &entry); + SWSS_LOG_NOTICE("Removing port map range for %s: start=%d, end=%d", ctxt.parent_map_id.c_str(), ctxt.start_port, ctxt.end_port); + return false; +} + +bool DashPortMapOrch::removePortMapRangePost(DashPortMapRangeBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + auto &object_statuses = ctxt.port_map_range_statuses; + if (object_statuses.empty()) + { + return false; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_INFO("Port map range for %s already removed", ctxt.parent_map_id.c_str()); + return true; + } + SWSS_LOG_ERROR("Failed to remove port map range for %s, status: %s", ctxt.parent_map_id.c_str(), sai_serialize_status(status).c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t)SAI_API_DASH_OUTBOUND_PORT_MAP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + SWSS_LOG_NOTICE("Removed port map range for %s: start=%d, end=%d", ctxt.parent_map_id.c_str(), ctxt.start_port, ctxt.end_port); + return true; +} + +bool DashPortMapOrch::parsePortMapRange(const std::string &key, DashPortMapRangeBulkContext &ctxt) +{ + SWSS_LOG_ENTER(); + + // Example key format: PORT_MAP_1:1000-2000 + size_t pos = key.find(':'); + if (pos == std::string::npos) + { + SWSS_LOG_ERROR("Invalid port map range key format: %s", key.c_str()); + return false; + } + + ctxt.parent_map_id = key.substr(0, pos); + std::string range = key.substr(pos + 1); + + size_t dash_pos = range.find('-'); + if (dash_pos == std::string::npos) + { + SWSS_LOG_ERROR("Invalid port range format: %s", range.c_str()); + return false; + } + + try + { + ctxt.start_port = std::stoi(range.substr(0, dash_pos)); + ctxt.end_port = std::stoi(range.substr(dash_pos + 1)); + } + catch (const std::invalid_argument &e) + { + SWSS_LOG_ERROR("Invalid port range values in key %s: %s", key.c_str(), e.what()); + return false; + } + + SWSS_LOG_INFO("Parsed port range for %s: start=%d, end=%d", ctxt.parent_map_id.c_str(), ctxt.start_port, ctxt.end_port); + return true; +} diff --git a/orchagent/dash/dashportmaporch.h b/orchagent/dash/dashportmaporch.h new file mode 100644 index 00000000000..7fab1257d5e --- /dev/null +++ b/orchagent/dash/dashportmaporch.h @@ -0,0 +1,69 @@ +#pragma once + +#include "dbconnector.h" +#include "zmqorch.h" +#include "dash_api/outbound_port_map_range.pb.h" +#include "bulker.h" + +struct DashPortMapBulkContext +{ + std::deque port_map_oids; + std::deque port_map_statuses; + + DashPortMapBulkContext() {} + DashPortMapBulkContext(const DashPortMapBulkContext &) = delete; + DashPortMapBulkContext(DashPortMapBulkContext &) = delete; + + void clear() + { + port_map_oids.clear(); + port_map_statuses.clear(); + } +}; + +struct DashPortMapRangeBulkContext +{ + std::string parent_map_id; + int start_port; + int end_port; + dash::outbound_port_map_range::OutboundPortMapRange metadata; + std::deque port_map_range_statuses; + + DashPortMapRangeBulkContext() {} + DashPortMapRangeBulkContext(const DashPortMapRangeBulkContext &) = delete; + DashPortMapRangeBulkContext(DashPortMapRangeBulkContext &) = delete; + + void clear() + { + port_map_range_statuses.clear(); + } +}; + +class DashPortMapOrch : public ZmqOrch +{ +public: + DashPortMapOrch(swss::DBConnector *db, std::vector &tables, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer); + sai_object_id_t getPortMapOid(const std::string& port_map_name); + +private: + void doTask(ConsumerBase &consumer); + void doTaskPortMapTable(ConsumerBase &consumer); + bool addPortMap(const std::string &port_map_id, DashPortMapBulkContext &ctxt); + bool addPortMapPost(const std::string &port_map_id, DashPortMapBulkContext &ctxt); + bool removePortMap(const std::string &port_map_id, DashPortMapBulkContext &ctxt); + bool removePortMapPost(const std::string &port_map_id, DashPortMapBulkContext &ctxt); + void doTaskPortMapRangeTable(ConsumerBase &consumer); + bool addPortMapRange(DashPortMapRangeBulkContext &ctxt); + bool addPortMapRangePost(DashPortMapRangeBulkContext &ctxt); + bool removePortMapRange(DashPortMapRangeBulkContext &ctxt); + bool removePortMapRangePost(DashPortMapRangeBulkContext &ctxt); + + bool parsePortMapRange(const std::string &key, DashPortMapRangeBulkContext &ctxt); + + ObjectBulker port_map_bulker_; + EntityBulker port_map_range_bulker_; + + std::unordered_map port_map_table_; + std::unique_ptr dash_port_map_result_table_; + std::unique_ptr dash_port_map_range_result_table_; +}; diff --git a/orchagent/dash/dashrouteorch.cpp b/orchagent/dash/dashrouteorch.cpp index 6f99435fb08..9bf82cd094a 100644 --- a/orchagent/dash/dashrouteorch.cpp +++ b/orchagent/dash/dashrouteorch.cpp @@ -19,10 +19,12 @@ #include "dashorch.h" #include "crmorch.h" #include "saihelper.h" +#include "dashtunnelorch.h" #include "taskworker.h" #include "pbutils.h" #include "dash_api/route_type.pb.h" +#include "directory.h" using namespace std; using namespace swss; @@ -33,6 +35,7 @@ extern sai_dash_inbound_routing_api_t* sai_dash_inbound_routing_api; extern sai_object_id_t gSwitchId; extern size_t gMaxBulkSize; extern CrmOrch *gCrmOrch; +extern Directory gDirectory; static std::unordered_map sOutboundAction = { @@ -42,53 +45,76 @@ static std::unordered_map &tableName, DashOrch *dash_orch, ZmqServer *zmqServer) : +DashRouteOrch::DashRouteOrch(DBConnector *db, vector &tableName, DashOrch *dash_orch, DBConnector *app_state_db, ZmqServer *zmqServer) : outbound_routing_bulker_(sai_dash_outbound_routing_api, gMaxBulkSize), inbound_routing_bulker_(sai_dash_inbound_routing_api, gMaxBulkSize), ZmqOrch(db, tableName, zmqServer), dash_orch_(dash_orch) { SWSS_LOG_ENTER(); + dash_route_result_table_ = make_unique
(app_state_db, APP_DASH_ROUTE_TABLE_NAME); + dash_route_rule_result_table_ = make_unique
(app_state_db, APP_DASH_ROUTE_RULE_TABLE_NAME); + dash_route_group_result_table_ = make_unique
(app_state_db, APP_DASH_ROUTE_GROUP_TABLE_NAME); } bool DashRouteOrch::addOutboundRouting(const string& key, OutboundRoutingBulkContext& ctxt) { SWSS_LOG_ENTER(); - bool exists = (routing_entries_.find(key) != routing_entries_.end()); - if (exists) + if (isRouteGroupBound(ctxt.route_group)) { - SWSS_LOG_WARN("Outbound routing entry already exists for %s", key.c_str()); + SWSS_LOG_WARN("Cannot add new route to route group %s as it is already bound", ctxt.route_group.c_str()); return true; } - if (!dash_orch_->getEni(ctxt.eni)) + sai_object_id_t route_group_oid = this->getRouteGroupOid(ctxt.route_group); + if (route_group_oid == SAI_NULL_OBJECT_ID) { - SWSS_LOG_INFO("Retry as ENI entry %s not found", ctxt.eni.c_str()); + SWSS_LOG_INFO("Retry as route group %s not found", ctxt.route_group.c_str()); return false; } - if (ctxt.metadata.has_vnet() && gVnetNameToId.find(ctxt.metadata.vnet()) == gVnetNameToId.end()) + + std::string routing_type_str = dash::route_type::RoutingType_Name(ctxt.metadata.routing_type()); + if (ctxt.metadata.routing_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET && + ctxt.metadata.has_vnet() && gVnetNameToId.find(ctxt.metadata.vnet()) == gVnetNameToId.end()) { - SWSS_LOG_INFO("Retry as vnet %s not found", ctxt.metadata.vnet().c_str()); + SWSS_LOG_INFO("Retry as vnet %s not found for routing type %s", + ctxt.metadata.vnet().c_str(), + routing_type_str.c_str()); + return false; + } + if (ctxt.metadata.routing_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET_DIRECT && + ctxt.metadata.has_vnet_direct() && gVnetNameToId.find(ctxt.metadata.vnet_direct().vnet()) == gVnetNameToId.end()) + { + SWSS_LOG_INFO("Retry as vnet %s not found for routing type %s", + ctxt.metadata.vnet_direct().vnet().c_str(), + routing_type_str.c_str()); return false; } sai_outbound_routing_entry_t outbound_routing_entry; outbound_routing_entry.switch_id = gSwitchId; - outbound_routing_entry.eni_id = dash_orch_->getEni(ctxt.eni)->eni_id; + outbound_routing_entry.outbound_routing_group_id = route_group_oid; swss::copy(outbound_routing_entry.destination, ctxt.destination); sai_attribute_t outbound_routing_attr; vector outbound_routing_attrs; auto& object_statuses = ctxt.object_statuses; + auto it = sOutboundAction.find(ctxt.metadata.routing_type()); + if (it == sOutboundAction.end()) + { + SWSS_LOG_WARN("Routing type %s for outbound routing entry %s not allowed", routing_type_str.c_str(), key.c_str()); + return false; + } + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_ACTION; - outbound_routing_attr.value.u32 = sOutboundAction[ctxt.metadata.action_type()]; + outbound_routing_attr.value.u32 = it->second; outbound_routing_attrs.push_back(outbound_routing_attr); - if (ctxt.metadata.action_type() == dash::route_type::RoutingType::ROUTING_TYPE_DIRECT) + if (ctxt.metadata.routing_type() == dash::route_type::RoutingType::ROUTING_TYPE_DIRECT) { - // Intentional empty line, To direct action type, don't need set extra attributes + // Intentional empty line, for direct routing, don't need set extra attributes } - else if (ctxt.metadata.action_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET + else if (ctxt.metadata.routing_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET && ctxt.metadata.has_vnet() && !ctxt.metadata.vnet().empty()) { @@ -96,7 +122,7 @@ bool DashRouteOrch::addOutboundRouting(const string& key, OutboundRoutingBulkCon outbound_routing_attr.value.oid = gVnetNameToId[ctxt.metadata.vnet()]; outbound_routing_attrs.push_back(outbound_routing_attr); } - else if (ctxt.metadata.action_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET_DIRECT + else if (ctxt.metadata.routing_type() == dash::route_type::RoutingType::ROUTING_TYPE_VNET_DIRECT && ctxt.metadata.has_vnet_direct() && !ctxt.metadata.vnet_direct().vnet().empty() && (ctxt.metadata.vnet_direct().overlay_ip().has_ipv4() || ctxt.metadata.vnet_direct().overlay_ip().has_ipv6())) @@ -114,10 +140,47 @@ bool DashRouteOrch::addOutboundRouting(const string& key, OutboundRoutingBulkCon } else { - SWSS_LOG_WARN("Attribute action for outbound routing entry %s", key.c_str()); + SWSS_LOG_WARN("Routing type %s for outbound routing entry %s either invalid or missing required attributes", + dash::route_type::RoutingType_Name(ctxt.metadata.routing_type()).c_str(), key.c_str()); return false; } + if (ctxt.metadata.has_underlay_sip() && ctxt.metadata.underlay_sip().has_ipv4()) + { + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_UNDERLAY_SIP; + if (!to_sai(ctxt.metadata.underlay_sip(), outbound_routing_attr.value.ipaddr)) + { + return false; + } + outbound_routing_attrs.push_back(outbound_routing_attr); + } + + if (ctxt.metadata.has_metering_class_or()) { + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_OR; + outbound_routing_attr.value.u32 = ctxt.metadata.metering_class_or(); + outbound_routing_attrs.push_back(outbound_routing_attr); + } + + if (ctxt.metadata.has_metering_class_and()) { + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_AND; + outbound_routing_attr.value.u32 = ctxt.metadata.metering_class_and(); + outbound_routing_attrs.push_back(outbound_routing_attr); + } + + if (ctxt.metadata.has_tunnel()) + { + auto dash_tunnel_orch = gDirectory.get(); + sai_object_id_t tunnel_oid = dash_tunnel_orch->getTunnelOid(ctxt.metadata.tunnel()); + if (tunnel_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Retry as tunnel %s not found", ctxt.metadata.tunnel().c_str()); + return false; + } + outbound_routing_attr.id = SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DASH_TUNNEL_ID; + outbound_routing_attr.value.oid = tunnel_oid; + outbound_routing_attrs.push_back(outbound_routing_attr); + } + object_statuses.emplace_back(); outbound_routing_bulker_.create_entry(&object_statuses.back(), &outbound_routing_entry, (uint32_t)outbound_routing_attrs.size(), outbound_routing_attrs.data()); @@ -153,9 +216,6 @@ bool DashRouteOrch::addOutboundRoutingPost(const string& key, const OutboundRout } } - OutboundRoutingEntry entry = { dash_orch_->getEni(ctxt.eni)->eni_id, ctxt.destination, ctxt.metadata }; - routing_entries_[key] = entry; - gCrmOrch->incCrmResUsedCounter(ctxt.destination.isV4() ? CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING); SWSS_LOG_INFO("Outbound routing entry for %s added", key.c_str()); @@ -163,23 +223,21 @@ bool DashRouteOrch::addOutboundRoutingPost(const string& key, const OutboundRout return true; } -bool DashRouteOrch::removeOutboundRouting(const string& key, OutboundRoutingBulkContext& ctxt) +bool DashRouteOrch::removeOutboundRouting(const string& route_group, const IpPrefix& destination, OutboundRoutingBulkContext& ctxt) { SWSS_LOG_ENTER(); - bool exists = (routing_entries_.find(key) != routing_entries_.end()); - if (!exists) + if (isRouteGroupBound(ctxt.route_group)) { - SWSS_LOG_INFO("Failed to find outbound routing entry %s to remove", key.c_str()); - return true; + SWSS_LOG_WARN("Cannot remove route from route group %s as it is already bound", ctxt.route_group.c_str()); + return false; } auto& object_statuses = ctxt.object_statuses; - OutboundRoutingEntry entry = routing_entries_[key]; sai_outbound_routing_entry_t outbound_routing_entry; outbound_routing_entry.switch_id = gSwitchId; - outbound_routing_entry.eni_id = entry.eni; - swss::copy(outbound_routing_entry.destination, entry.destination); + outbound_routing_entry.outbound_routing_group_id = route_group_oid_map_[route_group]; + swss::copy(outbound_routing_entry.destination, destination); object_statuses.emplace_back(); outbound_routing_bulker_.remove_entry(&object_statuses.back(), &outbound_routing_entry); @@ -215,7 +273,6 @@ bool DashRouteOrch::removeOutboundRoutingPost(const string& key, const OutboundR gCrmOrch->decCrmResUsedCounter(ctxt.destination.isV4() ? CrmResourceType::CRM_DASH_IPV4_OUTBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_ROUTING); - routing_entries_.erase(key); SWSS_LOG_INFO("Outbound routing entry for %s removed", key.c_str()); return true; @@ -226,7 +283,7 @@ void DashRouteOrch::doTaskRouteTable(ConsumerBase& consumer) SWSS_LOG_ENTER(); auto it = consumer.m_toSync.begin(); - + uint32_t result; while (it != consumer.m_toSync.end()) { std::map, @@ -242,19 +299,20 @@ void DashRouteOrch::doTaskRouteTable(ConsumerBase& consumer) std::forward_as_tuple()); bool inserted = rc.second; auto &ctxt = rc.first->second; + result = DASH_RESULT_SUCCESS; if (!inserted) { ctxt.clear(); } - string& eni = ctxt.eni; + string& route_group = ctxt.route_group; IpPrefix& destination = ctxt.destination; vector keys = tokenize(key, ':'); - eni = keys[0]; + route_group = keys[0]; string ip_str; - size_t pos = key.find(":", eni.length()); + size_t pos = key.find(":", route_group.length()); ip_str = key.substr(pos + 1); destination = IpPrefix(ip_str); @@ -266,9 +324,23 @@ void DashRouteOrch::doTaskRouteTable(ConsumerBase& consumer) it = consumer.m_toSync.erase(it); continue; } + if (ctxt.metadata.routing_type() == dash::route_type::RoutingType::ROUTING_TYPE_UNSPECIFIED) + { + // Route::action_type is deprecated in favor of Route::routing_type. For messages still using the old action_type field, + // copy it to the new routing_type field. All subsequent operations will use the new field. + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" + ctxt.metadata.set_routing_type(ctxt.metadata.action_type()); + #pragma GCC diagnostic pop + } if (addOutboundRouting(key, ctxt)) { it = consumer.m_toSync.erase(it); + /* + * Write result only when removing from consumer in pre-op + * For other cases, this will be handled in post-op + */ + writeResultToDB(dash_route_result_table_, key, result); } else { @@ -277,9 +349,10 @@ void DashRouteOrch::doTaskRouteTable(ConsumerBase& consumer) } else if (op == DEL_COMMAND) { - if (removeOutboundRouting(key, ctxt)) + if (removeOutboundRouting(route_group, destination, ctxt)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_route_result_table_, key); } else { @@ -301,6 +374,7 @@ void DashRouteOrch::doTaskRouteTable(ConsumerBase& consumer) KeyOpFieldsValuesTuple t = it_prev->second; string key = kfvKey(t); string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; auto found = toBulk.find(make_pair(key, op)); if (found == toBulk.end()) { @@ -325,13 +399,16 @@ void DashRouteOrch::doTaskRouteTable(ConsumerBase& consumer) else { it_prev++; + result = DASH_RESULT_FAILURE; } + writeResultToDB(dash_route_result_table_, key, result); } else if (op == DEL_COMMAND) { if (removeOutboundRoutingPost(key, ctxt)) { it_prev = consumer.m_toSync.erase(it_prev); + removeResultFromDB(dash_route_result_table_, key); } else { @@ -346,12 +423,6 @@ bool DashRouteOrch::addInboundRouting(const string& key, InboundRoutingBulkConte { SWSS_LOG_ENTER(); - bool exists = (routing_rule_entries_.find(key) != routing_rule_entries_.end()); - if (exists) - { - SWSS_LOG_WARN("Inbound routing entry already exists for %s", key.c_str()); - return true; - } if (!dash_orch_->getEni(ctxt.eni)) { SWSS_LOG_INFO("Retry as ENI entry %s not found", ctxt.eni.c_str()); @@ -377,7 +448,7 @@ bool DashRouteOrch::addInboundRouting(const string& key, InboundRoutingBulkConte vector inbound_routing_attrs; inbound_routing_attr.id = SAI_INBOUND_ROUTING_ENTRY_ATTR_ACTION; - inbound_routing_attr.value.u32 = ctxt.metadata.pa_validation() ? SAI_INBOUND_ROUTING_ENTRY_ACTION_VXLAN_DECAP_PA_VALIDATE : SAI_INBOUND_ROUTING_ENTRY_ACTION_VXLAN_DECAP; + inbound_routing_attr.value.u32 = ctxt.metadata.pa_validation() ? SAI_INBOUND_ROUTING_ENTRY_ACTION_TUNNEL_DECAP_PA_VALIDATE : SAI_INBOUND_ROUTING_ENTRY_ACTION_TUNNEL_DECAP; inbound_routing_attrs.push_back(inbound_routing_attr); if (ctxt.metadata.has_vnet()) @@ -387,6 +458,18 @@ bool DashRouteOrch::addInboundRouting(const string& key, InboundRoutingBulkConte inbound_routing_attrs.push_back(inbound_routing_attr); } + if (ctxt.metadata.has_metering_class_or()) { + inbound_routing_attr.id = SAI_INBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_OR; + inbound_routing_attr.value.u32 = ctxt.metadata.metering_class_or(); + inbound_routing_attrs.push_back(inbound_routing_attr); + } + + if (ctxt.metadata.has_metering_class_and()) { + inbound_routing_attr.id = SAI_INBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_AND; + inbound_routing_attr.value.u32 = ctxt.metadata.metering_class_and(); + inbound_routing_attrs.push_back(inbound_routing_attr); + } + object_statuses.emplace_back(); inbound_routing_bulker_.create_entry(&object_statuses.back(), &inbound_routing_entry, (uint32_t)inbound_routing_attrs.size(), inbound_routing_attrs.data()); @@ -422,9 +505,6 @@ bool DashRouteOrch::addInboundRoutingPost(const string& key, const InboundRoutin } } - InboundRoutingEntry entry = { dash_orch_->getEni(ctxt.eni)->eni_id, ctxt.vni, ctxt.sip, ctxt.sip_mask, ctxt.metadata }; - routing_rule_entries_[key] = entry; - gCrmOrch->incCrmResUsedCounter(ctxt.sip.isV4() ? CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING); SWSS_LOG_INFO("Inbound routing entry for %s added", key.c_str()); @@ -436,22 +516,14 @@ bool DashRouteOrch::removeInboundRouting(const string& key, InboundRoutingBulkCo { SWSS_LOG_ENTER(); - bool exists = (routing_rule_entries_.find(key) != routing_rule_entries_.end()); - if (!exists) - { - SWSS_LOG_INFO("Failed to find inbound routing entry %s to remove", key.c_str()); - return true; - } - auto& object_statuses = ctxt.object_statuses; - InboundRoutingEntry entry = routing_rule_entries_[key]; sai_inbound_routing_entry_t inbound_routing_entry; inbound_routing_entry.switch_id = gSwitchId; - inbound_routing_entry.eni_id = entry.eni; - inbound_routing_entry.vni = entry.vni; - swss::copy(inbound_routing_entry.sip, entry.sip); - swss::copy(inbound_routing_entry.sip_mask, entry.sip_mask); - inbound_routing_entry.priority = entry.metadata.priority(); + inbound_routing_entry.eni_id = dash_orch_->getEni(ctxt.eni)->eni_id; + inbound_routing_entry.vni = ctxt.vni; + swss::copy(inbound_routing_entry.sip, ctxt.sip); + swss::copy(inbound_routing_entry.sip_mask, ctxt.sip_mask); + inbound_routing_entry.priority = ctxt.metadata.priority(); object_statuses.emplace_back(); inbound_routing_bulker_.remove_entry(&object_statuses.back(), &inbound_routing_entry); @@ -487,7 +559,6 @@ bool DashRouteOrch::removeInboundRoutingPost(const string& key, const InboundRou gCrmOrch->decCrmResUsedCounter(ctxt.sip.isV4() ? CrmResourceType::CRM_DASH_IPV4_INBOUND_ROUTING : CrmResourceType::CRM_DASH_IPV6_INBOUND_ROUTING); - routing_rule_entries_.erase(key); SWSS_LOG_INFO("Inbound routing entry for %s removed", key.c_str()); return true; @@ -498,7 +569,7 @@ void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) SWSS_LOG_ENTER(); auto it = consumer.m_toSync.begin(); - + uint32_t result; while (it != consumer.m_toSync.end()) { std::map, @@ -514,6 +585,7 @@ void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) std::forward_as_tuple()); bool inserted = rc.second; auto &ctxt = rc.first->second; + result = DASH_RESULT_SUCCESS; if (!inserted) { @@ -548,6 +620,11 @@ void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) if (addInboundRouting(key, ctxt)) { it = consumer.m_toSync.erase(it); + /* + * Write result only when removing from consumer in pre-op + * For other cases, this will be handled in post-op + */ + writeResultToDB(dash_route_rule_result_table_, key, result); } else { @@ -559,6 +636,7 @@ void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) if (removeInboundRouting(key, ctxt)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_route_rule_result_table_, key); } else { @@ -580,6 +658,7 @@ void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) KeyOpFieldsValuesTuple t = it_prev->second; string key = kfvKey(t); string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; auto found = toBulk.find(make_pair(key, op)); if (found == toBulk.end()) { @@ -603,14 +682,17 @@ void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) } else { + result = DASH_RESULT_FAILURE; it_prev++; } + writeResultToDB(dash_route_rule_result_table_, key, result); } else if (op == DEL_COMMAND) { if (removeInboundRoutingPost(key, ctxt)) { it_prev = consumer.m_toSync.erase(it_prev); + removeResultFromDB(dash_route_rule_result_table_, key); } else { @@ -621,6 +703,179 @@ void DashRouteOrch::doTaskRouteRuleTable(ConsumerBase& consumer) } } +bool DashRouteOrch::addRouteGroup(const string& route_group, const dash::route_group::RouteGroup& entry) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t route_group_oid = this->getRouteGroupOid(route_group); + if (route_group_oid != SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Route group %s already exists", route_group.c_str()); + return true; + } + + sai_status_t status = sai_dash_outbound_routing_api->create_outbound_routing_group(&route_group_oid, gSwitchId, 0, NULL); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create route group %s", route_group.c_str()); + task_process_status handle_status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_OUTBOUND_ROUTING, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + route_group_oid_map_[route_group] = route_group_oid; + SWSS_LOG_INFO("Route group %s added", route_group.c_str()); + + return true; +} + +bool DashRouteOrch::removeRouteGroup(const string& route_group) +{ + SWSS_LOG_ENTER(); + + if (isRouteGroupBound(route_group)) + { + SWSS_LOG_WARN("Cannot remove bound route group %s", route_group.c_str()); + return false; + } + + sai_object_id_t route_group_oid = this->getRouteGroupOid(route_group); + if (route_group_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to find route group %s to remove", route_group.c_str()); + return true; + } + + sai_status_t status = sai_dash_outbound_routing_api->remove_outbound_routing_group(route_group_oid); + if (status != SAI_STATUS_SUCCESS) + { + //Retry later if object is in use + if (status == SAI_STATUS_OBJECT_IN_USE) + { + return false; + } + SWSS_LOG_ERROR("Failed to remove route group %s", route_group.c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_OUTBOUND_ROUTING, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + route_group_oid_map_.erase(route_group); + SWSS_LOG_INFO("Route group %s removed", route_group.c_str()); + + return true; +} + +sai_object_id_t DashRouteOrch::getRouteGroupOid(const string& route_group) const +{ + SWSS_LOG_ENTER(); + + auto it = route_group_oid_map_.find(route_group); + if (it == route_group_oid_map_.end()) + { + return SAI_NULL_OBJECT_ID; + } + + return it->second; +} + +void DashRouteOrch::bindRouteGroup(const std::string& route_group) +{ + auto it = route_group_bind_count_.find(route_group); + + if (it == route_group_bind_count_.end()) + { + route_group_bind_count_[route_group] = 1; + return; + } + it->second++; +} + +void DashRouteOrch::unbindRouteGroup(const std::string& route_group) +{ + auto it = route_group_bind_count_.find(route_group); + + if (it == route_group_bind_count_.end()) + { + SWSS_LOG_WARN("Cannot unbind route group %s since it is not bound to any ENIs", route_group.c_str()); + return; + } + it->second--; + + if (it->second == 0) + { + SWSS_LOG_INFO("Route group %s completely unbound", route_group.c_str()); + route_group_bind_count_.erase(it); + } +} + +bool DashRouteOrch::isRouteGroupBound(const std::string& route_group) const +{ + auto it = route_group_bind_count_.find(route_group); + if (it == route_group_bind_count_.end()) + { + return false; + } + return it->second > 0; +} + +void DashRouteOrch::doTaskRouteGroupTable(ConsumerBase& consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + uint32_t result; + while (it != consumer.m_toSync.end()) + { + auto t = it->second; + string route_group = kfvKey(t); + string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; + if (op == SET_COMMAND) + { + dash::route_group::RouteGroup entry; + if (!parsePbMessage(kfvFieldsValues(t), entry)) + { + SWSS_LOG_WARN("Requires protobuf at RouteGroup :%s", route_group.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (addRouteGroup(route_group, entry)) + { + it = consumer.m_toSync.erase(it); + } + else + { + result = DASH_RESULT_FAILURE; + it++; + } + writeResultToDB(dash_route_group_result_table_, route_group, result, entry.version()); + } + else if (op == DEL_COMMAND) + { + if (removeRouteGroup(route_group)) + { + it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_route_group_result_table_, route_group); + } + else + { + it++; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation %s", op.c_str()); + it = consumer.m_toSync.erase(it); + } + } +} + void DashRouteOrch::doTask(ConsumerBase& consumer) { SWSS_LOG_ENTER(); @@ -637,6 +892,10 @@ void DashRouteOrch::doTask(ConsumerBase& consumer) { doTaskRouteRuleTable(consumer); } + else if (tn == APP_DASH_ROUTE_GROUP_TABLE_NAME) + { + doTaskRouteGroupTable(consumer); + } else { SWSS_LOG_ERROR("Unknown table: %s", tn.c_str()); diff --git a/orchagent/dash/dashrouteorch.h b/orchagent/dash/dashrouteorch.h index d61fcaa9363..fb2359bec0f 100644 --- a/orchagent/dash/dashrouteorch.h +++ b/orchagent/dash/dashrouteorch.h @@ -18,30 +18,11 @@ #include "dash_api/route.pb.h" #include "dash_api/route_rule.pb.h" - - -struct OutboundRoutingEntry -{ - sai_object_id_t eni; - swss::IpPrefix destination; - dash::route::Route metadata; -}; - -struct InboundRoutingEntry -{ - sai_object_id_t eni; - uint32_t vni; - swss::IpAddress sip; - swss::IpAddress sip_mask; - dash::route_rule::RouteRule metadata; -}; - -typedef std::map RoutingTable; -typedef std::map RoutingRuleTable; +#include "dash_api/route_group.pb.h" struct OutboundRoutingBulkContext { - std::string eni; + std::string route_group; swss::IpPrefix destination; dash::route::Route metadata; std::deque object_statuses; @@ -76,24 +57,34 @@ struct InboundRoutingBulkContext class DashRouteOrch : public ZmqOrch { public: - DashRouteOrch(swss::DBConnector *db, std::vector &tables, DashOrch *dash_orch, swss::ZmqServer *zmqServer); + DashRouteOrch(swss::DBConnector *db, std::vector &tables, DashOrch *dash_orch, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer); + sai_object_id_t getRouteGroupOid(const std::string& route_group) const; + void bindRouteGroup(const std::string& route_group); + void unbindRouteGroup(const std::string& route_group); + bool isRouteGroupBound(const std::string& route_group) const; private: - RoutingTable routing_entries_; - RoutingRuleTable routing_rule_entries_; EntityBulker outbound_routing_bulker_; EntityBulker inbound_routing_bulker_; DashOrch *dash_orch_; + std::unordered_map route_group_oid_map_; + std::unordered_map route_group_bind_count_; + std::unique_ptr dash_route_result_table_; + std::unique_ptr dash_route_rule_result_table_; + std::unique_ptr dash_route_group_result_table_; void doTask(ConsumerBase &consumer); void doTaskRouteTable(ConsumerBase &consumer); void doTaskRouteRuleTable(ConsumerBase &consumer); + void doTaskRouteGroupTable(ConsumerBase &consumer); bool addOutboundRouting(const std::string& key, OutboundRoutingBulkContext& ctxt); bool addOutboundRoutingPost(const std::string& key, const OutboundRoutingBulkContext& ctxt); - bool removeOutboundRouting(const std::string& key, OutboundRoutingBulkContext& ctxt); + bool removeOutboundRouting(const std::string& route_group, const swss::IpPrefix& destination, OutboundRoutingBulkContext& ctxt); bool removeOutboundRoutingPost(const std::string& key, const OutboundRoutingBulkContext& ctxt); bool addInboundRouting(const std::string& key, InboundRoutingBulkContext& ctxt); bool addInboundRoutingPost(const std::string& key, const InboundRoutingBulkContext& ctxt); bool removeInboundRouting(const std::string& key, InboundRoutingBulkContext& ctxt); bool removeInboundRoutingPost(const std::string& key, const InboundRoutingBulkContext& ctxt); + bool addRouteGroup(const std::string& key, const dash::route_group::RouteGroup& entry); + bool removeRouteGroup(const std::string& key); }; diff --git a/orchagent/dash/dashtagmgr.cpp b/orchagent/dash/dashtagmgr.cpp index 834442395b5..1cbc6a9422e 100644 --- a/orchagent/dash/dashtagmgr.cpp +++ b/orchagent/dash/dashtagmgr.cpp @@ -67,12 +67,6 @@ task_process_status DashTagMgr::update(const string& tag_id, const DashTag& new_ // Update tag prefixes tag.m_prefixes = new_tag.m_prefixes; - for (auto& group_it: tag.m_group_refcnt) - { - const auto& group_id = group_it.first; - m_dash_acl_orch->getDashAclGroupMgr().onUpdate(group_id, tag_id, tag); - } - return task_success; } @@ -87,7 +81,7 @@ task_process_status DashTagMgr::remove(const string& tag_id) return task_success; } - if (!tag_it->second.m_group_refcnt.empty()) + if (!tag_it->second.m_groups.empty()) { SWSS_LOG_WARN("Prefix tag %s is still in use by ACL rule(s)", tag_id.c_str()); return task_need_retry; @@ -123,9 +117,9 @@ task_process_status DashTagMgr::attach(const string& tag_id, const string& group ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); auto& tag = tag_it->second; - ++tag.m_group_refcnt[group_id]; + tag.m_groups.insert(group_id); - SWSS_LOG_NOTICE("Tag %s is used by ACL group %s refcnt: %u", tag_id.c_str(), group_id.c_str(), tag.m_group_refcnt[group_id]); + SWSS_LOG_NOTICE("Tag %s is used by ACL group %s", tag_id.c_str(), group_id.c_str()); return task_success; } @@ -136,15 +130,8 @@ task_process_status DashTagMgr::detach(const string& tag_id, const string& group auto tag_it = m_tag_table.find(tag_id); ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); auto& tag = tag_it->second; - auto group_it = tag.m_group_refcnt.find(group_id); - ABORT_IF_NOT(group_it != tag.m_group_refcnt.end(), "Group %s is not attached to the tag %s", group_id.c_str(), tag_id.c_str()); - - --group_it->second; - if (!group_it->second) - { - tag.m_group_refcnt.erase(group_it); - SWSS_LOG_NOTICE("Tag %s is no longer used by ACL group %s", tag_id.c_str(), group_id.c_str()); - } + tag.m_groups.erase(group_id); + SWSS_LOG_NOTICE("Tag %s is no longer used by ACL group %s", tag_id.c_str(), group_id.c_str()); return task_success; } diff --git a/orchagent/dash/dashtagmgr.h b/orchagent/dash/dashtagmgr.h index 4b69efdaa8e..7269fe49e85 100644 --- a/orchagent/dash/dashtagmgr.h +++ b/orchagent/dash/dashtagmgr.h @@ -15,7 +15,7 @@ struct DashTag { sai_ip_addr_family_t m_ip_version; std::vector m_prefixes; - std::unordered_map m_group_refcnt; + std::unordered_set m_groups; }; bool from_pb(const dash::tag::PrefixTag& data, DashTag& tag); diff --git a/orchagent/dash/dashtunnelorch.cpp b/orchagent/dash/dashtunnelorch.cpp new file mode 100644 index 00000000000..4068bb6d09d --- /dev/null +++ b/orchagent/dash/dashtunnelorch.cpp @@ -0,0 +1,617 @@ +#include "dashtunnelorch.h" +#include "dashorch.h" +#include "orch.h" +#include "sai.h" +#include "taskworker.h" +#include "pbutils.h" +#include "directory.h" +#include "saihelper.h" + +extern size_t gMaxBulkSize; +extern sai_dash_tunnel_api_t* sai_dash_tunnel_api; +extern sai_object_id_t gSwitchId; +extern Directory gDirectory; + +bool ipAddrLt(const dash::types::IpAddress& lhs, const dash::types::IpAddress& rhs) +{ + if (lhs.has_ipv4() && rhs.has_ipv4()) + { + return lhs.ipv4() < rhs.ipv4(); + } + else if (lhs.has_ipv6() && rhs.has_ipv6()) + { + return lhs.ipv6() < rhs.ipv6(); + } + else if (lhs.has_ipv4() && rhs.has_ipv6()) + { + return true; + } + else if (lhs.has_ipv6() && rhs.has_ipv4()) + { + return false; + } + SWSS_LOG_ERROR("One or more IP addresses not set"); + return false; +} + +bool ipAddrEq(const dash::types::IpAddress& lhs, const dash::types::IpAddress& rhs) +{ + if (lhs.has_ipv4() && rhs.has_ipv4()) + { + return lhs.ipv4() == rhs.ipv4(); + } + else if (lhs.has_ipv6() && rhs.has_ipv6()) + { + return lhs.ipv6() == rhs.ipv6(); + } + return false; +}; + + +DashTunnelOrch::DashTunnelOrch( + swss::DBConnector *db, + std::vector &tables, + swss::DBConnector *app_state_db, + swss::ZmqServer *zmqServer) : + tunnel_bulker_(sai_dash_tunnel_api, gSwitchId, gMaxBulkSize, SAI_OBJECT_TYPE_DASH_TUNNEL), + tunnel_member_bulker_(sai_dash_tunnel_api, gSwitchId, gMaxBulkSize, SAI_OBJECT_TYPE_DASH_TUNNEL_MEMBER), + tunnel_nhop_bulker_(sai_dash_tunnel_api, gSwitchId, gMaxBulkSize, SAI_OBJECT_TYPE_DASH_TUNNEL_NEXT_HOP), + ZmqOrch(db, tables, zmqServer) +{ + SWSS_LOG_ENTER(); + dash_tunnel_result_table_ = std::make_unique(app_state_db, APP_DASH_TUNNEL_TABLE_NAME); +} + +sai_object_id_t DashTunnelOrch::getTunnelOid(const std::string& tunnel_name) +{ + SWSS_LOG_ENTER(); + auto it = tunnel_table_.find(tunnel_name); + if (it == tunnel_table_.end()) + { + return SAI_NULL_OBJECT_ID; + } + return it->second.tunnel_oid; +} + +void DashTunnelOrch::doTask(ConsumerBase &consumer) +{ + /* bulk ops here need to happen in multiple steps because DASH_TUNNEL_MEMBERS depend on DASH_TUNNEL and DASH_TUNNEL_NEXT_HOPS already existing + 1. Pre-bulk 1: + - For SET, add DASH_TUNNEL and DASH_TUNNEL_NEXT_HOP objects to bulker + - For DEL, add DASH_TUNNEL, DASH_TUNNEL_NEXT_HOP, and DASH_TUNNEL_MEMBER objects to bulker + 2. Flush tunnel_member bulker first, then tunnel and tunnel_nhop bulkers to SAI + - There shouldn't be any SET ops in the tunnel_member bulker yet, only DEL. We need to flush it first, + otherwise SAI cannot delete the referenced tunnel and tunnel_nhop + 3. Post-bulk 1/pre-bulk 2: + - For SET, add DASH_TUNNEL_MEMBER objects to bulker + - For DEL, we are done + 4. Flush tunnel_member bulker to SAI + 5. Post-bulk 2: + - For SET, we are done + */ + SWSS_LOG_ENTER(); + + const auto& tn = consumer.getTableName(); + uint32_t result; + SWSS_LOG_INFO("doTask: %s", tn.c_str()); + if (tn != APP_DASH_TUNNEL_TABLE_NAME) + { + SWSS_LOG_ERROR("DashTunnelOrch does not support table %s", tn.c_str()); + return; + } + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + std::map, + DashTunnelBulkContext> toBulk; + + while (it != consumer.m_toSync.end()) + { + swss::KeyOpFieldsValuesTuple t = it->second; + std::string tunnel_name = kfvKey(t); + std::string op = kfvOp(t); + auto rc = toBulk.emplace(std::piecewise_construct, + std::forward_as_tuple(tunnel_name, op), + std::forward_as_tuple()); + bool inserted = rc.second; + auto& ctxt = rc.first->second; + result = DASH_RESULT_SUCCESS; + if (!inserted) + { + ctxt.clear(); + } + if (op == SET_COMMAND) + { + if (!parsePbMessage(kfvFieldsValues(t), ctxt.metadata)) + { + SWSS_LOG_WARN("Requires protobuf at Tunnel :%s", tunnel_name.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + if (addTunnel(tunnel_name, ctxt)) + { + it = consumer.m_toSync.erase(it); + /* + * Write result only when removing from consumer in pre-op + * For other cases, this will be handled in post-op + * TODO: There are cases where addTunnel returns true for + * errors that are not retried. Such cases need to be + * written to result table as a failure instead of success. + */ + writeResultToDB(dash_tunnel_result_table_, tunnel_name, result); + } + else + { + it++; + } + } + else if (op == DEL_COMMAND) + { + if (removeTunnel(tunnel_name, ctxt)) + { + /* + * Postpone removal of result from result table until after + * tunnel members are removed. + */ + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + } + } + + tunnel_member_bulker_.flush(); + tunnel_bulker_.flush(); + tunnel_nhop_bulker_.flush(); + + auto it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + swss::KeyOpFieldsValuesTuple t = it_prev->second; + std::string tunnel_name = kfvKey(t); + std::string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; + auto found = toBulk.find(std::make_pair(tunnel_name, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + auto& ctxt = found->second; + + if (op == SET_COMMAND) + { + if (addTunnelPost(tunnel_name, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + /* + * The result should be written here only if the tunnel has + * one endpoint. For more tunnel endpoints, we need to wait + * until after tunnel members post-op. + */ + if (ctxt.metadata.endpoints_size() == 1) + { + writeResultToDB(dash_tunnel_result_table_, tunnel_name, + result); + } + } + else + { + it_prev++; + } + } + else if (op == DEL_COMMAND) + { + if (removeTunnelPost(tunnel_name, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + removeResultFromDB(dash_tunnel_result_table_, tunnel_name); + } + else + { + it_prev++; + } + } + } + + tunnel_member_bulker_.flush(); + + it_prev = consumer.m_toSync.begin(); + while (it_prev != it) + { + swss::KeyOpFieldsValuesTuple t = it_prev->second; + std::string tunnel_name = kfvKey(t); + std::string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; + auto found = toBulk.find(std::make_pair(tunnel_name, op)); + if (found == toBulk.end()) + { + it_prev++; + continue; + } + auto& ctxt = found->second; + + if (op == SET_COMMAND) + { + if (addTunnelMemberPost(tunnel_name, ctxt)) + { + it_prev = consumer.m_toSync.erase(it_prev); + } + else + { + result = DASH_RESULT_FAILURE; + it_prev++; + } + /* + * Write result for tunnels with more than one endpoint. + */ + writeResultToDB(dash_tunnel_result_table_, tunnel_name, result); + } + else if (op == DEL_COMMAND) + { + // We should never get here + it_prev = consumer.m_toSync.erase(it_prev); + } + } + } +} + +bool DashTunnelOrch::addTunnel(const std::string& tunnel_name, DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + auto dash_orch = gDirectory.get(); + if (!dash_orch->hasApplianceEntry()) + { + SWSS_LOG_WARN("DASH appliance entry not found, skipping DASH tunnel %s creation", tunnel_name.c_str()); + return false; + } + std::vector tunnel_attrs; + sai_attribute_t tunnel_attr; + bool remove_from_consumer = true; + + bool exists = (tunnel_table_.find(tunnel_name) != tunnel_table_.end()); + if (exists) + { + SWSS_LOG_WARN("DASH tunnel %s already exists", tunnel_name.c_str()); + return remove_from_consumer; + } + + tunnel_attr.id = SAI_DASH_TUNNEL_ATTR_MAX_MEMBER_SIZE; + tunnel_attr.value.u32 = ctxt.metadata.endpoints_size(); + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_DASH_TUNNEL_ATTR_DASH_ENCAPSULATION; + switch (ctxt.metadata.encap_type()) + { + case dash::route_type::ENCAP_TYPE_VXLAN: + tunnel_attr.value.u32 = SAI_DASH_ENCAPSULATION_VXLAN; + break; + case dash::route_type::ENCAP_TYPE_NVGRE: + tunnel_attr.value.u32 = SAI_DASH_ENCAPSULATION_NVGRE; + break; + default: + SWSS_LOG_ERROR("Unsupported encap type %d", ctxt.metadata.encap_type()); + return remove_from_consumer; + } + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_DASH_TUNNEL_ATTR_TUNNEL_KEY; + tunnel_attr.value.u32 = ctxt.metadata.vni(); + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_DASH_TUNNEL_ATTR_SIP; + auto tunnel_sip = dash_orch->getApplianceVip(); + to_sai(tunnel_sip, tunnel_attr.value.ipaddr); + tunnel_attrs.push_back(tunnel_attr); + + // deduplicate endpoint IPs + std::sort(ctxt.metadata.mutable_endpoints()->begin(), ctxt.metadata.mutable_endpoints()->end(), ipAddrLt); + auto last = std::unique(ctxt.metadata.mutable_endpoints()->begin(), ctxt.metadata.mutable_endpoints()->end(), ipAddrEq); + ctxt.metadata.mutable_endpoints()->erase(last, ctxt.metadata.mutable_endpoints()->end()); + + if (ctxt.metadata.endpoints_size() == 1) + { + tunnel_attr.id = SAI_DASH_TUNNEL_ATTR_DIP; + to_sai(ctxt.metadata.endpoints(0), tunnel_attr.value.ipaddr); + tunnel_attrs.push_back(tunnel_attr); + } + else + { + addTunnelNextHops(tunnel_name, ctxt); + } + + auto& object_ids = ctxt.tunnel_object_ids; + object_ids.emplace_back(); + tunnel_bulker_.create_entry(&object_ids.back(), (uint32_t) tunnel_attrs.size(), tunnel_attrs.data()); + + remove_from_consumer = false; + return remove_from_consumer; +} + +void DashTunnelOrch::addTunnelNextHops(const std::string& tunnel_name, DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + sai_attribute_t tunnel_nhop_attr; + auto& nhop_object_ids = ctxt.tunnel_nhop_object_ids; + for (auto ip : ctxt.metadata.endpoints()) + { + tunnel_nhop_attr.id = SAI_DASH_TUNNEL_NEXT_HOP_ATTR_DIP; + to_sai(ip, tunnel_nhop_attr.value.ipaddr); + nhop_object_ids.emplace_back(); + tunnel_nhop_bulker_.create_entry(&nhop_object_ids.back(), 1, &tunnel_nhop_attr); + } +} + +bool DashTunnelOrch::addTunnelPost(const std::string& tunnel_name, DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + bool remove_from_consumer = true; + const auto& object_ids = ctxt.tunnel_object_ids; + if (object_ids.empty()) + { + return remove_from_consumer; + } + + auto it_id = object_ids.begin(); + sai_object_id_t tunnel_oid = *it_id++; + if (tunnel_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to create DASH tunnel entry for %s", tunnel_name.c_str()); + // even if tunnel creation fails, we need to continue checking nexthop creations + // to remove nexthops created for this tunnel + } + else + { + DashTunnelEntry entry = { tunnel_oid, std::map(), std::string() }; + tunnel_table_[tunnel_name] = entry; + remove_from_consumer = false; + SWSS_LOG_INFO("Tunnel entry added for %s", tunnel_name.c_str()); + } + + return addTunnelNextHopsPost(tunnel_name, ctxt, remove_from_consumer); +} + +bool DashTunnelOrch::addTunnelNextHopsPost(const std::string& tunnel_name, DashTunnelBulkContext& ctxt, const bool parent_tunnel_removed) +{ + SWSS_LOG_ENTER(); + + if (ctxt.metadata.endpoints_size() <= 1) + { + return parent_tunnel_removed; + } + + bool remove_from_consumer = true; + const auto& nhop_oids = ctxt.tunnel_nhop_object_ids; + auto it_nhop = nhop_oids.begin(); + for (auto ip : ctxt.metadata.endpoints()) + { + sai_object_id_t nhop_oid = *it_nhop++; + if (nhop_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to create DASH tunnel next hop entry for tunnel %s, endpoint %s", tunnel_name.c_str(), to_string(ip).c_str()); + continue; + } + + if (parent_tunnel_removed) + { + SWSS_LOG_INFO("Removing tunnel next hop OID %" PRIx64" for failed DASH tunnel %s endpoint %s", nhop_oid, tunnel_name.c_str(), to_string(ip).c_str()); + sai_status_t status = sai_dash_tunnel_api->remove_dash_tunnel_next_hop(nhop_oid); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove DASH tunnel next hop OID %" PRIx64" for failed DASH tunnel %s endpoint %s", nhop_oid, tunnel_name.c_str(), to_string(ip).c_str()); + } + continue; + } + + DashTunnelEndpointEntry endpoint = { nhop_oid, SAI_NULL_OBJECT_ID }; + tunnel_table_[tunnel_name].endpoints[to_string(ip)] = endpoint; + SWSS_LOG_INFO("Tunnel next hop entry added for tunnel %s, endpoint %s", tunnel_name.c_str(), to_string(ip).c_str()); + addTunnelMember(tunnel_table_[tunnel_name].tunnel_oid, nhop_oid, ctxt); + remove_from_consumer = false; // if we add at least one tunnel member, tunnel needs to stay in consumer for tunnel member post-bulk ops + } + return remove_from_consumer; +} + +void DashTunnelOrch::addTunnelMember(const sai_object_id_t tunnel_oid, const sai_object_id_t nhop_oid, DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + std::vector tunnel_member_attrs; + sai_attribute_t tunnel_member_attr; + + tunnel_member_attr.id = SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_ID; + tunnel_member_attr.value.oid = tunnel_oid; + tunnel_member_attrs.push_back(tunnel_member_attr); + + tunnel_member_attr.id = SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_NEXT_HOP_ID; + tunnel_member_attr.value.oid = nhop_oid; + tunnel_member_attrs.push_back(tunnel_member_attr); + + auto& member_object_ids = ctxt.tunnel_member_object_ids; + member_object_ids.emplace_back(); + tunnel_member_bulker_.create_entry(&member_object_ids.back(), (uint32_t) tunnel_member_attrs.size(), tunnel_member_attrs.data()); +} + +bool DashTunnelOrch::addTunnelMemberPost(const std::string& tunnel_name, const DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + bool remove_from_consumer = true; + const auto& member_oids = ctxt.tunnel_member_object_ids; + auto it_member = member_oids.begin(); + for (auto ip : ctxt.metadata.endpoints()) + { + sai_object_id_t member_oid = *it_member++; + if (member_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_WARN("Failed to create DASH tunnel member entry for tunnel %s, endpoint %s, continuing", tunnel_name.c_str(), to_string(ip).c_str()); + continue; + } + tunnel_table_[tunnel_name].endpoints[to_string(ip)].tunnel_member_oid = member_oid; + SWSS_LOG_INFO("Tunnel member entry added for tunnel %s, endpoint %s", tunnel_name.c_str(), to_string(ip).c_str()); + } + return remove_from_consumer; +} + +bool DashTunnelOrch::removeTunnel(const std::string& tunnel_name, DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + bool remove_from_consumer = true; + + auto it = tunnel_table_.find(tunnel_name); + if (it == tunnel_table_.end()) + { + SWSS_LOG_WARN("Failed to find DASH tunnel %s to remove", tunnel_name.c_str()); + return remove_from_consumer; + } + + auto& endpoints = it->second.endpoints; + if (endpoints.size() > 1) + { + removeTunnelEndpoints(tunnel_name, ctxt); + } + + + auto& object_statuses = ctxt.tunnel_object_statuses; + object_statuses.emplace_back(); + tunnel_bulker_.remove_entry(&object_statuses.back(), it->second.tunnel_oid); + + remove_from_consumer = false; + return remove_from_consumer; +} + +bool DashTunnelOrch::removeTunnelPost(const std::string& tunnel_name, const DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + bool remove_from_consumer = removeTunnelEndpointsPost(tunnel_name, ctxt); + if (!remove_from_consumer) + { + // If endpoint removal requires a retry, exit immediately since the tunnel can't be deleted if endpoints still exist + return remove_from_consumer; + } + + const auto& object_statuses = ctxt.tunnel_object_statuses; + if (object_statuses.empty()) + { + return remove_from_consumer; + } + + auto it_status = object_statuses.begin(); + sai_status_t status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_OBJECT_IN_USE) + { + // Retry later if object has non-zero reference to it + SWSS_LOG_WARN("DASH tunnel %s is in use, cannot remove", tunnel_name.c_str()); + remove_from_consumer = false; + return remove_from_consumer; + } + + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_TUNNEL, status); + if (handle_status != task_success) + { + remove_from_consumer = parseHandleSaiStatusFailure(handle_status); + return remove_from_consumer; + } + } + + tunnel_table_.erase(tunnel_name); + SWSS_LOG_NOTICE("DASH tunnel entry removed for %s", tunnel_name.c_str()); + + remove_from_consumer = true; + return remove_from_consumer; +} + +void DashTunnelOrch::removeTunnelEndpoints(const std::string& tunnel_name, DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + + auto it = tunnel_table_.find(tunnel_name); + if (it == tunnel_table_.end()) + { + SWSS_LOG_WARN("Failed to find DASH tunnel %s to remove endpoints from", tunnel_name.c_str()); + return; + } + + auto& endpoints = it->second.endpoints; + for (auto& endpoint : endpoints) + { + auto& tunnel_member_statuses = ctxt.tunnel_member_object_statuses; + if (endpoint.second.tunnel_member_oid == SAI_NULL_OBJECT_ID) + { + tunnel_member_statuses.emplace_back(SAI_STATUS_SUCCESS); + } + else + { + tunnel_member_statuses.emplace_back(); + tunnel_member_bulker_.remove_entry(&tunnel_member_statuses.back(), endpoint.second.tunnel_member_oid); + } + + // No null OID check needed since we cannot delete a tunnel nhop without first deleting the associated tunnel member + // If the endpoint entry exists, safe to assume that at least the tunnel nhop still exists + auto& tunnel_nhop_statuses = ctxt.tunnel_nhop_object_statuses; + tunnel_nhop_statuses.emplace_back(); + tunnel_nhop_bulker_.remove_entry(&tunnel_nhop_statuses.back(), endpoint.second.tunnel_nhop_oid); + } +} + +bool DashTunnelOrch::removeTunnelEndpointsPost(const std::string& tunnel_name, const DashTunnelBulkContext& ctxt) +{ + SWSS_LOG_ENTER(); + bool remove_from_consumer = true; + + const auto& tunnel_member_statuses = ctxt.tunnel_member_object_statuses; + const auto& tunnel_nhop_statuses = ctxt.tunnel_nhop_object_statuses; + if (tunnel_member_statuses.empty() && tunnel_nhop_statuses.empty()) + { + return remove_from_consumer; + } + + auto tm_it_status = tunnel_member_statuses.begin(); + auto nh_it_status = tunnel_nhop_statuses.begin(); + auto endpoint_it = tunnel_table_[tunnel_name].endpoints.begin(); + while (endpoint_it != tunnel_table_[tunnel_name].endpoints.end()) + { + sai_status_t tm_status = *tm_it_status++; + sai_status_t nh_status = *nh_it_status++; + if (tm_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("DASH tunnel member removal for tunnel %s endpoint %s failed with %s", tunnel_name.c_str(), endpoint_it->first.c_str(), sai_serialize_status(tm_status).c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_TUNNEL, tm_status); + if (handle_status == task_need_retry) + { + remove_from_consumer = false; + } + } + else + { + if (endpoint_it->second.tunnel_member_oid != SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("DASH tunnel member removed for tunnel %s ip %s", tunnel_name.c_str(), endpoint_it->first.c_str()); + endpoint_it->second.tunnel_member_oid = SAI_NULL_OBJECT_ID; + } + } + + if (nh_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("DASH tunnel next hop removal for tunnel %s endpoint %s failed with %s", tunnel_name.c_str(), endpoint_it->first.c_str(), sai_serialize_status(tm_status).c_str()); + task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_TUNNEL, nh_status); + if (handle_status == task_need_retry) + { + remove_from_consumer = false; + } + } + else + { + SWSS_LOG_INFO("DASH tunnel next hop removed for tunnel %s ip %s", tunnel_name.c_str(), endpoint_it->first.c_str()); + endpoint_it = tunnel_table_[tunnel_name].endpoints.erase(endpoint_it); + continue; + } + endpoint_it++; + } + + return remove_from_consumer; +} diff --git a/orchagent/dash/dashtunnelorch.h b/orchagent/dash/dashtunnelorch.h new file mode 100644 index 00000000000..28ded117007 --- /dev/null +++ b/orchagent/dash/dashtunnelorch.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include "dash_api/tunnel.pb.h" +#include "bulker.h" +#include "dbconnector.h" +#include "zmqorch.h" +#include "zmqserver.h" + +struct DashTunnelEndpointEntry +{ + sai_object_id_t tunnel_nhop_oid; + sai_object_id_t tunnel_member_oid; +}; +struct DashTunnelEntry +{ + sai_object_id_t tunnel_oid; + std::map endpoints; + std::string endpoint; +}; + +struct DashTunnelBulkContext +{ + std::deque tunnel_object_ids; + std::deque tunnel_object_statuses; + std::deque tunnel_member_object_ids; + std::deque tunnel_member_object_statuses; + std::deque tunnel_nhop_object_ids; + std::deque tunnel_nhop_object_statuses; + dash::tunnel::Tunnel metadata; + + DashTunnelBulkContext() {} + DashTunnelBulkContext(const DashTunnelBulkContext&) = delete; + DashTunnelBulkContext(DashTunnelBulkContext&&) = delete; + + void clear() + { + tunnel_object_ids.clear(); + tunnel_object_statuses.clear(); + tunnel_member_object_ids.clear(); + tunnel_member_object_statuses.clear(); + tunnel_nhop_object_ids.clear(); + tunnel_nhop_object_statuses.clear(); + } +}; + +class DashTunnelOrch : public ZmqOrch +{ +public: + DashTunnelOrch( + swss::DBConnector *db, + std::vector &tables, + swss::DBConnector *app_state_db, + swss::ZmqServer *zmqServer); + + sai_object_id_t getTunnelOid(const std::string& tunnel_name); + +private: + ObjectBulker tunnel_bulker_; + ObjectBulker tunnel_member_bulker_; + ObjectBulker tunnel_nhop_bulker_; + std::unordered_map tunnel_table_; + std::unique_ptr dash_tunnel_result_table_; + + void doTask(ConsumerBase &consumer); + bool addTunnel(const std::string& tunnel_name, DashTunnelBulkContext& ctxt); + bool addTunnelPost(const std::string& tunnel_name, DashTunnelBulkContext& ctxt); + void addTunnelNextHops(const std::string& tunnel_name, DashTunnelBulkContext& ctxt); + bool addTunnelNextHopsPost(const std::string& tunnel_name, DashTunnelBulkContext& ctxt, const bool tunnel_succeess); + void addTunnelMember(const sai_object_id_t tunnel_oid, const sai_object_id_t nhop_oid, DashTunnelBulkContext& ctxt); + bool addTunnelMemberPost(const std::string& tunnel_name, const DashTunnelBulkContext& ctxt); + bool removeTunnel(const std::string& tunnel_name, DashTunnelBulkContext& ctxt); + bool removeTunnelPost(const std::string& tunnel_name, const DashTunnelBulkContext& ctxt); + bool removeTunnelNextHop(const std::string& tunnel_name, DashTunnelBulkContext& ctxt); + bool removeTunnelNextHopPost(const std::string& tunnel_name, const DashTunnelBulkContext& ctxt); + void removeTunnelEndpoints(const std::string& tunnel_name, DashTunnelBulkContext& ctxt); + bool removeTunnelEndpointsPost(const std::string& tunnel_name, const DashTunnelBulkContext& ctxt); +}; diff --git a/orchagent/dash/dashvnetorch.cpp b/orchagent/dash/dashvnetorch.cpp index e06f1b1e383..adc6e7e642c 100644 --- a/orchagent/dash/dashvnetorch.cpp +++ b/orchagent/dash/dashvnetorch.cpp @@ -20,6 +20,9 @@ #include "dashorch.h" #include "crmorch.h" #include "saihelper.h" +#include "directory.h" +#include "dashtunnelorch.h" +#include "dashportmaporch.h" #include "taskworker.h" #include "pbutils.h" @@ -34,14 +37,17 @@ extern sai_dash_pa_validation_api_t* sai_dash_pa_validation_api; extern sai_object_id_t gSwitchId; extern size_t gMaxBulkSize; extern CrmOrch *gCrmOrch; +extern Directory gDirectory; -DashVnetOrch::DashVnetOrch(DBConnector *db, vector &tables, ZmqServer *zmqServer) : +DashVnetOrch::DashVnetOrch(DBConnector *db, vector &tables, DBConnector *app_state_db, ZmqServer *zmqServer) : vnet_bulker_(sai_dash_vnet_api, gSwitchId, gMaxBulkSize), outbound_ca_to_pa_bulker_(sai_dash_outbound_ca_to_pa_api, gMaxBulkSize), pa_validation_bulker_(sai_dash_pa_validation_api, gMaxBulkSize), ZmqOrch(db, tables, zmqServer) { SWSS_LOG_ENTER(); + dash_vnet_result_table_ = make_unique
(app_state_db, APP_DASH_VNET_TABLE_NAME); + dash_vnet_map_result_table_ = make_unique
(app_state_db, APP_DASH_VNET_MAPPING_TABLE_NAME); } bool DashVnetOrch::addVnet(const string& vnet_name, DashVnetBulkContext& ctxt) @@ -54,6 +60,12 @@ bool DashVnetOrch::addVnet(const string& vnet_name, DashVnetBulkContext& ctxt) SWSS_LOG_WARN("Vnet already exists for %s", vnet_name.c_str()); return true; } + DashOrch* dash_orch = gDirectory.get(); + if (!dash_orch->hasApplianceEntry()) + { + SWSS_LOG_INFO("Retry as no appliance table entry found"); + return false; + } uint32_t attr_count = 1; auto& object_ids = ctxt.object_ids; @@ -84,7 +96,7 @@ bool DashVnetOrch::addVnetPost(const string& vnet_name, const DashVnetBulkContex return false; } - VnetEntry entry = { id, ctxt.metadata }; + VnetEntry entry = { id, ctxt.metadata, std::set() }; vnet_table_[vnet_name] = entry; gVnetNameToId[vnet_name] = id; @@ -106,21 +118,26 @@ bool DashVnetOrch::removeVnet(const string& vnet_name, DashVnetBulkContext& ctxt return true; } - auto& object_statuses = ctxt.object_statuses; + auto& object_statuses = ctxt.vnet_statuses; sai_object_id_t vni; VnetEntry entry = vnet_table_[vnet_name]; vni = entry.vni; object_statuses.emplace_back(); vnet_bulker_.remove_entry(&object_statuses.back(), vni); + removePaValidation(vnet_name, ctxt); return false; } bool DashVnetOrch::removeVnetPost(const string& vnet_name, const DashVnetBulkContext& ctxt) { SWSS_LOG_ENTER(); + if (!ctxt.pa_validation_statuses.empty() && !removePaValidationPost(vnet_name, ctxt)) + { + return false; + } - const auto& object_statuses = ctxt.object_statuses; + const auto& object_statuses = ctxt.vnet_statuses; if (object_statuses.empty()) { @@ -158,7 +175,7 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) SWSS_LOG_ENTER(); auto it = consumer.m_toSync.begin(); - + uint32_t result; while (it != consumer.m_toSync.end()) { // Map to store vnet bulk op results @@ -175,12 +192,13 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) std::forward_as_tuple()); bool inserted = rc.second; auto& vnet_ctxt = rc.first->second; + result = DASH_RESULT_SUCCESS; if (!inserted) { vnet_ctxt.clear(); } - + vnet_ctxt.vnet_name = key; if (op == SET_COMMAND) { if (!parsePbMessage(kfvFieldsValues(tuple), vnet_ctxt.metadata)) @@ -192,6 +210,11 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) if (addVnet(key, vnet_ctxt)) { it = consumer.m_toSync.erase(it); + /* + * Write result only when removing from consumer in pre-op + * For other cases, this will be handled in post-op + */ + writeResultToDB(dash_vnet_result_table_, key, result); } else { @@ -203,6 +226,7 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) if (removeVnet(key, vnet_ctxt)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_vnet_result_table_, key); } else { @@ -216,6 +240,7 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) } } + pa_validation_bulker_.flush(); vnet_bulker_.flush(); auto it_prev = consumer.m_toSync.begin(); @@ -225,6 +250,7 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) string key = kfvKey(t); string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; auto found = toBulk.find(make_pair(key, op)); if (found == toBulk.end()) { @@ -233,35 +259,35 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) } const auto& vnet_ctxt = found->second; - const auto& object_statuses = vnet_ctxt.object_statuses; const auto& object_ids = vnet_ctxt.object_ids; + const auto& vnet_statuses = vnet_ctxt.vnet_statuses; + const auto& pa_validation_statuses = vnet_ctxt.pa_validation_statuses; + + if (object_ids.empty() && vnet_statuses.empty() && pa_validation_statuses.empty()) + { + it_prev++; + continue; + } if (op == SET_COMMAND) { - if (object_ids.empty()) - { - it_prev++; - continue; - } - if (addVnetPost(key, vnet_ctxt)) + if (addVnetPost(key, vnet_ctxt)) { it_prev = consumer.m_toSync.erase(it_prev); } else { + result = DASH_RESULT_FAILURE; it_prev++; } + writeResultToDB(dash_vnet_result_table_, key, result); } else if (op == DEL_COMMAND) { - if (object_statuses.empty()) - { - it_prev++; - continue; - } - if (removeVnetPost(key, vnet_ctxt)) + if (removeVnetPost(key, vnet_ctxt)) { it_prev = consumer.m_toSync.erase(it_prev); + removeResultFromDB(dash_vnet_result_table_, key); } else { @@ -272,7 +298,7 @@ void DashVnetOrch::doTaskVnetTable(ConsumerBase& consumer) } } -void DashVnetOrch::addOutboundCaToPa(const string& key, VnetMapBulkContext& ctxt) +bool DashVnetOrch::addOutboundCaToPa(const string& key, VnetMapBulkContext& ctxt) { SWSS_LOG_ENTER(); @@ -284,21 +310,124 @@ void DashVnetOrch::addOutboundCaToPa(const string& key, VnetMapBulkContext& ctxt sai_attribute_t outbound_ca_to_pa_attr; vector outbound_ca_to_pa_attrs; - outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP; - to_sai(ctxt.metadata.underlay_ip(), outbound_ca_to_pa_attr.value.ipaddr); - outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + DashOrch* dash_orch = gDirectory.get(); + dash::route_type::RouteType route_type_actions; + if (!dash_orch->getRouteTypeActions(ctxt.metadata.routing_type(), route_type_actions)) + { + SWSS_LOG_INFO("Failed to get route type actions for %s", key.c_str()); + return false; + } + + for (auto action: route_type_actions.items()) + { + if (action.action_type() == dash::route_type::ACTION_TYPE_STATICENCAP) + { + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_ENCAPSULATION; + if (action.encap_type() == dash::route_type::ENCAP_TYPE_VXLAN) + { + outbound_ca_to_pa_attr.value.u32 = SAI_DASH_ENCAPSULATION_VXLAN; + } + else if (action.encap_type() == dash::route_type::ENCAP_TYPE_NVGRE) + { + outbound_ca_to_pa_attr.value.u32 = SAI_DASH_ENCAPSULATION_NVGRE; + } + else + { + SWSS_LOG_ERROR("Invalid encap type %d for %s", action.encap_type(), key.c_str()); + return true; + } + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_TUNNEL_KEY; + outbound_ca_to_pa_attr.value.u32 = action.vni(); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP; + to_sai(ctxt.metadata.underlay_ip(), outbound_ca_to_pa_attr.value.ipaddr); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + } + } + + if (ctxt.metadata.has_tunnel()) + { + auto tunnel_oid = gDirectory.get()->getTunnelOid(ctxt.metadata.tunnel()); + if (tunnel_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Tunnel %s for VnetMap %s does not exist yet", ctxt.metadata.tunnel().c_str(), key.c_str()); + return false; + } + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_TUNNEL_ID; + outbound_ca_to_pa_attr.value.oid = tunnel_oid; + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + } - outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DMAC; - memcpy(outbound_ca_to_pa_attr.value.mac, ctxt.metadata.mac_address().c_str(), sizeof(sai_mac_t)); - outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + if (ctxt.metadata.routing_type() == dash::route_type::ROUTING_TYPE_PRIVATELINK) + { + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_ACTION; + outbound_ca_to_pa_attr.value.u32 = SAI_OUTBOUND_CA_TO_PA_ENTRY_ACTION_SET_PRIVATE_LINK_MAPPING; + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); - outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_USE_DST_VNET_VNI; - outbound_ca_to_pa_attr.value.booldata = ctxt.metadata.use_dst_vni(); - outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP; + to_sai(ctxt.metadata.overlay_dip_prefix().ip(), outbound_ca_to_pa_attr.value.ipaddr); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP_MASK; + to_sai(ctxt.metadata.overlay_dip_prefix().mask(), outbound_ca_to_pa_attr.value.ipaddr); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP; + to_sai(ctxt.metadata.overlay_sip_prefix().ip(), outbound_ca_to_pa_attr.value.ipaddr); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP_MASK; + to_sai(ctxt.metadata.overlay_sip_prefix().mask(), outbound_ca_to_pa_attr.value.ipaddr); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + + if (ctxt.metadata.has_port_map()) + { + auto port_map_oid = + gDirectory.get()->getPortMapOid(ctxt.metadata.port_map()); + if (port_map_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Portmap %s for VnetMap %s does not exist yet", + ctxt.metadata.port_map().c_str(), key.c_str()); + return false; + } + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OUTBOUND_PORT_MAP_ID; + outbound_ca_to_pa_attr.value.oid = port_map_oid; + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + } + } + + if (ctxt.metadata.has_metering_class_or()) + { + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_METER_CLASS_OR; + outbound_ca_to_pa_attr.value.u32 = ctxt.metadata.metering_class_or(); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + } + + if (ctxt.metadata.has_mac_address()) + { + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DMAC; + memcpy(outbound_ca_to_pa_attr.value.mac, ctxt.metadata.mac_address().c_str(), sizeof(sai_mac_t)); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + } + + if (ctxt.metadata.has_use_dst_vni()) + { + outbound_ca_to_pa_attr.id = SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_USE_DST_VNET_VNI; + outbound_ca_to_pa_attr.value.booldata = ctxt.metadata.use_dst_vni(); + outbound_ca_to_pa_attrs.push_back(outbound_ca_to_pa_attr); + } object_statuses.emplace_back(); outbound_ca_to_pa_bulker_.create_entry(&object_statuses.back(), &outbound_ca_to_pa_entry, (uint32_t)outbound_ca_to_pa_attrs.size(), outbound_ca_to_pa_attrs.data()); + + addPaValidation(key, ctxt); + return false; } void DashVnetOrch::addPaValidation(const string& key, VnetMapBulkContext& ctxt) @@ -308,18 +437,13 @@ void DashVnetOrch::addPaValidation(const string& key, VnetMapBulkContext& ctxt) auto& object_statuses = ctxt.pa_validation_object_statuses; string underlay_ip_str = to_string(ctxt.metadata.underlay_ip()); string pa_ref_key = ctxt.vnet_name + ":" + underlay_ip_str; - auto it = pa_refcount_table_.find(pa_ref_key); - if (it != pa_refcount_table_.end()) - { - /* - * PA validation entry already exisits. Just increment refcount and add - * a dummy success status to satisfy postop - */ - object_statuses.emplace_back(SAI_STATUS_SUCCESS); - pa_refcount_table_[pa_ref_key]++; - SWSS_LOG_INFO("Increment PA refcount to %u for PA IP %s", - pa_refcount_table_[pa_ref_key], - underlay_ip_str.c_str()); + + auto& vnet_underlay_ips = vnet_table_[ctxt.vnet_name].underlay_ips; + std::string underlay_sip_str = to_string(ctxt.metadata.underlay_ip()); + if (vnet_underlay_ips.find(underlay_sip_str) != vnet_underlay_ips.end()) + { + SWSS_LOG_INFO("Vnet %s already has PA validation entry for IP %s", ctxt.vnet_name.c_str(), to_string(ctxt.metadata.underlay_ip()).c_str()); + object_statuses.emplace_back(SAI_STATUS_ITEM_ALREADY_EXISTS); return; } @@ -336,35 +460,22 @@ void DashVnetOrch::addPaValidation(const string& key, VnetMapBulkContext& ctxt) object_statuses.emplace_back(); pa_validation_bulker_.create_entry(&object_statuses.back(), &pa_validation_entry, attr_count, &pa_validation_attr); - pa_refcount_table_[pa_ref_key] = 1; - SWSS_LOG_INFO("Initialize PA refcount to 1 for PA IP %s", - underlay_ip_str.c_str()); + vnet_table_[ctxt.vnet_name].underlay_ips.insert(underlay_sip_str); + SWSS_LOG_INFO("Bulk create PA validation entry for Vnet %s underlay IP %s", + ctxt.vnet_name.c_str(), to_string(ctxt.metadata.underlay_ip()).c_str()); } bool DashVnetOrch::addVnetMap(const string& key, VnetMapBulkContext& ctxt) { SWSS_LOG_ENTER(); - bool exists = (vnet_map_table_.find(key) != vnet_map_table_.end()); - if (!exists) + bool vnet_exists = (gVnetNameToId.find(ctxt.vnet_name) != gVnetNameToId.end()); + if (!vnet_exists) { - bool vnet_exists = (gVnetNameToId.find(ctxt.vnet_name) != gVnetNameToId.end()); - if (vnet_exists) - { - addOutboundCaToPa(key, ctxt); - addPaValidation(key, ctxt); - } - else - { - SWSS_LOG_INFO("Not creating VNET map for %s since VNET %s doesn't exist", key.c_str(), ctxt.vnet_name.c_str()); - } + SWSS_LOG_INFO("Not creating VNET map for %s since VNET %s doesn't exist", key.c_str(), ctxt.vnet_name.c_str()); return false; } - /* - * If the VNET map is already added, don't add it to the bulker and - * return true so it's removed from the consumer - */ - return true; + return addOutboundCaToPa(key, ctxt); } bool DashVnetOrch::addOutboundCaToPaPost(const string& key, const VnetMapBulkContext& ctxt) @@ -383,8 +494,7 @@ bool DashVnetOrch::addOutboundCaToPaPost(const string& key, const VnetMapBulkCon { if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { - // Retry if item exists in the bulker - return false; + return true; } SWSS_LOG_ERROR("Failed to create CA to PA entry for %s", key.c_str()); @@ -418,12 +528,9 @@ bool DashVnetOrch::addPaValidationPost(const string& key, const VnetMapBulkConte sai_status_t status = *it_status++; if (status != SAI_STATUS_SUCCESS) { - /* PA validation entry add failed. Remove PA refcount entry */ - pa_refcount_table_.erase(pa_ref_key); if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { - // Retry if item exists in the bulker - return false; + return true; } SWSS_LOG_ERROR("Failed to create PA validation entry for %s", key.c_str()); @@ -445,19 +552,16 @@ bool DashVnetOrch::addVnetMapPost(const string& key, const VnetMapBulkContext& c { SWSS_LOG_ENTER(); - bool status = addOutboundCaToPaPost(key, ctxt) && addPaValidationPost(key, ctxt); - if (!status) + bool remove_from_consumer = addOutboundCaToPaPost(key, ctxt) && addPaValidationPost(key, ctxt); + if (!remove_from_consumer) { SWSS_LOG_ERROR("addVnetMapPost failed for %s ", key.c_str()); - return false; + return remove_from_consumer; } - string vnet_name = ctxt.vnet_name; - VnetMapEntry entry = { gVnetNameToId[vnet_name], ctxt.dip, ctxt.metadata }; - vnet_map_table_[key] = entry; SWSS_LOG_INFO("Vnet map added for %s", key.c_str()); - return true; + return remove_from_consumer; } void DashVnetOrch::removeOutboundCaToPa(const string& key, VnetMapBulkContext& ctxt) @@ -466,53 +570,32 @@ void DashVnetOrch::removeOutboundCaToPa(const string& key, VnetMapBulkContext& c auto& object_statuses = ctxt.outbound_ca_to_pa_object_statuses; sai_outbound_ca_to_pa_entry_t outbound_ca_to_pa_entry; - outbound_ca_to_pa_entry.dst_vnet_id = vnet_map_table_[key].dst_vnet_id; + outbound_ca_to_pa_entry.dst_vnet_id = gVnetNameToId[ctxt.vnet_name]; outbound_ca_to_pa_entry.switch_id = gSwitchId; - swss::copy(outbound_ca_to_pa_entry.dip, vnet_map_table_[key].dip); + swss::copy(outbound_ca_to_pa_entry.dip, ctxt.dip); object_statuses.emplace_back(); outbound_ca_to_pa_bulker_.remove_entry(&object_statuses.back(), &outbound_ca_to_pa_entry); } -void DashVnetOrch::removePaValidation(const string& key, VnetMapBulkContext& ctxt) +void DashVnetOrch::removePaValidation(const string& key, DashVnetBulkContext& ctxt) { SWSS_LOG_ENTER(); - auto& object_statuses = ctxt.pa_validation_object_statuses; - string underlay_ip = to_string(vnet_map_table_[key].metadata.underlay_ip()); - string pa_ref_key = ctxt.vnet_name + ":" + underlay_ip; - auto it = pa_refcount_table_.find(pa_ref_key); - if (it == pa_refcount_table_.end()) + auto& object_statuses = ctxt.pa_validation_statuses; + for (auto ip_str : vnet_table_[ctxt.vnet_name].underlay_ips) { - return; - } - else - { - if (--pa_refcount_table_[pa_ref_key] > 0) - { - /* - * PA validation entry already exisits. Just decrement refcount and add - * a dummy success status to satisfy postop - */ - object_statuses.emplace_back(SAI_STATUS_SUCCESS); - SWSS_LOG_INFO("Decrement PA refcount to %u for PA IP %s", - pa_refcount_table_[pa_ref_key], - underlay_ip.c_str()); - return; - } - else - { - sai_pa_validation_entry_t pa_validation_entry; - pa_validation_entry.vnet_id = vnet_map_table_[key].dst_vnet_id; - pa_validation_entry.switch_id = gSwitchId; - to_sai(vnet_map_table_[key].metadata.underlay_ip(), pa_validation_entry.sip); - - object_statuses.emplace_back(); - pa_validation_bulker_.remove_entry(&object_statuses.back(), &pa_validation_entry); - SWSS_LOG_INFO("PA refcount refcount is zero for PA IP %s, removing refcount table entry", - underlay_ip.c_str()); - pa_refcount_table_.erase(pa_ref_key); - } + swss::IpAddress underlay_ip(ip_str); + sai_pa_validation_entry_t pa_validation_entry; + pa_validation_entry.vnet_id = gVnetNameToId[ctxt.vnet_name]; + pa_validation_entry.switch_id = gSwitchId; + swss::copy(pa_validation_entry.sip, underlay_ip); + + object_statuses.emplace_back(); + pa_validation_bulker_.remove_entry(&object_statuses.back(), &pa_validation_entry); + SWSS_LOG_INFO("Bulk remove PA validation entry for Vnet %s IP %s, removing refcount table entry", + ctxt.vnet_name.c_str(), underlay_ip.to_string().c_str()); + } } @@ -520,14 +603,6 @@ bool DashVnetOrch::removeVnetMap(const string& key, VnetMapBulkContext& ctxt) { SWSS_LOG_ENTER(); - bool exists = (vnet_map_table_.find(key) != vnet_map_table_.end()); - if (!exists) - { - SWSS_LOG_INFO("Failed to find vnet mapping %s to remove", key.c_str()); - return true; - } - - removePaValidation(key, ctxt); removeOutboundCaToPa(key, ctxt); return false; @@ -553,7 +628,13 @@ bool DashVnetOrch::removeOutboundCaToPaPost(const string& key, const VnetMapBulk return false; } - SWSS_LOG_ERROR("Failed to remove outbound routing entry for %s", key.c_str()); + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_WARN("Outbound CA to PA entry for %s already removed", key.c_str()); + return true; + } + + SWSS_LOG_ERROR("Failed to remove outbound CA to PA entry for %s", key.c_str()); task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_OUTBOUND_CA_TO_PA, status); if (handle_status != task_success) { @@ -561,63 +642,68 @@ bool DashVnetOrch::removeOutboundCaToPaPost(const string& key, const VnetMapBulk } } - gCrmOrch->decCrmResUsedCounter(vnet_map_table_[key].dip.isV4() ? CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA); + gCrmOrch->decCrmResUsedCounter(ctxt.dip.isV4() ? CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA : CrmResourceType::CRM_DASH_IPV6_OUTBOUND_CA_TO_PA); SWSS_LOG_INFO("Outbound CA to PA map entry for %s removed", key.c_str()); return true; } -bool DashVnetOrch::removePaValidationPost(const string& key, const VnetMapBulkContext& ctxt) +bool DashVnetOrch::removePaValidationPost(const string& key, const DashVnetBulkContext& ctxt) { SWSS_LOG_ENTER(); + bool remove_from_consumer = true; - string underlay_ip = to_string(vnet_map_table_[key].metadata.underlay_ip()); - string pa_ref_key = ctxt.vnet_name + ":" + underlay_ip; - const auto& object_statuses = ctxt.pa_validation_object_statuses; + const auto& object_statuses = ctxt.pa_validation_statuses; if (object_statuses.empty()) { return false; } auto it_status = object_statuses.begin(); - sai_status_t status = *it_status++; - if (status != SAI_STATUS_SUCCESS) + auto it_ip = vnet_table_[ctxt.vnet_name].underlay_ips.begin(); + while (it_ip != vnet_table_[ctxt.vnet_name].underlay_ips.end()) { - // Retry later if object has non-zero reference to it - if (status == SAI_STATUS_NOT_EXECUTED) + sai_status_t status = *it_status++; + swss::IpAddress underlay_ip(*it_ip); + if (status != SAI_STATUS_SUCCESS) { - return false; - } + // Retry later if object has non-zero reference to it + if (status == SAI_STATUS_OBJECT_IN_USE) + { + SWSS_LOG_INFO("PA validation entry for Vnet %s IP %s still in use", + ctxt.vnet_name.c_str(), it_ip->c_str()); + remove_from_consumer = false; + it_ip++; + continue; + } - SWSS_LOG_ERROR("Failed to remove PA validation entry for %s", key.c_str()); - task_process_status handle_status = handleSaiRemoveStatus((sai_api_t) SAI_API_DASH_PA_VALIDATION, status); - if (handle_status != task_success) + SWSS_LOG_ERROR("Failed to remove PA validation entry for %s", key.c_str()); + + } + it_ip = vnet_table_[ctxt.vnet_name].underlay_ips.erase(it_ip); + if (*it_status == SAI_STATUS_SUCCESS) { - return parseHandleSaiStatusFailure(handle_status); + SWSS_LOG_INFO("PA validation entry for %s removed", key.c_str()); } + gCrmOrch->decCrmResUsedCounter(underlay_ip.isV4() ? CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION : CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION); } - - gCrmOrch->decCrmResUsedCounter(vnet_map_table_[key].metadata.underlay_ip().has_ipv4() ? CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION : CrmResourceType::CRM_DASH_IPV6_PA_VALIDATION); - - SWSS_LOG_INFO("PA validation entry for %s removed", key.c_str()); - - return true; + return remove_from_consumer; } bool DashVnetOrch::removeVnetMapPost(const string& key, const VnetMapBulkContext& ctxt) { SWSS_LOG_ENTER(); - bool status = removeOutboundCaToPaPost(key, ctxt) && removePaValidationPost(key, ctxt); - if (!status) + bool remove_from_consumer = removeOutboundCaToPaPost(key, ctxt); + if (!remove_from_consumer) { - return false; + SWSS_LOG_ERROR("removeVnetMapPost failed for %s ", key.c_str()); + return remove_from_consumer; } - vnet_map_table_.erase(key); SWSS_LOG_INFO("Vnet map removed for %s", key.c_str()); - return true; + return remove_from_consumer; } void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) @@ -625,7 +711,7 @@ void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) SWSS_LOG_ENTER(); auto it = consumer.m_toSync.begin(); - + uint32_t result; while (it != consumer.m_toSync.end()) { std::map, @@ -641,6 +727,7 @@ void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) std::forward_as_tuple()); bool inserted = rc.second; auto& ctxt = rc.first->second; + result = DASH_RESULT_SUCCESS; if (!inserted) { @@ -664,9 +751,24 @@ void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) it = consumer.m_toSync.erase(it); continue; } + if (ctxt.metadata.routing_type() == dash::route_type::RoutingType::ROUTING_TYPE_UNSPECIFIED) + { + // VnetMapping::action_type is deprecated in favor of VnetMapping::routing_type. For messages still using the old action_type field, + // copy it to the new routing_type field. All subsequent operations will use the new field. + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" + SWSS_LOG_WARN("VnetMapping::action_type is deprecated. Use VnetMapping::routing_type instead"); + ctxt.metadata.set_routing_type(ctxt.metadata.action_type()); + #pragma GCC diagnostic pop + } if (addVnetMap(key, ctxt)) { it = consumer.m_toSync.erase(it); + /* + * Write result only when removing from consumer in pre-op + * For other cases, this will be handled in post-op + */ + writeResultToDB(dash_vnet_map_result_table_, key, result); } else { @@ -678,6 +780,7 @@ void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) if (removeVnetMap(key, ctxt)) { it = consumer.m_toSync.erase(it); + removeResultFromDB(dash_vnet_map_result_table_, key); } else { @@ -700,6 +803,7 @@ void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) KeyOpFieldsValuesTuple t = it_prev->second; string key = kfvKey(t); string op = kfvOp(t); + result = DASH_RESULT_SUCCESS; auto found = toBulk.find(make_pair(key, op)); if (found == toBulk.end()) { @@ -710,7 +814,7 @@ void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) const auto& ctxt = found->second; const auto& outbound_ca_to_pa_object_statuses = ctxt.outbound_ca_to_pa_object_statuses; const auto& pa_validation_object_statuses = ctxt.pa_validation_object_statuses; - if (outbound_ca_to_pa_object_statuses.empty() || pa_validation_object_statuses.empty()) + if (outbound_ca_to_pa_object_statuses.empty() && pa_validation_object_statuses.empty()) { it_prev++; continue; @@ -724,14 +828,17 @@ void DashVnetOrch::doTaskVnetMapTable(ConsumerBase& consumer) } else { + result = DASH_RESULT_FAILURE; it_prev++; } + writeResultToDB(dash_vnet_map_result_table_, key, result); } else if (op == DEL_COMMAND) { if (removeVnetMapPost(key, ctxt)) { it_prev = consumer.m_toSync.erase(it_prev); + removeResultFromDB(dash_vnet_map_result_table_, key); } else { diff --git a/orchagent/dash/dashvnetorch.h b/orchagent/dash/dashvnetorch.h index 3a5a06fd98d..d1ff43cf4e2 100644 --- a/orchagent/dash/dashvnetorch.h +++ b/orchagent/dash/dashvnetorch.h @@ -21,25 +21,18 @@ struct VnetEntry { sai_object_id_t vni; dash::vnet::Vnet metadata; -}; - -struct VnetMapEntry -{ - sai_object_id_t dst_vnet_id; - swss::IpAddress dip; - dash::vnet_mapping::VnetMapping metadata; + std::set underlay_ips; }; typedef std::unordered_map DashVnetTable; -typedef std::unordered_map DashVnetMapTable; -typedef std::unordered_map PaRefCountTable; struct DashVnetBulkContext { std::string vnet_name; dash::vnet::Vnet metadata; std::deque object_ids; - std::deque object_statuses; + std::deque vnet_statuses; + std::deque pa_validation_statuses; DashVnetBulkContext() {} DashVnetBulkContext(const DashVnetBulkContext&) = delete; @@ -48,7 +41,8 @@ struct DashVnetBulkContext void clear() { object_ids.clear(); - object_statuses.clear(); + vnet_statuses.clear(); + pa_validation_statuses.clear(); } }; @@ -74,31 +68,35 @@ struct VnetMapBulkContext class DashVnetOrch : public ZmqOrch { public: - DashVnetOrch(swss::DBConnector *db, std::vector &tables, swss::ZmqServer *zmqServer); + DashVnetOrch(swss::DBConnector *db, std::vector &tables, swss::DBConnector *app_state_db, swss::ZmqServer *zmqServer); private: DashVnetTable vnet_table_; - DashVnetMapTable vnet_map_table_; - PaRefCountTable pa_refcount_table_; ObjectBulker vnet_bulker_; EntityBulker outbound_ca_to_pa_bulker_; EntityBulker pa_validation_bulker_; + std::unique_ptr dash_vnet_result_table_; + std::unique_ptr dash_vnet_map_result_table_; void doTask(ConsumerBase &consumer); void doTaskVnetTable(ConsumerBase &consumer); void doTaskVnetMapTable(ConsumerBase &consumer); + + // The following add/remove methods will return true if the provided key should be removed from the + // consumer (i.e. task is done and no retries are required) and false otherwise. + // Methods which only have one possible outcome will have return type void. bool addVnet(const std::string& key, DashVnetBulkContext& ctxt); bool addVnetPost(const std::string& key, const DashVnetBulkContext& ctxt); bool removeVnet(const std::string& key, DashVnetBulkContext& ctxt); bool removeVnetPost(const std::string& key, const DashVnetBulkContext& ctxt); - void addOutboundCaToPa(const std::string& key, VnetMapBulkContext& ctxt); + bool addOutboundCaToPa(const std::string& key, VnetMapBulkContext& ctxt); bool addOutboundCaToPaPost(const std::string& key, const VnetMapBulkContext& ctxt); void removeOutboundCaToPa(const std::string& key, VnetMapBulkContext& ctxt); bool removeOutboundCaToPaPost(const std::string& key, const VnetMapBulkContext& ctxt); void addPaValidation(const std::string& key, VnetMapBulkContext& ctxt); bool addPaValidationPost(const std::string& key, const VnetMapBulkContext& ctxt); - void removePaValidation(const std::string& key, VnetMapBulkContext& ctxt); - bool removePaValidationPost(const std::string& key, const VnetMapBulkContext& ctxt); + void removePaValidation(const std::string& key, DashVnetBulkContext& ctxt); + bool removePaValidationPost(const std::string& key, const DashVnetBulkContext& ctxt); bool addVnetMap(const std::string& key, VnetMapBulkContext& ctxt); bool addVnetMapPost(const std::string& key, const VnetMapBulkContext& ctxt); bool removeVnetMap(const std::string& key, VnetMapBulkContext& ctxt); diff --git a/orchagent/dash/pbutils.cpp b/orchagent/dash/pbutils.cpp index e8cd98f9e81..505ccf5d52c 100644 --- a/orchagent/dash/pbutils.cpp +++ b/orchagent/dash/pbutils.cpp @@ -1,4 +1,5 @@ #include "pbutils.h" +#include using namespace std; @@ -91,6 +92,33 @@ bool to_sai(const RepeatedPtrField &pb_prefixes, vector pb_range.range().max()) + { + SWSS_LOG_WARN("The range %s is invalid", pb_range.range().DebugString().c_str()); + return false; + } + sai_range.min = pb_range.range().min(); + sai_range.max = pb_range.range().max(); + } + else + { + SWSS_LOG_WARN("The ValueOrRange %s is invalid", pb_range.DebugString().c_str()); + return false; + } + return true; +} + ip_addr_t to_swss(const dash::types::IpAddress &pb_address) { SWSS_LOG_ENTER(); @@ -120,3 +148,162 @@ std::string to_string(const dash::types::IpAddress &pb_address) return IpAddress(to_swss(pb_address)).to_string(); } + +sai_uint16_t to_sai(const dash::types::HaRole ha_role) +{ + SWSS_LOG_ENTER(); + + sai_dash_ha_role_t sai_ha_role = SAI_DASH_HA_ROLE_DEAD; + + switch (ha_role) + { + case dash::types::HA_ROLE_DEAD: + sai_ha_role = SAI_DASH_HA_ROLE_DEAD; + break; + case dash::types::HA_ROLE_ACTIVE: + sai_ha_role = SAI_DASH_HA_ROLE_ACTIVE; + break; + case dash::types::HA_ROLE_STANDBY: + sai_ha_role = SAI_DASH_HA_ROLE_STANDBY; + break; + case dash::types::HA_ROLE_STANDALONE: + sai_ha_role = SAI_DASH_HA_ROLE_STANDALONE; + break; + case dash::types::HA_ROLE_SWITCHING_TO_ACTIVE: + sai_ha_role = SAI_DASH_HA_ROLE_SWITCHING_TO_ACTIVE; + break; + default: + SWSS_LOG_ERROR("Invalid HA Role %s", dash::types::HaRole_Name(ha_role).c_str()); + } + + return static_cast(sai_ha_role); +} + +dash::types::HaRole to_pb(const sai_dash_ha_role_t ha_role) +{ + SWSS_LOG_ENTER(); + + switch (ha_role) + { + case SAI_DASH_HA_ROLE_DEAD: + return dash::types::HA_ROLE_DEAD; + case SAI_DASH_HA_ROLE_ACTIVE: + return dash::types::HA_ROLE_ACTIVE; + case SAI_DASH_HA_ROLE_STANDBY: + return dash::types::HA_ROLE_STANDBY; + case SAI_DASH_HA_ROLE_STANDALONE: + return dash::types::HA_ROLE_STANDALONE; + case SAI_DASH_HA_ROLE_SWITCHING_TO_ACTIVE: + return dash::types::HA_ROLE_SWITCHING_TO_ACTIVE; + default: + return dash::types::HA_ROLE_DEAD; + } +} + +bool to_pb(const std::string &ha_role, dash::types::HaRole &pb_ha_role) +{ + SWSS_LOG_ENTER(); + + if (ha_role == "dead") + { + pb_ha_role = dash::types::HA_ROLE_DEAD; + } + else if (ha_role == "active") + { + pb_ha_role = dash::types::HA_ROLE_ACTIVE; + } + else if (ha_role == "standby") + { + pb_ha_role = dash::types::HA_ROLE_STANDBY; + } + else if (ha_role == "standalone") + { + pb_ha_role = dash::types::HA_ROLE_STANDALONE; + } + else if (ha_role == "switching_to_active") + { + pb_ha_role = dash::types::HA_ROLE_SWITCHING_TO_ACTIVE; + } + else + { + SWSS_LOG_NOTICE("Unspecified HA Role %s, defaulting to dead", ha_role.c_str()); + pb_ha_role = dash::types::HA_ROLE_DEAD; + return false; + } + + return true; +} + +bool to_pb(const std::string &ha_owner, dash::types::HaOwner &pb_ha_owner) +{ + SWSS_LOG_ENTER(); + + if (ha_owner == "switch") + { + pb_ha_owner = dash::types::HA_OWNER_SWITCH; + } + else if (ha_owner == "dpu") + { + pb_ha_owner = dash::types::HA_OWNER_DPU; + } + else + { + SWSS_LOG_NOTICE("Unspecified HA Owner %s, defaulting to DPU", ha_owner.c_str()); + pb_ha_owner = dash::types::HA_OWNER_DPU; + return false; + } + + return true; +} + +bool to_pb(const std::string &ha_scope, dash::types::HaScope &pb_ha_scope) +{ + SWSS_LOG_ENTER(); + + if (ha_scope == "eni") + { + pb_ha_scope = dash::types::HA_SCOPE_ENI; + } + else if (ha_scope == "dpu") + { + pb_ha_scope = dash::types::HA_SCOPE_DPU; + } + else + { + SWSS_LOG_NOTICE("Unspecified HA Scope %s, defaulting to DPU", ha_scope.c_str()); + pb_ha_scope = dash::types::HA_SCOPE_DPU; + return false; + } + + return true; +} + +bool to_pb(const std::string &ip_address, dash::types::IpAddress &pb_address) +{ + SWSS_LOG_ENTER(); + + if (ip_address.empty()) + { + SWSS_LOG_WARN("Empty IP address string"); + return false; + } + + uint8_t buf[16]; + + if (inet_pton(AF_INET, ip_address.c_str(), buf) == 1) + { + uint32_t ipv4; + std::memcpy(&ipv4, buf, 4); + pb_address.set_ipv4(ipv4); + return true; + } + + if (inet_pton(AF_INET6, ip_address.c_str(), buf) == 1) + { + pb_address.set_ipv6(std::string(reinterpret_cast(buf), 16)); + return true; + } + + SWSS_LOG_WARN("Invalid IP address format: %s", ip_address.c_str()); + return false; +} \ No newline at end of file diff --git a/orchagent/dash/pbutils.h b/orchagent/dash/pbutils.h index 080cac46666..0a27ac35950 100644 --- a/orchagent/dash/pbutils.h +++ b/orchagent/dash/pbutils.h @@ -18,6 +18,8 @@ bool to_sai(const dash::types::IpPrefix &pb_prefix, sai_ip_prefix_t &sai_prefix) bool to_sai(const google::protobuf::RepeatedPtrField &pb_prefixes, std::vector &sai_prefixes); +bool to_sai(const dash::types::ValueOrRange &pb_range, sai_u32_range_t &sai_range); + template bool to_sai(const dash::types::ValueOrRange &pb_range, RangeType &sai_range) { @@ -77,3 +79,15 @@ bool to_sai(const google::protobuf::RepeatedPtrField swss::ip_addr_t to_swss(const dash::types::IpAddress &pb_address); std::string to_string(const dash::types::IpAddress &pb_address); + +sai_uint16_t to_sai(const dash::types::HaRole ha_role); + +dash::types::HaRole to_pb(const sai_dash_ha_role_t ha_role); + +bool to_pb(const std::string &ha_role, dash::types::HaRole &pb_ha_role); + +bool to_pb(const std::string &ha_owner, dash::types::HaOwner &pb_ha_owner); + +bool to_pb(const std::string &ha_scope, dash::types::HaScope &pb_ha_scope); + +bool to_pb(const std::string &ip_address, dash::types::IpAddress &pb_address); diff --git a/orchagent/debugcounterorch.cpp b/orchagent/debugcounterorch.cpp index ed27f400d43..85230ebf19b 100644 --- a/orchagent/debugcounterorch.cpp +++ b/orchagent/debugcounterorch.cpp @@ -37,6 +37,26 @@ DebugCounterOrch::DebugCounterOrch(DBConnector *db, const vector& table_ publishDropCounterCapabilities(); gPortsOrch->attach(this); + + // Add drop monitor lua script + string dropMonitorPluginName = "drop_monitor.lua"; + string dropMonitorSha; + + try + { + string dropMonitorLuaScript = swss::loadLuaScript(dropMonitorPluginName); + dropMonitorSha = swss::loadRedisScript(m_countersDb.get(), dropMonitorLuaScript); + } + catch (const runtime_error &e) + { + SWSS_LOG_ERROR("Drop monitor flex counter group was not set successfully: %s", e.what()); + } + + setFlexCounterGroupParameter(DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP, + DEBUG_DROP_MONITOR_FLEX_COUNTER_POLLING_INTERVAL_MS, + STATS_MODE_READ, + PORT_PLUGIN_FIELD, + dropMonitorSha); } DebugCounterOrch::~DebugCounterOrch(void) @@ -193,6 +213,70 @@ void DebugCounterOrch::doTask(Consumer& consumer) SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); } } + else if (table_name == "DEBUG_DROP_MONITOR") + { + if (op == SET_COMMAND) + { + if (key == "CONFIG") + { + for (const auto& value : values) + { + string config_name = value.first; + string config_value = value.second; + + // Check the status of the drop counter monitor feature + try + { + if (config_name == "status") + { + if (config_value == "enabled") + { + debug_monitor_enabled = true; + string monitored_debug_counter_stat = counterIdsToStr(portDebugMonitorStatIds); + SWSS_LOG_DEBUG("Enabling debug drop monitor: %s", monitored_debug_counter_stat.c_str()); + setFlexCounterGroupOperation(DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP, "enable"); + for (auto const &curr : gPortsOrch->getAllPorts()) + { + string key = string(DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP) + ":" + sai_serialize_object_id(curr.second.m_port_id); + startFlexCounterPolling(gSwitchId, key, monitored_debug_counter_stat, PORT_COUNTER_ID_LIST); + } + } + else if (config_value == "disabled") + { + debug_monitor_enabled = false; + SWSS_LOG_DEBUG("Disabling debug drop monitor"); + setFlexCounterGroupOperation(DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP, "disable"); + for (auto const &curr : gPortsOrch->getAllPorts()) + { + string key = string(DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP) + ":" + sai_serialize_object_id(curr.second.m_port_id); + stopFlexCounterPolling(gSwitchId, key); + } + } + else + { + SWSS_LOG_ERROR("The status of drop counter monitor was not recognized: %s. Accepted values are enabled/disabled.", config_value.c_str()); + task_status = task_process_status::task_failed; + } + } + else + { + SWSS_LOG_ERROR("Config for drop counter monitor was not recognized: %s. Accepted values are status.", config_value.c_str()); + task_status = task_process_status::task_failed; + } + } + catch(const std::runtime_error& e) + { + SWSS_LOG_ERROR("Encountered an error when updating DEBUG_DROP_MONITOR. config_name: %s, config_value: %s", config_name.c_str(), config_value.c_str()); + task_status = task_process_status::task_failed; + } + } + } + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); + } + } else { SWSS_LOG_ERROR("Received update from unknown table '%s'", table_name.c_str()); @@ -227,7 +311,7 @@ void DebugCounterOrch::doTask(Consumer& consumer) // publishDropCounterCapabilities queries the SAI for available drop counter // capabilities on this device and publishes the information to the -// DROP_COUNTER_CAPABILITIES table in STATE_DB. +// DEBUG_COUNTER_CAPABILITIES table in STATE_DB. void DebugCounterOrch::publishDropCounterCapabilities() { supported_ingress_drop_reasons = DropCounter::getSupportedDropReasons(SAI_DEBUG_COUNTER_ATTR_IN_DROP_REASON_LIST); @@ -336,8 +420,7 @@ task_process_status DebugCounterOrch::uninstallDebugCounter(const string& counte string counter_type = counter->getCounterType(); string counter_stat = counter->getDebugCounterSAIStat(); - debug_counters.erase(it); - uninstallDebugFlexCounters(counter_type, counter_stat); + uninstallDebugFlexCounters(counter_type, counter_stat, SAI_NULL_OBJECT_ID, counter_name); if (counter_type == PORT_INGRESS_DROPS || counter_type == PORT_EGRESS_DROPS) { @@ -347,6 +430,7 @@ task_process_status DebugCounterOrch::uninstallDebugCounter(const string& counte { m_counterNameToSwitchStatMap->hdel("", counter_name); } + debug_counters.erase(it); SWSS_LOG_NOTICE("Successfully deleted drop counter %s", counter_name.c_str()); return task_process_status::task_success; @@ -531,6 +615,11 @@ void DebugCounterOrch::installDebugFlexCounters(const string& counter_type, SWSS_LOG_ENTER(); CounterType flex_counter_type = getFlexCounterType(counter_type); + // Track the new counter_stat in debug drop monitor + portDebugMonitorStatIds.insert(counter_stat); + string monitored_debug_counter_stat = counterIdsToStr(portDebugMonitorStatIds); + SWSS_LOG_DEBUG("Added %s to: %s", counter_stat.c_str(), monitored_debug_counter_stat.c_str()); + if (flex_counter_type == CounterType::SWITCH_DEBUG) { flex_counter_manager.addFlexCounterStat(gSwitchId, flex_counter_type, counter_stat); @@ -556,17 +645,34 @@ void DebugCounterOrch::installDebugFlexCounters(const string& counter_type, curr.second.m_port_id, flex_counter_type, counter_stat); + + if (debug_monitor_enabled) + { + string key = string(DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP) + ":" + sai_serialize_object_id(curr.second.m_port_id); + stopFlexCounterPolling(gSwitchId, key); + startFlexCounterPolling(gSwitchId, key, monitored_debug_counter_stat, PORT_COUNTER_ID_LIST); + } } } } void DebugCounterOrch::uninstallDebugFlexCounters(const string& counter_type, const string& counter_stat, - sai_object_id_t port_id) + sai_object_id_t port_id, + const string& counter_name) { SWSS_LOG_ENTER(); CounterType flex_counter_type = getFlexCounterType(counter_type); + // Remove the counter_stat from being tracked in debug drop monitor + auto counter_stat_iter = portDebugMonitorStatIds.find(counter_stat); + portDebugMonitorStatIds.erase(counter_stat_iter); + string monitored_debug_counter_stat = counterIdsToStr(portDebugMonitorStatIds); + SWSS_LOG_DEBUG("Removed %s from: %s", counter_stat.c_str(), monitored_debug_counter_stat.c_str()); + + // Make a vector of keys to delete from COUNTERS_DB, these keys are used by drop counter monitor + std::vector debug_drop_monitor_stats_fields; + if (flex_counter_type == CounterType::SWITCH_DEBUG) { flex_counter_manager.removeFlexCounterStat(gSwitchId, flex_counter_type, counter_stat); @@ -575,6 +681,10 @@ void DebugCounterOrch::uninstallDebugFlexCounters(const string& counter_type, { for (auto const &curr : gPortsOrch->getAllPorts()) { + // Remove debug counter stat from being tracked by drop counter monitor + string key = string(DEBUG_COUNTER_FLEX_COUNTER_GROUP) + ":" + sai_serialize_object_id(curr.second.m_port_id); + stopFlexCounterPolling(gSwitchId, key); + if (port_id != SAI_NULL_OBJECT_ID) { if (curr.second.m_port_id != port_id) @@ -592,8 +702,20 @@ void DebugCounterOrch::uninstallDebugFlexCounters(const string& counter_type, curr.second.m_port_id, flex_counter_type, counter_stat); + + debug_drop_monitor_stats_fields.push_back("DEBUG_DROP_MONITOR_STATS|" + counter_name + "|" + curr.first); + + if (debug_monitor_enabled) + { + string key = string(DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP) + ":" + sai_serialize_object_id(curr.second.m_port_id); + stopFlexCounterPolling(gSwitchId, key); + startFlexCounterPolling(gSwitchId, key, monitored_debug_counter_stat, PORT_COUNTER_ID_LIST); + } } } + + // Delete DEBUG_DROP_MONITOR_STATS for this debug counter + m_countersDb->del(debug_drop_monitor_stats_fields); } // Debug Counter Initialization Helper Functions START HERE ---------------------------------------- @@ -657,6 +779,11 @@ void DebugCounterOrch::createDropCounter(const string& counter_name, const strin } } +bool DebugCounterOrch::getDebugMonitorStatus() +{ + return debug_monitor_enabled; +} + // Debug Counter Configuration Helper Functions START HERE ----------------------------------------- // parseDropReasonUpdate takes a key from CONFIG_DB and returns the 1) the counter name being targeted and @@ -684,5 +811,21 @@ bool DebugCounterOrch::isDropReasonValid(const string& drop_reason) const return true; } +string DebugCounterOrch::counterIdsToStr(const std::unordered_set& ids) const +{ + SWSS_LOG_ENTER(); + string str; + + for (const auto& i: ids) + { + str += i + ","; + } + // Remove trailing ',' + if (!str.empty()) + { + str.pop_back(); + } + return str; +} diff --git a/orchagent/debugcounterorch.h b/orchagent/debugcounterorch.h index edfb5d98e04..7df76efc2f1 100644 --- a/orchagent/debugcounterorch.h +++ b/orchagent/debugcounterorch.h @@ -17,6 +17,8 @@ extern "C" { } #define DEBUG_COUNTER_FLEX_COUNTER_GROUP "DEBUG_COUNTER" +#define DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP "DEBUG_MONITOR_COUNTER" +#define DEBUG_DROP_MONITOR_FLEX_COUNTER_POLLING_INTERVAL_MS "60000" using DebugCounterMap = std::unordered_map>; @@ -31,6 +33,8 @@ class DebugCounterOrch: public Orch, public Observer void doTask(Consumer& consumer); void update(SubjectType, void *cntx); + + bool getDebugMonitorStatus(); private: // Debug Capability Reporting Functions void publishDropCounterCapabilities(); @@ -57,7 +61,8 @@ class DebugCounterOrch: public Orch, public Observer void uninstallDebugFlexCounters( const std::string& counter_type, const std::string& counter_stat, - sai_object_id_t port_id = SAI_NULL_OBJECT_ID); + sai_object_id_t port_id = SAI_NULL_OBJECT_ID, + const std::string& counter_name = ""); // Debug Counter Initialization Helper Functions std::string getDebugCounterType( @@ -74,6 +79,7 @@ class DebugCounterOrch: public Orch, public Observer std::string *counter_name, std::string *drop_reason) const; bool isDropReasonValid(const std::string& drop_reason) const; + std::string counterIdsToStr(const std::unordered_set& ids) const; // Data Members std::shared_ptr m_stateDb = nullptr; @@ -91,6 +97,11 @@ class DebugCounterOrch: public Orch, public Observer DebugCounterMap debug_counters; + // portDebugStatIds will store the debug counter stats that have been + // configured + bool debug_monitor_enabled = false; + std::unordered_set portDebugMonitorStatIds; + // free_drop_counters are drop counters that have been created by a user // that do not have any drop reasons associated with them yet. Because // we cannot create a drop counter without any drop reasons, we keep track diff --git a/orchagent/drop_monitor.lua b/orchagent/drop_monitor.lua new file mode 100644 index 00000000000..b9431f38b57 --- /dev/null +++ b/orchagent/drop_monitor.lua @@ -0,0 +1,88 @@ +-- KEYS - port IDs +-- ARGV[1] - counters db index +-- ARGV[2] - counters table name +-- ARGV[3] - poll time interval (milliseconds) + +local counters_db = ARGV[1] +local config_db = 4 +local debug_drop_monitor_stat_table = 'DEBUG_DROP_MONITOR_STATS' +local persistent_drop_alert_table = 'PERSISTENT_DROP_ALERTS' + +redis.call('SELECT', counters_db) + +-- Helper functions +local function parse_boolean(str) return str == "true" end +local function parse_number(str) return tonumber(str) or 0 end + +-- Get the debug counters and port name map +local debug_counter_to_port_stat_map = redis.call('HGETALL', "COUNTERS_DEBUG_NAME_PORT_STAT_MAP") +local debug_counter_to_port_stat_map_len = redis.call('HLEN', "COUNTERS_DEBUG_NAME_PORT_STAT_MAP") +local port_name_map = redis.call('HGETALL', "COUNTERS_PORT_NAME_MAP") +local port_name_map_len = redis.call('HLEN', "COUNTERS_PORT_NAME_MAP") + +-- Iterate over the debug counter and get their specific configuration +for debug_counter_index = 1, debug_counter_to_port_stat_map_len, 2 do + local debug_counter = debug_counter_to_port_stat_map[debug_counter_index] + local debug_counter_stat = debug_counter_to_port_stat_map[debug_counter_index + 1] + + -- Get the configuration of debug counter + redis.call('SELECT', config_db) + local debug_counter_table = "DEBUG_COUNTER|" .. debug_counter + local status = redis.call('HGET', debug_counter_table, 'drop_monitor_status') + local drop_count_threshold = parse_number(redis.call('HGET', debug_counter_table, 'drop_count_threshold')) + local incident_count_threshold = parse_number(redis.call('HGET', debug_counter_table, 'incident_count_threshold')) + local window = parse_number(redis.call('HGET', debug_counter_table, 'window')) + redis.call('SELECT', counters_db) + + -- Detect persistent drops if status is enabled + if status == 'enabled' then + -- Iterate over all ports + for port_index = 1, port_name_map_len, 2 do + -- Get counter stats + local port = port_name_map[port_index] + local port_oid = port_name_map[port_index + 1] + local counter_stat_map = "COUNTERS:" .. port_oid + local current_drop_count = parse_number(redis.call('HGET', counter_stat_map, debug_counter_stat)) + + -- Calculate the delta since previous poll + local prev_drop_count = parse_number(redis.call('HGET', debug_drop_monitor_stat_table .. '|' .. debug_counter .. '|' .. port, 'prev_drop_count')) + local delta_drop_count = current_drop_count - prev_drop_count + + -- Update the previous drop count + redis.call('HSET', debug_drop_monitor_stat_table .. '|' .. debug_counter .. '|' .. port, 'prev_drop_count', current_drop_count) + + -- Fetch the current timestamp + local time = redis.call('TIME') + local curr_unix_timestamp = tonumber(time[1]) + + -- Check if drop count is greater than drop count threshold + if delta_drop_count > drop_count_threshold then + redis.call('RPUSH', debug_drop_monitor_stat_table .. '|' .. debug_counter .. '|' .. port .. '|incidents', curr_unix_timestamp) + end + + -- Remove outdated incidents + local incident_count = 0 + local number_of_outdated_incidents = 0 + local number_of_incidents = redis.call('LLEN', debug_drop_monitor_stat_table .. '|' .. debug_counter .. '|' .. port .. '|incidents') + local incident_timestamps = redis.call('LRANGE', debug_drop_monitor_stat_table .. '|' .. debug_counter .. '|' .. port .. '|incidents', 0, number_of_incidents) + for incident_index = 1, number_of_incidents do + local time_delta = curr_unix_timestamp - incident_timestamps[incident_index] + if (time_delta > window) then + number_of_outdated_incidents = number_of_outdated_incidents + 1 + else + incident_count = incident_count + 1 + end + end + + -- Delete incidents that are outside the window + redis.call('LPOP', debug_drop_monitor_stat_table .. '|' .. debug_counter .. '|' .. port .. '|incidents', number_of_outdated_incidents) + + if incident_count > incident_count_threshold then + -- Generate alert for persistent drops + redis.call('HSET', persistent_drop_alert_table, debug_counter .. '|' .. curr_unix_timestamp, 'Persistent packet drops detected on ' .. port) + -- Delete all incidents since a persistent drop alert was issued + redis.call('DEL', debug_drop_monitor_stat_table .. '|' .. debug_counter .. '|' .. port .. '|incidents') + end + end + end +end diff --git a/orchagent/eliminate_events.lua b/orchagent/eliminate_events.lua new file mode 100644 index 00000000000..871e6c1fb0f --- /dev/null +++ b/orchagent/eliminate_events.lua @@ -0,0 +1,63 @@ +-- KEYS - None +-- ARGV - None + +local state_db = "6" +local config_db = "4" + +local result = {} + +redis.call('SELECT', config_db) +local severity_keys = redis.call('KEYS', 'SUPPRESS_ASIC_SDK_HEALTH_EVENT*') +if #severity_keys == 0 then + return result +end + +local max_events = {} +for i = 1, #severity_keys, 1 do + local max_event = redis.call('HGET', severity_keys[i], 'max_events') + if max_event then + max_events[string.sub(severity_keys[i], 32, -1)] = tonumber(max_event) + end +end + +if not next (max_events) then + return result +end + +redis.call('SELECT', state_db) +local events = {} + +local event_keys = redis.call('KEYS', 'ASIC_SDK_HEALTH_EVENT_TABLE*') + +if #event_keys == 0 then + return result +end + +for i = 1, #event_keys, 1 do + local severity = redis.call('HGET', event_keys[i], 'severity') + if max_events[severity] ~= nil then + if events[severity] == nil then + events[severity] = {} + end + table.insert(events[severity], event_keys[i]) + end +end + +for severity in pairs(max_events) do + local number_received_events = 0 + if events[severity] ~= nil then + number_received_events = #events[severity] + end + if number_received_events > max_events[severity] then + table.sort(events[severity]) + local number_to_eliminate = number_received_events - max_events[severity] + for i = 1, number_to_eliminate, 1 do + redis.call('DEL', events[severity][i]) + end + table.insert(result, severity .. " events: maximum " .. max_events[severity] .. ", received " .. number_received_events .. ", eliminated " .. number_to_eliminate) + else + table.insert(result, severity .. " events: maximum " .. max_events[severity] .. ", received " .. number_received_events .. ", not exceeding the maximum") + end +end + +return result diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index d521e02b1a5..d818d69ef5d 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -10,6 +10,13 @@ #include "sai_serialize.h" #include "timer.h" #include "saihelper.h" +#include "converter.h" +#include "stringutility.h" +#include +#include + +using Clock = std::chrono::system_clock; +using TimePoint = std::chrono::time_point; #define FABRIC_POLLING_INTERVAL_DEFAULT (30) #define FABRIC_PORT_PREFIX "PORT" @@ -19,11 +26,32 @@ #define FABRIC_PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 #define FABRIC_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP "FABRIC_QUEUE_STAT_COUNTER" #define FABRIC_QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 100000 +#define FABRIC_DEBUG_POLLING_INTERVAL_DEFAULT (12) +#define FABRIC_MONITOR_DATA "FABRIC_MONITOR_DATA" +#define APPL_FABRIC_PORT_PREFIX "Fabric" +#define SWITCH_DEBUG_COUNTER_FLEX_COUNTER_GROUP "SWITCH_DEBUG_COUNTER" +#define SWITCH_DEBUG_COUNTER_POLLING_INTERVAL_MS 500 +#define FABRIC_SWITCH_DEBUG_COUNTER_POLLING_INTERVAL_MS 60000 +#define SWITCH_STANDARD_DROP_COUNTERS "SWITCH_ID" + +// constants for link monitoring +#define CHECK_TIME 120 +#define MAX_SKIP_CRCERR_ON_LNKUP_POLLS 20 +#define MAX_SKIP_FECERR_ON_LNKUP_POLLS 20 +// the follow will be replaced with the number in config_db +#define FEC_ISOLATE_POLLS 2 +#define FEC_UNISOLATE_POLLS 8 +#define ISOLATION_POLLS_CFG 1 +#define RECOVERY_POLLS_CFG 8 +#define ERROR_RATE_CRC_CELLS_CFG 1 +#define ERROR_RATE_RX_CELLS_CFG 61035156 +#define FABRIC_LINK_RATE 44316 extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; +extern string gMySwitchType; const vector port_stat_ids = { @@ -44,6 +72,11 @@ static const vector queue_stat_ids = SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL, }; +const vector switch_drop_counter_ids = +{ + SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP +}; + FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vector &tableNames, bool fabricPortStatEnabled, bool fabricQueueStatEnabled) : Orch(appl_db, tableNames), @@ -51,7 +84,8 @@ FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vector(new DBConnector("STATE_DB", 0)); m_stateTable = unique_ptr
(new Table(m_state_db.get(), APP_FABRIC_PORT_TABLE_NAME)); + m_fabricCapacityTable = unique_ptr
(new Table(m_state_db.get(), STATE_FABRIC_CAPACITY_TABLE_NAME)); m_counter_db = shared_ptr(new DBConnector("COUNTERS_DB", 0)); m_portNameQueueCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_FABRIC_QUEUE_NAME_MAP)); m_portNamePortCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_FABRIC_PORT_NAME_MAP)); + m_fabricCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_TABLE)); + + // Create Switch level drop counters for voq & fabric switch. + if ((gMySwitchType == "voq") || (gMySwitchType == "fabric")) + { + auto timer = ((gMySwitchType == "voq") ? SWITCH_DEBUG_COUNTER_POLLING_INTERVAL_MS : FABRIC_SWITCH_DEBUG_COUNTER_POLLING_INTERVAL_MS); + switch_drop_counter_manager = new FlexCounterManager(SWITCH_DEBUG_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, + timer, true); + m_counterNameToSwitchStatMap = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP)); + } - m_flex_db = shared_ptr(new DBConnector("FLEX_COUNTER_DB", 0)); - m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), APP_FABRIC_PORT_TABLE_NAME)); + m_appl_db = shared_ptr(new DBConnector("APPL_DB", 0)); + m_applTable = unique_ptr
(new Table(m_appl_db.get(), APP_FABRIC_MONITOR_PORT_TABLE_NAME)); + m_applMonitorConstTable = unique_ptr
(new Table(m_appl_db.get(), APP_FABRIC_MONITOR_DATA_TABLE_NAME)); m_fabricPortStatEnabled = fabricPortStatEnabled; m_fabricQueueStatEnabled = fabricQueueStatEnabled; @@ -75,6 +121,39 @@ FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vectorstart(); + + auto debug_executor = new ExecutableTimer(m_debugTimer, this, "FABRIC_DEBUG_POLL"); + Orch::addExecutor(debug_executor); + bool fabricPortMonitor = checkFabricPortMonState(); + if (fabricPortMonitor) + { + m_debugTimer->start(); + SWSS_LOG_INFO("Fabric monitor starts at init time"); + } +} + +bool FabricPortsOrch::checkFabricPortMonState() +{ + bool enabled = false; + std::vector constValues; + bool setCfgVal = m_applMonitorConstTable->get("FABRIC_MONITOR_DATA", constValues); + if (!setCfgVal) + { + return enabled; + } + SWSS_LOG_INFO("FabricPortsOrch::checkFabricPortMonState starts"); + for (auto cv : constValues) + { + if (fvField(cv) == "monState") + { + if (fvValue(cv) == "enable") + { + enabled = true; + return enabled; + } + } + } + return enabled; } int FabricPortsOrch::getFabricPortList() @@ -269,8 +348,8 @@ void FabricPortsOrch::updateFabricPortState() string key = FABRIC_PORT_PREFIX + to_string(lane); std::vector values; - uint32_t remote_peer; - uint32_t remote_port; + uint32_t remote_peer = 0; + uint32_t remote_port = 0; attr.id = SAI_PORT_ATTR_FABRIC_ATTACHED; status = sai_port_api->get_port_attribute(port, 1, &attr); @@ -336,25 +415,1306 @@ void FabricPortsOrch::updateFabricPortState() } } +void FabricPortsOrch::updateFabricDebugCounters() +{ + if (!m_getFabricPortListDone) return; + + SWSS_LOG_ENTER(); + + // Get time + time_t now; + struct timespec time_now; + if (clock_gettime(CLOCK_MONOTONIC, &time_now) < 0) + { + return; + } + now = time_now.tv_sec; + auto checkTime = Clock::now(); + + uint64_t fecIsolatedPolls = FEC_ISOLATE_POLLS; // monPollThreshIsolation + uint64_t fecUnisolatePolls = FEC_UNISOLATE_POLLS; // monPollThreshRecovery + uint64_t isolationPollsCfg = ISOLATION_POLLS_CFG; // monPollThreshIsolation + uint64_t recoveryPollsCfg = RECOVERY_POLLS_CFG; // monPollThreshRecovery + uint64_t errorRateCrcCellsCfg = ERROR_RATE_CRC_CELLS_CFG; // monErrThreshCrcCells + uint64_t errorRateRxCellsCfg = ERROR_RATE_RX_CELLS_CFG; // monErrThreshRxCells + string applConstKey = FABRIC_MONITOR_DATA; + std::vector constValues; + SWSS_LOG_INFO("updateFabricDebugCounters"); + + bool setCfgVal = m_applMonitorConstTable->get("FABRIC_MONITOR_DATA", constValues); + if (!setCfgVal) + { + SWSS_LOG_INFO("applConstKey %s default values not set", applConstKey.c_str()); + } + else + { + SWSS_LOG_INFO("applConstKey %s default values get set", applConstKey.c_str()); + } + string configVal = "1"; + for (auto cv : constValues) + { + configVal = fvValue(cv); + if (fvField(cv) == "monErrThreshCrcCells") + { + errorRateCrcCellsCfg = stoi(configVal); + SWSS_LOG_INFO("monErrThreshCrcCells: %s %s", configVal.c_str(), fvField(cv).c_str()); + continue; + } + if (fvField(cv) == "monErrThreshRxCells") + { + errorRateRxCellsCfg = stoi(configVal); + SWSS_LOG_INFO("monErrThreshRxCells: %s %s", configVal.c_str(), fvField(cv).c_str()); + continue; + } + if (fvField(cv) == "monPollThreshIsolation") + { + fecIsolatedPolls = stoi(configVal); + isolationPollsCfg = stoi(configVal); + SWSS_LOG_INFO("monPollThreshIsolation: %s %s", configVal.c_str(), fvField(cv).c_str()); + continue; + } + if (fvField(cv) == "monPollThreshRecovery") + { + fecUnisolatePolls = stoi(configVal); + recoveryPollsCfg = stoi(configVal); + SWSS_LOG_INFO("monPollThreshRecovery: %s", configVal.c_str()); + continue; + } + } + + // Get debug countesrs (e.g. # of cells with crc errors, # of cells) + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + sai_object_id_t port = p.second; + + string key = FABRIC_PORT_PREFIX + to_string(lane); + // so basically port is the oid + vector fieldValues; + static const array cntNames = + { + "SAI_PORT_STAT_IF_IN_ERRORS", // cells with crc errors + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS", // rx data cells + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES" // cell with uncorrectable errors + }; + if (!m_fabricCounterTable->get(sai_serialize_object_id(port), fieldValues)) + { + SWSS_LOG_INFO("no port %s", sai_serialize_object_id(port).c_str()); + } + + uint64_t rxCells = 0; + uint64_t crcErrors = 0; + uint64_t codeErrors = 0; + for (const auto& fv : fieldValues) + { + const auto field = fvField(fv); + const auto value = fvValue(fv); + for (size_t cnt = 0; cnt != cntNames.size(); cnt++) + { + if (field == "SAI_PORT_STAT_IF_IN_ERRORS") + { + crcErrors = stoull(value); + } + else if (field == "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS") + { + rxCells = stoull(value); + } + else if (field == "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES") + { + codeErrors = stoull(value); + } + SWSS_LOG_INFO("port %s %s %lld %lld %lld at %s", + sai_serialize_object_id(port).c_str(), field.c_str(), (long long)crcErrors, + (long long)rxCells, (long long)codeErrors, asctime(gmtime(&now))); + } + } + // now we get the values of: + // *totalNumCells *cellsWithCrcErrors *cellsWithUncorrectableErrors + // + // Check if the error rate (crcErrors/numRxCells) is greater than configured error threshold + // (errorRateCrcCellsCfg/errorRateRxCellsCfg). + // This is changing to check (crcErrors * errorRateRxCellsCfg) > (numRxCells * errorRateCrcCellsCfg) + // Default value is: (crcErrors * 61035156) > (numRxCells * 1) + // numRxCells = snmpBcmRxDataCells + snmpBcmRxControlCells + // As we don't have snmpBcmRxControlCells polled right now, + // we can use snmpBcmRxDataCells only and add snmpBcmRxControlCells later when it is getting polled. + // + // In STATE_DB, add several new attribute for each port: + // consecutivePollsWithErrors POLL_WITH_ERRORS + // consecutivePollsWithNoErrors POLL_WITH_NO_ERRORS + // consecutivePollsWithFecErrs POLL_WITH_FEC_ERRORS + // consecutivePollsWithNoFecErrs POLL_WITH_NOFEC_ERRORS + // + // skipErrorsOnLinkupCount SKIP_ERR_ON_LNKUP_CNT -- for skip all errors during boot up time + // skipCrcErrorsOnLinkupCount SKIP_CRC_ERR_ON_LNKUP_CNT + // skipFecErrorsOnLinkupCount SKIP_FEC_ERR_ON_LNKUP_CNT + // removeProblemLinkCount RM_PROBLEM_LNK_CNT -- this is for feature of remove a flaky link permanently + // + // cfgIsolated CONFIG_ISOLATED + + uint64_t consecutivePollsWithErrors = 0; + uint64_t consecutivePollsWithNoErrors = 0; + uint64_t consecutivePollsWithFecErrs = 0; + uint64_t consecutivePollsWithNoFecErrs = 0; + + uint64_t skipCrcErrorsOnLinkupCount = 0; + uint64_t skipFecErrorsOnLinkupCount = 0; + uint64_t prevRxCells = 0; + uint64_t prevCrcErrors = 0; + uint64_t prevCodeErrors = 0; + + uint64_t testCrcErrors = 0; + uint64_t testCodeErrors = 0; + + // isolation status + int autoIsolated = 0; + int cfgIsolated = 0; + int isolated = 0; + int origIsolated = 0; + int origPermIsolated = 0; + int permIsolate = 0; + int linkFlap = 0; + + // link status + string lnkStatus = "down"; + uint64_t lnkDownCnt = 0; + uint64_t preLnkDwnCnt = 0; + + // for testing + string testState = "product"; + + // Get appl_db values, and update state_db later with other attributes + string applKey = APPL_FABRIC_PORT_PREFIX + to_string(lane); + std::vector applValues; + string applResult = "False"; + bool exist = m_applTable->get(applKey, applValues); + if (!exist) + { + SWSS_LOG_INFO("No app infor for port %s", applKey.c_str()); + } + else + { + for (auto v : applValues) + { + applResult = fvValue(v); + if (fvField(v) == "isolateStatus") + { + if (applResult == "True") + { + cfgIsolated = 1; + } + else + { + cfgIsolated = 0; + } + SWSS_LOG_INFO("Port %s isolateStatus: %s %d", + applKey.c_str(), applResult.c_str(), cfgIsolated); + } + } + } + + // Get the consecutive polls from the state db + std::vector values; + string valuePt; + exist = m_stateTable->get(key, values); + if (!exist) + { + SWSS_LOG_INFO("No state infor for port %s", key.c_str()); + return; + } + for (auto val : values) + { + valuePt = fvValue(val); + if (fvField(val) == "STATUS") + { + lnkStatus = valuePt; + continue; + } + if (fvField(val) == "PORT_DOWN_COUNT") + { + lnkDownCnt = std::stoull(valuePt); + continue; + } + if (fvField(val) == "PORT_DOWN_COUNT_handled") + { + preLnkDwnCnt = std::stoull(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_ERRORS") + { + consecutivePollsWithErrors = std::stoull(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_NO_ERRORS") + { + consecutivePollsWithNoErrors = std::stoull(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_FEC_ERRORS") + { + consecutivePollsWithFecErrs = std::stoull(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_NOFEC_ERRORS") + { + consecutivePollsWithNoFecErrs = std::stoull(valuePt); + continue; + } + if (fvField(val) == "SKIP_CRC_ERR_ON_LNKUP_CNT") + { + skipCrcErrorsOnLinkupCount = std::stoull(valuePt); + continue; + } + if (fvField(val) == "SKIP_FEC_ERR_ON_LNKUP_CNT") + { + skipFecErrorsOnLinkupCount = std::stoull(valuePt); + continue; + } + if (fvField(val) == "RX_CELLS") + { + prevRxCells = std::stoull(valuePt); + continue; + } + if (fvField(val) == "CRC_ERRORS") + { + prevCrcErrors = std::stoull(valuePt); + continue; + } + if (fvField(val) == "CODE_ERRORS") + { + prevCodeErrors = std::stoull(valuePt); + continue; + } + if (fvField(val) == "AUTO_ISOLATED") + { + autoIsolated = to_uint(valuePt); + SWSS_LOG_INFO("port %s currently autoisolated: %s", key.c_str(),valuePt.c_str()); + continue; + } + if (fvField(val) == "ISOLATED") + { + origIsolated = to_uint(valuePt); + SWSS_LOG_INFO("port %s currently isolated: %s", key.c_str(),valuePt.c_str()); + continue; + } + if (fvField(val) == "PRM_ISOLATED") + { + origPermIsolated = to_uint(valuePt); + SWSS_LOG_INFO("port %s perm isolated: %s", key.c_str(),valuePt.c_str()); + continue; + } + if (fvField(val) == "TEST_CRC_ERRORS") + { + testCrcErrors = std::stoull(valuePt); + continue; + } + if (fvField(val) == "TEST_CODE_ERRORS") + { + testCodeErrors = std::stoull(valuePt); + continue; + } + if (fvField(val) == "TEST") + { + testState = valuePt; + continue; + } + } + + // if PORT_DOWN_COUNT != PORT_DOWN_COUNT_prev: + // there was a link down event in between, so we + // clear skipFecErrorsOnLinkupCount + // clear skipCrcErrorsOnLinkupCount + // clear consecutivePollsWithErrors + // clear consecutivePollsWithNoErrors + // clear consecutivePollsWithFecErrs + // clear consecutivePollsWithNoFecErrs + // + // if !MANUAL_ISOLATED && AUTO_ISOLATED : + // autoIsolated = 0 + // isolated = 0 + // if MANUAL_ISOLATED: + // autoIsolated = 0 + // + SWSS_LOG_INFO("Port %d lnk down cnt %lld handled: %lld", lane, (long long)lnkDownCnt, (long long)preLnkDwnCnt); + if (lnkDownCnt != preLnkDwnCnt) + { + linkFlap = checkDownCnt(key, checkTime) ? 1 : 0; + bool clearCnt = false; + if (origIsolated == 1 && cfgIsolated == 0) + { + clearCnt = true; + } + + SWSS_LOG_INFO("port %s about to clear counters.", key.c_str()); + SWSS_LOG_INFO("origIsolated %d isolated %d cfgIsolated %d clearCnt %s", origIsolated, isolated, cfgIsolated, clearCnt ? "true":"flase"); + clearFabricCnt(lane, clearCnt); + + if (linkFlap > 0 ) + { + SWSS_LOG_NOTICE("port %s possibly flapping %d", key.c_str(), linkFlap); + } + updateStateDbTable(m_stateTable, key, "PORT_DOWN_COUNT_handled", lnkDownCnt); + continue; + } + // clear lane done + + SWSS_LOG_INFO("port %s health check after clearing", key.c_str()); + + // Now should be the event monitoring on an up link + + // checking crc errors + uint64_t maxSkipCrcCnt = MAX_SKIP_CRCERR_ON_LNKUP_POLLS; + if (testState == "TEST"){ + maxSkipCrcCnt = 2; + } + if (skipCrcErrorsOnLinkupCount < maxSkipCrcCnt) + { + skipCrcErrorsOnLinkupCount += 1; + valuePt = to_string(skipCrcErrorsOnLinkupCount); + updateStateDbTable(m_stateTable, key, "SKIP_CRC_ERR_ON_LNKUP_CNT", skipCrcErrorsOnLinkupCount); + // update error counters. + prevCrcErrors = crcErrors; + } + else + { + uint64_t diffRxCells = 0; + uint64_t diffCrcCells = 0; + + diffRxCells = rxCells - prevRxCells; + if (testState == "TEST"){ + diffCrcCells = testCrcErrors - prevCrcErrors; + prevCrcErrors = 0; + isolationPollsCfg = isolationPollsCfg + 1; + } + else + { + diffCrcCells = crcErrors - prevCrcErrors; + prevCrcErrors = crcErrors; + } + bool isErrorRateMore = + ((diffCrcCells * errorRateRxCellsCfg) > + (diffRxCells * errorRateCrcCellsCfg)); + if (isErrorRateMore) + { + if (consecutivePollsWithErrors < isolationPollsCfg) + { + consecutivePollsWithErrors += 1; + consecutivePollsWithNoErrors = 0; + } + } else { + if (consecutivePollsWithNoErrors < recoveryPollsCfg) + { + consecutivePollsWithNoErrors += 1; + consecutivePollsWithErrors = 0; + } + } + SWSS_LOG_INFO("port %s diffCrcCells %lld", key.c_str(), (long long)diffCrcCells); + SWSS_LOG_INFO("consecutivePollsWithCRCErrs %lld consecutivePollsWithNoCRCErrs %lld", + (long long)consecutivePollsWithErrors, (long long)consecutivePollsWithNoErrors); + } + + // checking FEC errors + uint64_t maxSkipFecCnt = MAX_SKIP_FECERR_ON_LNKUP_POLLS; + if (testState == "TEST"){ + maxSkipFecCnt = 2; + } + if (skipFecErrorsOnLinkupCount < maxSkipFecCnt) + { + skipFecErrorsOnLinkupCount += 1; + valuePt = to_string(skipFecErrorsOnLinkupCount); + updateStateDbTable(m_stateTable, key, "SKIP_FEC_ERR_ON_LNKUP_CNT", skipFecErrorsOnLinkupCount); + // update error counters + prevCodeErrors = codeErrors; + } + else + { + uint64_t diffCodeErrors = 0; + if (testState == "TEST"){ + diffCodeErrors = testCodeErrors - prevCodeErrors; + prevCodeErrors = 0; + fecIsolatedPolls = fecIsolatedPolls + 1; + } + else + { + diffCodeErrors = codeErrors - prevCodeErrors; + prevCodeErrors = codeErrors; + } + SWSS_LOG_INFO("port %s diffCodeErrors %lld", key.c_str(), (long long)diffCodeErrors); + if (diffCodeErrors > 0) + { + if (consecutivePollsWithFecErrs < fecIsolatedPolls) + { + consecutivePollsWithFecErrs += 1; + consecutivePollsWithNoFecErrs = 0; + } + } + else if (diffCodeErrors <= 0) + { + if (consecutivePollsWithNoFecErrs < fecUnisolatePolls) + { + consecutivePollsWithNoFecErrs += 1; + consecutivePollsWithFecErrs = 0; + } + } + SWSS_LOG_INFO("consecutivePollsWithFecErrs %lld consecutivePollsWithNoFecErrs %lld", + (long long)consecutivePollsWithFecErrs, (long long)consecutivePollsWithNoFecErrs); + SWSS_LOG_INFO("fecUnisolatePolls %lld", (long long)fecUnisolatePolls); + } + + // take care serdes link shut state setting + // debug information + SWSS_LOG_INFO("port %s status up autoIsolated %d", + key.c_str(), autoIsolated); + SWSS_LOG_INFO("consecutivePollsWithErrors %lld consecutivePollsWithFecErrs %lld", + (long long)consecutivePollsWithErrors, (long long)consecutivePollsWithFecErrs); + SWSS_LOG_INFO("consecutivePollsWithNoErrors %lld consecutivePollsWithNoFecErrs %lld", + (long long)consecutivePollsWithNoErrors, (long long)consecutivePollsWithNoFecErrs); + if (autoIsolated == 0 && (consecutivePollsWithErrors >= isolationPollsCfg + || consecutivePollsWithFecErrs >= fecIsolatedPolls)) + { + // Link needs to be isolated. + SWSS_LOG_INFO("port %s auto isolated", key.c_str()); + autoIsolated = 1; + permIsolate = addErrorTime(key, checkTime) ? 1 : 0; + if (origPermIsolated == 1) + { + permIsolate = 1; + } + SWSS_LOG_NOTICE("port %s get permIsolated", key.c_str() ); + updateStateDbTable(m_stateTable, key, "AUTO_ISOLATED", autoIsolated); + SWSS_LOG_NOTICE("port %s set AUTO_ISOLATED %d", key.c_str(), autoIsolated); + } + else if (autoIsolated == 1 && consecutivePollsWithNoErrors >= recoveryPollsCfg + && consecutivePollsWithNoFecErrs >= fecUnisolatePolls) + { + // Link is isolated, but no longer needs to be. + SWSS_LOG_INFO("port %s healthy again", key.c_str()); + autoIsolated = 0; + updateStateDbTable(m_stateTable, key, "AUTO_ISOLATED", autoIsolated); + SWSS_LOG_NOTICE("port %s set AUTO_ISOLATED %d", key.c_str(), autoIsolated); + } + if (cfgIsolated == 1) + { + isolated = 1; + SWSS_LOG_INFO("port %s keep isolated due to configuation",key.c_str()); + } + else + { + if (autoIsolated == 1) + { + isolated = 1; + SWSS_LOG_INFO("port %s keep isolated due to autoisolation",key.c_str()); + } + else + { + isolated = 0; + SWSS_LOG_INFO("port %s unisolated",key.c_str()); + } + } + // if "ISOLATED" is true, Call SAI api here to actually isolated the link + // if "ISOLATED" is false, Call SAP api to actually unisolate the link + + if (permIsolate == 1 || origPermIsolated == 1) + { + isolated = 1; + permIsolate = 1; + SWSS_LOG_INFO("port %s permentantly isolated %d",key.c_str(), permIsolate ); + } + + if (origIsolated != isolated) + { + bool setVal = false; + if (isolated == 1) + { + setVal = true; + } + isolateFabricLink(lane, setVal); + } + else + { + SWSS_LOG_INFO( "Same isolation status for %d", lane); + } + + // Update state_db with link isolation data + updateStateDbTable(m_stateTable, key, "POLL_WITH_ERRORS", consecutivePollsWithErrors); + updateStateDbTable(m_stateTable, key, "POLL_WITH_NO_ERRORS", consecutivePollsWithNoErrors); + updateStateDbTable(m_stateTable, key, "POLL_WITH_FEC_ERRORS", consecutivePollsWithFecErrs); + updateStateDbTable(m_stateTable, key, "POLL_WITH_NOFEC_ERRORS", consecutivePollsWithNoFecErrs); + updateStateDbTable(m_stateTable, key, "CONFIG_ISOLATED", cfgIsolated); + updateStateDbTable(m_stateTable, key, "ISOLATED", isolated); + updateStateDbTable(m_stateTable, key, "PRM_ISOLATED", permIsolate); + + // Update state_db with error rate + valuePt = to_string(rxCells); + m_stateTable->hset(key, "RX_CELLS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set RX_CELLS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(prevCrcErrors); + m_stateTable->hset(key, "CRC_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set CRC_ERRORS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(prevCodeErrors); + m_stateTable->hset(key, "CODE_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set CODE_ERRORS %s", + key.c_str(), valuePt.c_str()); + } +} + +// Update state_db tables +void FabricPortsOrch::updateStateDbTable( + const std::unique_ptr
& stateTable, + const std::string& key, + const std::string& field, + uint64_t value) +{ + // Convert the integer value to a string + std::string valueStr = std::to_string(value); + + // Update the state table + stateTable->hset(key, field, valueStr.c_str()); + + // Log the update + SWSS_LOG_INFO("%s updates %s to %s %lld", + key.c_str(), field.c_str(), valueStr.c_str(), (long long)value); +} + +// Isolate/Unisolate a fabric link +void FabricPortsOrch::isolateFabricLink(int lane, bool isolate) +{ + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_FABRIC_ISOLATE; + attr.value.booldata = isolate; + SWSS_LOG_INFO("Set fabric port %d with isolate %s", lane, isolate? "true" : "false"); + if (m_fabricLanePortMap.find(lane) == m_fabricLanePortMap.end()) + { + SWSS_LOG_INFO("NOT find fabric lane %d", lane); + } + else + { + sai_status_t status = sai_port_api->set_port_attribute(m_fabricLanePortMap[lane], &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set admin status"); + } + SWSS_LOG_NOTICE("Set fabric port %d state isolated %s done", lane, isolate? "true" : "false"); + } +} + +// Clear fabric link counters +void FabricPortsOrch::clearFabricCnt(int lane, bool clearIsolation) +{ + // Key to get/set state_db valuse. + string key = FABRIC_PORT_PREFIX + to_string(lane); + SWSS_LOG_INFO("clearing port %s", key.c_str()); + + // clear counters + int skipCrcErrorsOnLinkupCount = 0; + int skipFecErrorsOnLinkupCount = 0; + + int consecutivePollsWithErrors = 0; + int consecutivePollsWithNoErrors = 0; + int consecutivePollsWithFecErrs = 0; + int consecutivePollsWithNoFecErrs = 0; + + int autoIsolated = 0; + int isolated = 1; + + // unisolate the link if needed + SWSS_LOG_INFO("Unisolation needed? %s", clearIsolation? "true" : "false"); + if (clearIsolation) + { + isolated = 0; + // sai call to unisolate the link + isolateFabricLink(lane, !clearIsolation); + updateStateDbTable(m_stateTable, key, "ISOLATED", isolated); + } + + // update state_db + updateStateDbTable(m_stateTable, key, "SKIP_CRC_ERR_ON_LNKUP_CNT", skipCrcErrorsOnLinkupCount); + updateStateDbTable(m_stateTable, key, "SKIP_FEC_ERR_ON_LNKUP_CNT", skipFecErrorsOnLinkupCount); + updateStateDbTable(m_stateTable, key, "POLL_WITH_ERRORS", consecutivePollsWithErrors); + updateStateDbTable(m_stateTable, key, "POLL_WITH_NO_ERRORS", consecutivePollsWithNoErrors); + updateStateDbTable(m_stateTable, key, "POLL_WITH_FEC_ERRORS", consecutivePollsWithFecErrs); + updateStateDbTable(m_stateTable, key, "POLL_WITH_NOFEC_ERRORS", consecutivePollsWithNoFecErrs); + updateStateDbTable(m_stateTable, key, "AUTO_ISOLATED", autoIsolated); +} + +// Update fabric capacity +void FabricPortsOrch::updateFabricCapacity() +{ + // Init value for fabric capacity monitoring + int capacity = 0; + int downCapacity = 0; + string lnkStatus = "down"; + string configIsolated = "0"; + string isolated = "0"; + string autoIsolated = "0"; + int operating_links = 0; + int total_links = 0; + int threshold = 100; + std::vector constValues; + string applKey = FABRIC_MONITOR_DATA; + + // Get capacity warning threshold from APPL_DB table FABRIC_MONITOR_DATA + // By default, this threshold is 100 (percentage). + bool cfgVal = m_applMonitorConstTable->get("FABRIC_MONITOR_DATA", constValues); + if(!cfgVal) + { + SWSS_LOG_INFO("%s default values not set", applKey.c_str()); + } + else + { + SWSS_LOG_INFO("%s has default values", applKey.c_str()); + } + string configVal = "1"; + for (auto cv : constValues) + { + configVal = fvValue(cv); + if (fvField(cv) == "monCapacityThreshWarn") + { + threshold = stoi(configVal); + SWSS_LOG_INFO("monCapacityThreshWarn: %s %s", configVal.c_str(), fvField(cv).c_str()); + continue; + } + } + + // Check fabric capacity. + SWSS_LOG_INFO("FabricPortsOrch::updateFabricCapacity start"); + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + string key = FABRIC_PORT_PREFIX + to_string(lane); + std::vector values; + string valuePt; + + // Get fabric serdes link status from STATE_DB + bool exist = m_stateTable->get(key, values); + if (!exist) + { + SWSS_LOG_INFO("No state infor for port %s", key.c_str()); + return; + } + for (auto val : values) + { + valuePt = fvValue(val); + if (fvField(val) == "STATUS") + { + lnkStatus = valuePt; + continue; + } + if (fvField(val) == "CONFIG_ISOLATED") + { + configIsolated = valuePt; + continue; + } + if (fvField(val) == "ISOLATED") + { + isolated = valuePt; + continue; + } + if (fvField(val) == "AUTO_ISOLATED") + { + autoIsolated = valuePt; + continue; + } + } + // Calculate total number of serdes link, number of operational links, + // total fabric capacity. + bool linkIssue = false; + if (configIsolated == "1" || isolated == "1" || autoIsolated == "1") + { + linkIssue = true; + } + + if (lnkStatus == "down" || linkIssue == true) + { + downCapacity += FABRIC_LINK_RATE; + } + else + { + capacity += FABRIC_LINK_RATE; + operating_links += 1; + } + total_links += 1; + } + + SWSS_LOG_INFO("Capacity: %d Missing %d", capacity, downCapacity); + + // Get LAST_EVENT from STATE_DB + + // Calculate the current capacity to see if + // it is lower or higher than the threshold + string cur_event = "None"; + string event = "None"; + string lastEvent = "None"; + string lastTime = "Never"; + + int expect_links = total_links * threshold / 100; + if (expect_links > operating_links) + { + cur_event = "Lower"; + } + else + { + cur_event = "Higher"; + } + + // Update the capacity data in this poll to STATE_DB + SWSS_LOG_INFO("Capacity: %d Missing %d", capacity, downCapacity); + + // Get the last event and time that event happend from STATE_DB + bool capacity_data = m_fabricCapacityTable->get("FABRIC_CAPACITY_DATA", constValues); + if (capacity_data) + { + for (auto cv : constValues) + { + if(fvField(cv) == "last_event") + { + lastEvent = fvValue(cv); + continue; + } + if(fvField(cv) == "last_event_time") + { + lastTime = fvValue(cv); + continue; + } + } + } + + auto now = std::chrono::system_clock::now(); + auto now_s = std::chrono::time_point_cast(now); + auto nse = now_s.time_since_epoch(); + + // If last event is None or higher, but the capacity is lower in this poll, + // update the STATE_DB with the event (lower) and the time. + // If the last event is lower, and the capacity is back to higher than the threshold, + // update the STATE_DB with the event (higher) and the time. + event = lastEvent; + if (cur_event == "Lower") + { + if (lastEvent == "None" || lastEvent == "Higher") + { + event = "Lower"; + lastTime = to_string(nse.count()); + if (gMySwitchType == "voq") + { + SWSS_LOG_NOTICE("Total links %d. Expected up links %d. Operational links %d. Fabric capacity %s than threshold.", + total_links, expect_links, operating_links, cur_event.c_str()); + } + } + } + else if (cur_event == "Higher") + { + if (lastEvent == "Lower") + { + event = "Higher"; + lastTime = to_string(nse.count()); + if (gMySwitchType == "voq") + { + SWSS_LOG_NOTICE("Total links %d. Expected up links %d. Operational links %d. Fabric capacity %s than threshold.", + total_links, expect_links, operating_links, cur_event.c_str()); + + } + } + } + + // Update STATE_DB + SWSS_LOG_INFO("FabricPortsOrch::updateFabricCapacity now update STATE_DB"); + m_fabricCapacityTable->hset("FABRIC_CAPACITY_DATA", "fabric_capacity", to_string(capacity)); + m_fabricCapacityTable->hset("FABRIC_CAPACITY_DATA", "missing_capacity", to_string(downCapacity)); + m_fabricCapacityTable->hset("FABRIC_CAPACITY_DATA", "operating_links", to_string(operating_links)); + m_fabricCapacityTable->hset("FABRIC_CAPACITY_DATA", "number_of_links", to_string(total_links)); + m_fabricCapacityTable->hset("FABRIC_CAPACITY_DATA", "warning_threshold", to_string(threshold)); + m_fabricCapacityTable->hset("FABRIC_CAPACITY_DATA", "last_event", event); + m_fabricCapacityTable->hset("FABRIC_CAPACITY_DATA", "last_event_time", lastTime); +} + + +// Update rate on fabric links +void FabricPortsOrch::updateFabricRate() +{ + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + string key = FABRIC_PORT_PREFIX + to_string(lane); + + // get oldRateAverage, oldData, oldTime(time.time) from state db + std::vector values; + string valuePt; + bool exist = m_stateTable->get(key, values); + double oldRxRate = 0; + uint64_t oldRxData = 0; + double oldTxRate = 0; + uint64_t oldTxData = 0; + auto now = std::chrono::system_clock::now(); + + string oldTime = "0"; + string testState = "product"; + + if(!exist) + { + SWSS_LOG_INFO("No state infor for port %s", key.c_str()); + return; + } + for (auto val : values) + { + valuePt = fvValue(val); + if (fvField(val) == "OLD_RX_RATE_AVG") + { + oldRxRate = stod(valuePt); + continue; + } + if (fvField(val) == "OLD_RX_DATA") + { + oldRxData = stoull(valuePt); + continue; + } + if (fvField(val) == "OLD_TX_RATE_AVG") + { + oldTxRate = stod(valuePt); + continue; + } + if (fvField(val) == "OLD_TX_DATA") + { + oldTxData = stoull(valuePt); + continue; + } + if (fvField(val) == "LAST_TIME") + { + oldTime = valuePt; + continue; + } + if (fvField(val) == "TEST") + { + testState = valuePt; + continue; + } + } + + + // get the newData and newTime for this poll + vector fieldValues; + sai_object_id_t port = p.second; + static const array cntNames = + { + "SAI_PORT_STAT_IF_OUT_OCTETS", // snmpBcmTxDataBytes + "SAI_PORT_STAT_IF_IN_OCTETS", // snmpBcmRxDataBytes + }; + if (!m_fabricCounterTable->get(sai_serialize_object_id(port), fieldValues)) + { + SWSS_LOG_INFO("no port %s", sai_serialize_object_id(port).c_str()); + } + uint64_t rxBytes = 0; + uint64_t txBytes = 0; + for (const auto& fv : fieldValues) + { + const auto field = fvField(fv); + const auto value = fvValue(fv); + for (size_t cnt = 0; cnt != cntNames.size(); cnt++) + { + if (field == "SAI_PORT_STAT_IF_OUT_OCTETS") + { + txBytes = stoull(value); + } + else if (field == "SAI_PORT_STAT_IF_IN_OCTETS") + { + rxBytes = stoull(value); + } + } + } + // This is for testing purpose + if (testState == "TEST") + { + txBytes = oldTxData + 295000000; + } + // calcuate the newRateAverage + //RX first + uint64_t deltaBytes = rxBytes - oldRxData; // bytes + uint64_t deltaMegabits = deltaBytes / 1000000 * 8; // Mega bits + + //cacluate rate + auto now_s = std::chrono::time_point_cast(now); + auto nse = now_s.time_since_epoch(); + long long newTime = nse.count(); + + long long deltaTime = 1; + if (stoll(oldTime) > 0) + { + deltaTime = newTime - stoll(oldTime); + } + SWSS_LOG_INFO("port %s %lld %ld ", sai_serialize_object_id(port).c_str(), + newTime, stol(oldTime)); + double percent; + long long loadInterval = FABRIC_DEBUG_POLLING_INTERVAL_DEFAULT; + percent = exp( - deltaTime / loadInterval ); + double newRate = + (oldRxRate * percent) + (static_cast(deltaMegabits) / static_cast(deltaTime)) * (1.0 - percent); + double newRxRate = newRate; + + + // TX + deltaBytes = txBytes - oldTxData; // bytes + deltaMegabits = deltaBytes / 1000000 * 8; // mb + newRate = + (oldTxRate * percent) + (static_cast(deltaMegabits) / static_cast(deltaTime)) * (1.0 - percent); + double newTxRate = newRate; + + // store the newRateAverage, newData, newTime + + SWSS_LOG_INFO( "old rx %lld rxData %lld tx %lld txData %lld time %ld", + (long long)oldRxRate, (long long)oldRxData, + (long long)oldTxRate, (long long)oldTxData, stol(oldTime) ); + SWSS_LOG_INFO( "new rx %lld rxData %lld tx %lld txData %lld time %lld", + (long long)newRxRate, (long long)rxBytes, + (long long)newTxRate, (long long)txBytes, newTime ); + + valuePt = to_string(newRxRate); + m_stateTable->hset(key, "OLD_RX_RATE_AVG", valuePt.c_str()); + + valuePt = to_string(rxBytes); + m_stateTable->hset(key, "OLD_RX_DATA", valuePt.c_str()); + + valuePt = to_string(newTxRate); + m_stateTable->hset(key, "OLD_TX_RATE_AVG", valuePt.c_str()); + + valuePt = to_string(txBytes); + m_stateTable->hset(key, "OLD_TX_DATA", valuePt.c_str()); + + valuePt = to_string(newTime); + m_stateTable->hset(key, "LAST_TIME", valuePt.c_str()); + } +} + void FabricPortsOrch::doTask() { } +void FabricPortsOrch::doFabricPortTask(Consumer &consumer) +{ + if (!checkFabricPortMonState()) + { + SWSS_LOG_INFO("doFabricPortTask returns early due to feature disabled"); + return; + } + SWSS_LOG_INFO("FabricPortsOrch::doFabricPortTask starts"); + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string key = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + string alias, lanes; + string isolateStatus; + int forceIsolateCnt = 0; + + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "alias") + { + alias = fvValue(i); + } + else if (fvField(i) == "lanes") + { + lanes = fvValue(i); + } + else if (fvField(i) == "isolateStatus") + { + isolateStatus = fvValue(i); + } + else if (fvField(i) == "forceUnisolateStatus") + { + forceIsolateCnt = stoi(fvValue(i)); + } + } + // This method may be called with only some fields included. + // In that case read in the missing field data. + if (alias == "") + { + string new_alias; + SWSS_LOG_INFO("alias is NULL, key: %s", key.c_str()); + if (m_applTable->hget(key, "alias", new_alias)) + { + alias = new_alias; + SWSS_LOG_INFO("read new_alias, key: '%s', value: '%s'", key.c_str(), new_alias.c_str()); + } + else + { + SWSS_LOG_INFO("hget failed for key: %s, alias", key.c_str()); + } + } + if (lanes == "") + { + string new_lanes; + SWSS_LOG_INFO("lanes is NULL, key: %s", key.c_str()); + if (m_applTable->hget(key, "lanes", new_lanes)) + { + lanes = new_lanes; + SWSS_LOG_INFO("read new_lanes, key: '%s', value: '%s'", key.c_str(), new_lanes.c_str()); + } + else + { + SWSS_LOG_INFO("hget failed for key: %s, lanes", key.c_str()); + } + + } + if (isolateStatus == "") + { + string new_isolateStatus; + SWSS_LOG_INFO("isolateStatus is NULL, key: %s", key.c_str()); + if (m_applTable->hget(key, "isolateStatus", new_isolateStatus)) + { + isolateStatus = new_isolateStatus; + SWSS_LOG_INFO("read new_isolateStatus, key: '%s', value: '%s'", key.c_str(), new_isolateStatus.c_str()); + } + else + { + SWSS_LOG_INFO("hget failed for key: %s, isolateStatus", key.c_str()); + } + } + // Do not process if some data is still missing. + if (alias == "" || lanes == "" || isolateStatus == "" ) + { + SWSS_LOG_INFO("NULL values, skipping %s", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + SWSS_LOG_INFO("key %s alias %s isolateStatus %s lanes %s", + key.c_str(), alias.c_str(), isolateStatus.c_str(), lanes.c_str()); + + if (isolateStatus == "False") + { + // get state db value of forceIolatedCntInStateDb, + // if forceIolatedCnt != forceIolatedCntInStateDb + // 1) clear all isolate related flags in stateDb + // 2) replace the cnt in stateb + // + + std::vector values; + string state_key = FABRIC_PORT_PREFIX + lanes; + bool exist = m_stateTable->get(state_key, values); + if (!exist) + { + SWSS_LOG_INFO("React to unshut No state infor for port %s", state_key.c_str()); + } + else + { + SWSS_LOG_INFO("React to unshut port %s", state_key.c_str()); + } + int curVal = 0; + for (auto val : values) + { + if(fvField(val) == "FORCE_UN_ISOLATE") + { + curVal = stoi(fvValue(val)); + } + } + SWSS_LOG_INFO("Current %d Config %d", curVal, forceIsolateCnt); + if (curVal != forceIsolateCnt) + { + // update all related fields in state_db: + // POLL_WITH_ERRORS 0 + // POLL_WITH_NO_ERRORS 8 + // POLL_WITH_FEC_ERRORS 0 + // POLL_WITH_NOFEC_ERRORS 8 + // CONFIG_ISOLATED 0 + // ISOLATED 0 + // AUTO_ISOLATED 0 + // PRM_ISOLATED 0 + updateStateDbTable(m_stateTable, state_key, "FORCE_UN_ISOLATE", forceIsolateCnt); + updateStateDbTable(m_stateTable, state_key, "POLL_WITH_ERRORS", m_defaultPollWithErrors); + updateStateDbTable(m_stateTable, state_key, "POLL_WITH_NO_ERRORS", m_defaultPollWithNoErrors); + updateStateDbTable(m_stateTable, state_key, "POLL_WITH_FEC_ERRORS", m_defaultPollWithFecErrors); + updateStateDbTable(m_stateTable, state_key, "POLL_WITH_NOFEC_ERRORS", m_defaultPollWithNoFecErrors); + updateStateDbTable(m_stateTable, state_key, "CONFIG_ISOLATED", m_defaultConfigIsolated); + updateStateDbTable(m_stateTable, state_key, "ISOLATED", m_defaultIsolated); + updateStateDbTable(m_stateTable, state_key, "AUTO_ISOLATED", m_defaultAutoIsolated); + updateStateDbTable(m_stateTable, state_key, "PRM_ISOLATED", m_defaultIsolated); + linkQueues.clear(); + + // unisolate the link + bool setVal = false; + isolateFabricLink(to_uint(lanes), setVal); + } + } + } + it = consumer.m_toSync.erase(it); + } +} + void FabricPortsOrch::doTask(Consumer &consumer) { + SWSS_LOG_NOTICE("doTask from FabricPortsOrch"); + + string table_name = consumer.getTableName(); + SWSS_LOG_INFO("Table name: %s", table_name.c_str()); + + if (table_name == APP_FABRIC_MONITOR_PORT_TABLE_NAME) + { + doFabricPortTask(consumer); + } } void FabricPortsOrch::doTask(swss::SelectableTimer &timer) { SWSS_LOG_ENTER(); - if (!m_getFabricPortListDone) + if (timer.getFd() == m_timer->getFd()) + { + if (!m_getFabricPortListDone) + { + getFabricPortList(); + } + + if (m_getFabricPortListDone) + { + updateFabricPortState(); + } + if (((gMySwitchType == "voq") || (gMySwitchType == "fabric")) && (!m_isSwitchStatsGenerated)) + { + createSwitchDropCounters(); + m_isSwitchStatsGenerated = true; + } + if (checkFabricPortMonState() && !m_debugTimerEnabled) + { + m_debugTimer->start(); + m_debugTimerEnabled = true; + } + else if (!checkFabricPortMonState()) + { + m_debugTimerEnabled = false; + } + } + else if (timer.getFd() == m_debugTimer->getFd()) + { + if (!m_getFabricPortListDone) + { + // Skip collecting debug information + // as we don't have all fabric ports yet. + return; + } + + if (!m_debugTimerEnabled) + { + m_debugTimer->stop(); + return; + } + + if (m_getFabricPortListDone) + { + SWSS_LOG_INFO("Fabric monitor enabled"); + updateFabricDebugCounters(); + updateFabricCapacity(); + updateFabricRate(); + } + } +} + +void FabricPortsOrch::createSwitchDropCounters(void) +{ + std::unordered_set counter_stats; + for (const auto& it: switch_drop_counter_ids) { - getFabricPortList(); + std::string drop_stats = sai_serialize_switch_stat(it); + counter_stats.emplace(drop_stats); } + const auto switch_id= sai_serialize_object_id(gSwitchId); + vector switchNameSwitchCounterMap; + switchNameSwitchCounterMap.emplace_back(SWITCH_STANDARD_DROP_COUNTERS, switch_id); + m_counterNameToSwitchStatMap->set("", switchNameSwitchCounterMap); + + switch_drop_counter_manager->setCounterIdList(gSwitchId, CounterType::SWITCH_DEBUG, counter_stats); +} - if (m_getFabricPortListDone) +bool FabricPortsOrch::addErrorTime(const std::string& link, TimePoint now) +{ + bool permIsolate = false; + auto& timestamps = linkQueues[link]; + std::time_t now_c = Clock::to_time_t(now); + SWSS_LOG_INFO("link: %s auto isolate at %s", link.c_str(), asctime(gmtime(&now_c))); + + // Add new timestamp to the queue + timestamps.push(now); + // Check if we have at least 3 timestamps, and pop the old timestamps + auto last = timestamps.back(); + auto first = timestamps.front(); + + auto diff = last - first; + auto checkPeriod = std::chrono::minutes(CHECK_TIME); + auto hours = checkPeriod.count(); + SWSS_LOG_INFO("check time window: %lld", static_cast(hours) ); + while (diff > checkPeriod) { - updateFabricPortState(); + timestamps.pop(); // Remove old timestamp + first = timestamps.front(); + diff = std::chrono::duration_cast(last - first); + } + if (timestamps.size() >= 3) + { + first = timestamps.front(); + diff = last - first; + + if (diff <= checkPeriod) + { // If within 2 hours + permIsolate = true; + } else { + SWSS_LOG_INFO("do not perm isolated the link"); + } + } else { + SWSS_LOG_INFO("Not enough events yet"); } + auto ptTime = std::chrono::duration_cast(diff).count(); + if (permIsolate) + { + SWSS_LOG_INFO("Event queue size %u isolation within %lld, so perm isolated: %d", + static_cast(timestamps.size()), + static_cast(ptTime), permIsolate); + } + SWSS_LOG_INFO("Add isolation event: check period diff %lld size %u perm: %d", + static_cast(ptTime), + static_cast(timestamps.size()), permIsolate); + return permIsolate; +} + +// The link status will shows down if the card get removed/power cycled +// or link actually flaping. If the link get status down too many times +// during the last several hours, say 2 hours, we consider the links mostly +// flaky, and will try to isolate the link. +bool FabricPortsOrch::checkDownCnt(const std::string& link, TimePoint now) +{ + bool linkFlapped = false; + + auto& timestamps = linkQueues[link]; + timestamps.push(now); + + auto last = timestamps.back(); + auto first = timestamps.front(); + auto diff = last - first; + auto checkPeriod = std::chrono::minutes(CHECK_TIME); + while (diff > checkPeriod) + { + timestamps.pop(); // Remove old timestamp + first = timestamps.front(); + diff = std::chrono::duration_cast(last - first); + } + if (timestamps.size() >= 3) + { + first = timestamps.front(); + diff = last - first; + + if (diff <= checkPeriod) + { // If within 2 hours + linkFlapped = true; + } else { + SWSS_LOG_INFO("The link down may from peer cards gone"); + } + } else { + SWSS_LOG_INFO("Not enough events to check yet"); + } + + return linkFlapped; } diff --git a/orchagent/fabricportsorch.h b/orchagent/fabricportsorch.h index de7ee7a7b0e..4f6a3923928 100644 --- a/orchagent/fabricportsorch.h +++ b/orchagent/fabricportsorch.h @@ -9,6 +9,12 @@ #include "producertable.h" #include "flex_counter_manager.h" +using Clock = std::chrono::system_clock; +using TimePoint = std::chrono::time_point; + +#define STATE_FABRIC_CAPACITY_TABLE_NAME "FABRIC_CAPACITY_TABLE" +#define STATE_PORT_CAPACITY_TABLE_NAME "PORT_CAPACITY_TABLE" + class FabricPortsOrch : public Orch, public Subject { public: @@ -23,17 +29,24 @@ class FabricPortsOrch : public Orch, public Subject shared_ptr m_state_db; shared_ptr m_counter_db; - shared_ptr m_flex_db; + shared_ptr m_appl_db; unique_ptr
m_stateTable; unique_ptr
m_portNameQueueCounterTable; unique_ptr
m_portNamePortCounterTable; + unique_ptr
m_fabricCounterTable; + unique_ptr
m_applTable; + unique_ptr
m_fabricCapacityTable; + unique_ptr
m_applMonitorConstTable; unique_ptr m_flexCounterTable; + shared_ptr
m_counterNameToSwitchStatMap; swss::SelectableTimer *m_timer = nullptr; + swss::SelectableTimer *m_debugTimer = nullptr; FlexCounterManager port_stat_manager; FlexCounterManager queue_stat_manager; + FlexCounterManager *switch_drop_counter_manager = nullptr; sai_uint32_t m_fabricPortCount; map m_fabricLanePortMap; @@ -43,13 +56,46 @@ class FabricPortsOrch : public Orch, public Subject bool m_getFabricPortListDone = false; bool m_isQueueStatsGenerated = false; + bool m_debugTimerEnabled = false; + bool m_isSwitchStatsGenerated = false; + + int m_defaultPollWithErrors = 0; + int m_defaultPollWithNoErrors = 8; + int m_defaultPollWithFecErrors = 0; + int m_defaultPollWithNoFecErrors = 8; + int m_defaultConfigIsolated = 0; + int m_defaultIsolated = 0; + int m_defaultAutoIsolated = 0; + std::unordered_map> linkQueues; + std::unordered_map> dnLkQueues; + int getFabricPortList(); void generatePortStats(); void updateFabricPortState(); + void updateFabricDebugCounters(); + void updateFabricCapacity(); + bool checkFabricPortMonState(); + void updateFabricRate(); + void createSwitchDropCounters(); + void clearFabricCnt(int lane, bool clearIsolation); + void updateStateDbTable( + const unique_ptr
& stateTable, + const string& key, + const string& field, + uint64_t value); + void isolateFabricLink(int lane, bool isolate); void doTask() override; void doTask(Consumer &consumer); + void doFabricPortTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); + + bool addErrorTime( + const std::string& link, + TimePoint now); + bool checkDownCnt( + const std::string& link, + TimePoint now); }; #endif /* SWSS_FABRICPORTSORCH_H */ diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index 3861e823c9b..cbd76835d7e 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -42,8 +42,8 @@ FdbOrch::FdbOrch(DBConnector* applDbConnector, vector app Orch::addExecutor(flushNotifier); /* Add FDB notifications support from ASIC */ - DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); - m_fdbNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + m_notificationsDb = make_shared("ASIC_DB", 0); + m_fdbNotificationConsumer = new swss::NotificationConsumer(m_notificationsDb.get(), "NOTIFICATIONS"); auto fdbNotifier = new Notifier(m_fdbNotificationConsumer, this, "FDB_NOTIFICATIONS"); Orch::addExecutor(fdbNotifier); } @@ -772,6 +772,7 @@ void FdbOrch::doTask(Consumer& consumer) string esi = ""; unsigned int vni = 0; string sticky = ""; + string discard = "false"; for (auto i : kfvFieldsValues(t)) { @@ -784,6 +785,10 @@ void FdbOrch::doTask(Consumer& consumer) { type = fvValue(i); } + if (fvField(i) == "discard") + { + discard = fvValue(i); + } if(origin == FDB_ORIGIN_VXLAN_ADVERTIZED) { @@ -850,6 +855,8 @@ void FdbOrch::doTask(Consumer& consumer) } } + // set entry port_name, which is used in mux fdb update logic + entry.port_name = port; FdbData fdbData; fdbData.bridge_port_id = SAI_NULL_OBJECT_ID; @@ -859,6 +866,7 @@ void FdbOrch::doTask(Consumer& consumer) fdbData.esi = esi; fdbData.vni = vni; fdbData.is_flush_pending = false; + fdbData.discard = discard; if (addFdbEntry(entry, port, fdbData)) { if (origin == FDB_ORIGIN_MCLAG_ADVERTIZED) @@ -1138,6 +1146,35 @@ void FdbOrch::flushFDBEntries(sai_object_id_t bridge_port_oid, } } } +void FdbOrch::flushFdbByVlan(const string &alias) +{ + sai_status_t status; + swss::Port vlan; + sai_attribute_t vlan_attr[2]; + + if (!m_portsOrch->getPort(alias, vlan)) + { + return; + } + + vlan_attr[0].id = SAI_FDB_FLUSH_ATTR_BV_ID; + vlan_attr[0].value.oid = vlan.m_vlan_info.vlan_oid; + vlan_attr[1].id = SAI_FDB_FLUSH_ATTR_ENTRY_TYPE; + vlan_attr[1].value.s32 = SAI_FDB_FLUSH_ENTRY_TYPE_DYNAMIC; + status = sai_fdb_api->flush_fdb_entries(gSwitchId, 2, vlan_attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Flush fdb failed, return code %x", status); + } + else + { + SWSS_LOG_INFO("Flush by vlan %s vlan_oid 0x%" PRIx64 "", + alias.c_str(), vlan.m_vlan_info.vlan_oid); + } + + return; +} void FdbOrch::notifyObserversFDBFlush(Port &port, sai_object_id_t& bvid) { @@ -1170,6 +1207,12 @@ void FdbOrch::updatePortOperState(const PortOperStateUpdate& update) if (update.operStatus == SAI_PORT_OPER_STATUS_DOWN) { swss::Port p = update.port; + if (gMlagOrch->isMlagInterface(p.m_alias)) + { + SWSS_LOG_NOTICE("Ignoring fdb flush on MCLAG port:%s", p.m_alias.c_str()); + return; + } + if (p.m_bridge_port_id != SAI_NULL_OBJECT_ID) { flushFDBEntries(p.m_bridge_port_id, SAI_NULL_OBJECT_ID); @@ -1451,7 +1494,9 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, attrs.push_back(attr); } } - + attr.id = SAI_FDB_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = (fdbData.discard == "true") ? SAI_PACKET_ACTION_DROP: SAI_PACKET_ACTION_FORWARD; + attrs.push_back(attr); if (macUpdate) { SWSS_LOG_INFO("MAC-Update FDB %s in %s on from-%s:to-%s from-%s:to-%s origin-%d-to-%d", diff --git a/orchagent/fdborch.h b/orchagent/fdborch.h index 09bc6dcc69e..ef912b84005 100644 --- a/orchagent/fdborch.h +++ b/orchagent/fdborch.h @@ -65,6 +65,7 @@ struct FdbData string esi; unsigned int vni; sai_fdb_entry_type_t sai_fdb_type; + string discard; }; struct SavedFdbEntry @@ -102,6 +103,7 @@ class FdbOrch: public Orch, public Subject, public Observer static const int fdborch_pri; void flushFDBEntries(sai_object_id_t bridge_port_oid, sai_object_id_t vlan_oid); + void flushFdbByVlan(const string &); void notifyObserversFDBFlush(Port &p, sai_object_id_t&); private: @@ -113,6 +115,7 @@ class FdbOrch: public Orch, public Subject, public Observer Table m_mclagFdbStateTable; NotificationConsumer* m_flushNotificationsConsumer; NotificationConsumer* m_fdbNotificationConsumer; + shared_ptr m_notificationsDb; void doTask(Consumer& consumer); void doTask(NotificationConsumer& consumer); diff --git a/orchagent/fgnhgorch.cpp b/orchagent/fgnhgorch.cpp index 0deabdb24d8..7a937205585 100644 --- a/orchagent/fgnhgorch.cpp +++ b/orchagent/fgnhgorch.cpp @@ -1,6 +1,7 @@ #include #include #include "fgnhgorch.h" +#include "orch_zmq_config.h" #include "routeorch.h" #include "logger.h" #include "swssnet.h" @@ -23,11 +24,12 @@ extern PortsOrch *gPortsOrch; FgNhgOrch::FgNhgOrch(DBConnector *db, DBConnector *appDb, DBConnector *stateDb, vector &tableNames, NeighOrch *neighOrch, IntfsOrch *intfsOrch, VRFOrch *vrfOrch) : Orch(db, tableNames), + m_zmqClient(create_local_zmq_client(ORCH_NORTHBOND_ROUTE_ZMQ_ENABLED, false)), m_neighOrch(neighOrch), m_intfsOrch(intfsOrch), m_vrfOrch(vrfOrch), m_stateWarmRestartRouteTable(stateDb, STATE_FG_ROUTE_TABLE_NAME), - m_routeTable(appDb, APP_ROUTE_TABLE_NAME) + m_routeTable(createProducerStateTable(appDb, APP_ROUTE_TABLE_NAME, m_zmqClient)) { SWSS_LOG_ENTER(); isFineGrainedConfigured = false; @@ -69,7 +71,7 @@ void FgNhgOrch::update(SubjectType type, void *cntx) { continue; } - + if (!validNextHopInNextHopGroup(nhk)) { SWSS_LOG_WARN("Failed validNextHopInNextHopGroup for nh %s ip %s", @@ -142,32 +144,47 @@ bool FgNhgOrch::bake() } /* calculateBankHashBucketStartIndices: generates the hash_bucket_indices for all banks - * and stores it in fgNhgEntry for the group. - * The function will identify the # of next-hops assigned to each bank and + * and stores it in fgNhgEntry for the group. + * The function will identify the # of next-hops assigned to each bank and * assign the total number of hash buckets for a bank, based on the proportional - * number of next-hops in the bank. - * eg: Bank0: 6 nh, Bank1: 3 nh, total buckets: 30 => + * number of next-hops in the bank. + * eg: Bank0: 6 nh, Bank1: 3 nh, total buckets: 30 => * calculateBankHashBucketStartIndices: Bank0: Bucket# 0-19, Bank1: Bucket# 20-29 */ void FgNhgOrch::calculateBankHashBucketStartIndices(FgNhgEntry *fgNhgEntry) { SWSS_LOG_ENTER(); - uint32_t num_banks = 0; + vector memb_per_bank; - for (auto nh : fgNhgEntry->next_hops) + uint32_t buckets_per_nexthop; + uint32_t extra_buckets; + uint32_t split_extra_buckets_among_bank; + uint32_t num_banks = 0; + if (fgNhgEntry->match_mode == FGMatchMode::PREFIX_BASED) + { + // For prefix_based match_mode there is no bank configuration, so a single bank is used + num_banks = 1; + memb_per_bank.push_back(fgNhgEntry->max_next_hops); // all nexthops are in bank 0 + buckets_per_nexthop = fgNhgEntry->real_bucket_size / fgNhgEntry->max_next_hops; + extra_buckets = fgNhgEntry->real_bucket_size - (buckets_per_nexthop * fgNhgEntry->max_next_hops); + } + else { - while (nh.second.bank + 1 > num_banks) + for (auto nh : fgNhgEntry->next_hops) { - num_banks++; - memb_per_bank.push_back(0); + while (nh.second.bank + 1 > num_banks) + { + num_banks++; + memb_per_bank.push_back(0); + } + memb_per_bank[nh.second.bank] = memb_per_bank[nh.second.bank] + 1; } - memb_per_bank[nh.second.bank] = memb_per_bank[nh.second.bank] + 1; - } - uint32_t buckets_per_nexthop = fgNhgEntry->real_bucket_size/((uint32_t)fgNhgEntry->next_hops.size()); - uint32_t extra_buckets = fgNhgEntry->real_bucket_size - (buckets_per_nexthop*((uint32_t)fgNhgEntry->next_hops.size())); - uint32_t split_extra_buckets_among_bank = extra_buckets/num_banks; - extra_buckets = extra_buckets - (split_extra_buckets_among_bank*num_banks); + buckets_per_nexthop = fgNhgEntry->real_bucket_size / ((uint32_t)fgNhgEntry->next_hops.size()); + extra_buckets = fgNhgEntry->real_bucket_size - (buckets_per_nexthop * ((uint32_t)fgNhgEntry->next_hops.size())); + } + split_extra_buckets_among_bank = extra_buckets / num_banks; + extra_buckets = extra_buckets - (split_extra_buckets_among_bank * num_banks); uint32_t prev_idx = 0; @@ -201,32 +218,16 @@ void FgNhgOrch::setStateDbRouteEntry(const IpPrefix &ipPrefix, uint32_t index, N SWSS_LOG_ENTER(); string key = ipPrefix.to_string(); - // Write to StateDb - std::vector fvs; + string field = std::to_string(index); + string value = nextHop.to_string(); - // check if profile already exists - if yes - skip creation - m_stateWarmRestartRouteTable.get(key, fvs); - - //bucket rewrite - if (fvs.size() > index) - { - FieldValueTuple fv(std::to_string(index), nextHop.to_string()); - fvs[index] = fv; - SWSS_LOG_INFO("Set state db entry for ip prefix %s next hop %s with index %d", - ipPrefix.to_string().c_str(), nextHop.to_string().c_str(), index); - m_stateWarmRestartRouteTable.set(key, fvs); - } - else - { - fvs.push_back(FieldValueTuple(std::to_string(index), nextHop.to_string())); - SWSS_LOG_INFO("Add new next hop entry %s with index %d for ip prefix %s", - nextHop.to_string().c_str(), index, ipPrefix.to_string().c_str()); - m_stateWarmRestartRouteTable.set(key, fvs); - } + m_stateWarmRestartRouteTable.hset(key, field, value); + SWSS_LOG_INFO("Set state db entry for ip prefix %s next hop %s with index %d", + key.c_str(), value.c_str(), index); } -bool FgNhgOrch::writeHashBucketChange(FGNextHopGroupEntry *syncd_fg_route_entry, uint32_t index, sai_object_id_t nh_oid, +bool FgNhgOrch::writeHashBucketChange(FGNextHopGroupEntry *syncd_fg_route_entry, HashBucketIdx index, sai_object_id_t nh_oid, const IpPrefix &ipPrefix, NextHopKey nextHop) { SWSS_LOG_ENTER(); @@ -282,7 +283,7 @@ bool FgNhgOrch::createFineGrainedNextHopGroup(FGNextHopGroupEntry &syncd_fg_rout if (platform == VS_PLATFORM_SUBSTRING) { - /* TODO: need implementation for SAI_NEXT_HOP_GROUP_ATTR_REAL_SIZE */ + /* TODO: need implementation for SAI_NEXT_HOP_GROUP_ATTR_REAL_SIZE */ fgNhgEntry->real_bucket_size = fgNhgEntry->configured_bucket_size; } else @@ -306,7 +307,8 @@ bool FgNhgOrch::createFineGrainedNextHopGroup(FGNextHopGroupEntry &syncd_fg_rout } fgNhgEntry->real_bucket_size = nhg_attr.value.u32; } - + // Initialize the vector to store sai next hop group members + syncd_fg_route_entry.nhopgroup_members.resize(fgNhgEntry->real_bucket_size, SAI_NULL_OBJECT_ID); calculateBankHashBucketStartIndices(fgNhgEntry); SWSS_LOG_NOTICE("fgnhgorch created next hop group %s of size %d", nextHops.to_string().c_str(), fgNhgEntry->real_bucket_size); @@ -320,7 +322,7 @@ bool FgNhgOrch::removeFineGrainedNextHopGroup(FGNextHopGroupEntry *syncd_fg_rout sai_status_t status; - for (auto nhgm : syncd_fg_route_entry->nhopgroup_members) + for (auto &nhgm : syncd_fg_route_entry->nhopgroup_members) { status = sai_next_hop_group_api->remove_next_hop_group_member(nhgm); if (status != SAI_STATUS_SUCCESS) @@ -399,12 +401,11 @@ bool FgNhgOrch::validNextHopInNextHopGroup(const NextHopKey& nexthop) } fgNhgEntry = member_entry->second; } - else + else { fgNhgEntry = prefix_entry->second; } std::map nhopgroup_members_set; - std::vector bank_member_changes( fgNhgEntry->hash_bucket_indices.size(), BankMemberChanges()); @@ -418,11 +419,19 @@ bool FgNhgOrch::validNextHopInNextHopGroup(const NextHopKey& nexthop) { /* Only happens the 1st time when hash_bucket_indices are not inited */ - for (auto it : fgNhgEntry->next_hops) + if (fgNhgEntry->match_mode == FGMatchMode::PREFIX_BASED) + { + // create a single bank in case of prefix_based match mode + bank_member_changes.push_back(BankMemberChanges()); + } + else { - while (bank_member_changes.size() <= it.second.bank) + for (auto it : fgNhgEntry->next_hops) { - bank_member_changes.push_back(BankMemberChanges()); + while (bank_member_changes.size() <= it.second.bank) + { + bank_member_changes.push_back(BankMemberChanges()); + } } } } @@ -453,11 +462,12 @@ bool FgNhgOrch::validNextHopInNextHopGroup(const NextHopKey& nexthop) { for (auto active_nh : syncd_fg_route_entry->active_nexthops) { + // include the existing active nhs into the configured bank bank_member_changes[fgNhgEntry->next_hops[active_nh.ip_address].bank]. active_nhs.push_back(active_nh); } - if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, + if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, nhopgroup_members_set, route_table.first)) { SWSS_LOG_ERROR("Failed to set fine grained next hop %s", @@ -503,7 +513,7 @@ bool FgNhgOrch::invalidNextHopInNextHopGroup(const NextHopKey& nexthop) } fgNhgEntry = member_entry->second; } - else + else { fgNhgEntry = prefix_entry->second; } @@ -536,7 +546,7 @@ bool FgNhgOrch::invalidNextHopInNextHopGroup(const NextHopKey& nexthop) bank_member_changes[fgNhgEntry->next_hops[nexthop.ip_address].bank]. nhs_to_del.push_back(nexthop); - if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, + if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, nhopgroup_members_set, route_table.first)) { SWSS_LOG_ERROR("Failed to set fine grained next hop %s", @@ -558,30 +568,32 @@ bool FgNhgOrch::invalidNextHopInNextHopGroup(const NextHopKey& nexthop) /* setActiveBankHashBucketChanges: Sets hash buckets for active banks and called on a PER bank basis * This function deals with a scenario where next-hop changes occurred for the route, * and the next-hop change didn't cause an entire bank to go active/inactive. - * The function uses bank_member_changes to compute the hash buckets to modify, in order to satisfy the next-hop + * The function uses bank_member_changes to compute the hash buckets to modify, in order to satisfy the next-hop * availability for the route/neigh. * Eg: Prefix A had nhs 1, 2, 3 with 1, 2, 3, being equally distributed over hash buckets * 0-59(20 buckets per nh). If there was a nh removal of nh 2, this fn would equally redistribute hash buckets - * for nh 2 to nh 1 and nh 3. Leading to 30 hash buckets, each, for nh 1 and nh 3, and none for nh 2. + * for nh 2 to nh 1 and nh 3. Leading to 30 hash buckets, each, for nh 1 and nh 3, and none for nh 2. * Thereby achieving consistent and layered hashing. */ bool FgNhgOrch::setActiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, - uint32_t bank, uint32_t syncd_bank, std::vector bank_member_changes, + uint32_t syncd_bank, BankMemberChanges bank_member_change, std::map &nhopgroup_members_set, const IpPrefix &ipPrefix) { SWSS_LOG_ENTER(); - BankMemberChanges bank_member_change = bank_member_changes[bank]; uint32_t add_idx = 0, del_idx = 0; FGNextHopGroupMap *bank_fgnhg_map = &(syncd_fg_route_entry->syncd_fgnhg_map[syncd_bank]); + // Replace hash bucket indices of deleted NHs with added NHs while(del_idx < bank_member_change.nhs_to_del.size() && add_idx < bank_member_change.nhs_to_add.size()) { + // get the hash bucket indices for the deleted NHs HashBuckets *hash_buckets = &(bank_fgnhg_map->at(bank_member_change.nhs_to_del[del_idx])); + // fill the hash bucket indices with the added NHs for (uint32_t i = 0; i < hash_buckets->size(); i++) { - if (!writeHashBucketChange(syncd_fg_route_entry, hash_buckets->at(i), + if (!writeHashBucketChange(syncd_fg_route_entry, hash_buckets->at(i), nhopgroup_members_set[bank_member_change.nhs_to_add[add_idx]], ipPrefix, bank_member_change.nhs_to_add[add_idx])) { @@ -601,7 +613,7 @@ bool FgNhgOrch::setActiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_rou } /* Given that we resolved add + del on a bank in the above while stmt - * We will either have add OR delete left to do, and the logic below + * We will either have add OR delete left to do, and the logic below * relies on this fact */ if (del_idx < bank_member_change.nhs_to_del.size()) @@ -610,63 +622,81 @@ bool FgNhgOrch::setActiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_rou fgNhgEntry->hash_bucket_indices[syncd_bank].start_index; uint32_t exp_bucket_size = num_buckets_in_bank / (uint32_t)bank_member_change.active_nhs.size(); uint32_t num_nhs_with_one_more = (num_buckets_in_bank % (uint32_t)bank_member_change.active_nhs.size()); + auto active_nh_list = bank_member_change.active_nhs; + bool move_bkt = false; + bool remove_nh = false; - - while(del_idx < bank_member_change.nhs_to_del.size()) + auto it = active_nh_list.begin(); + while (del_idx < bank_member_change.nhs_to_del.size()) { HashBuckets *hash_buckets = &(bank_fgnhg_map->at(bank_member_change.nhs_to_del[del_idx])); - for (uint32_t i = 0; i < hash_buckets->size(); i++) - { - NextHopKey round_robin_nh = bank_member_change.active_nhs[i % - bank_member_change.active_nhs.size()]; - if (!writeHashBucketChange(syncd_fg_route_entry, hash_buckets->at(i), - nhopgroup_members_set[round_robin_nh], ipPrefix, round_robin_nh)) + uint32_t bkt_idx = 0; + while(bkt_idx < hash_buckets->size()) + { + move_bkt = false; remove_nh = false; + // wraparound the active_nh_list if needed + if (it == active_nh_list.end()) { - return false; + it = active_nh_list.begin(); + if (it == active_nh_list.end()) + { + // this can neven happen + SWSS_LOG_ERROR("%s Unexpected no more active NHs before adding the %zu buckets, exp_bucket_size(%d)", + ipPrefix.to_string().c_str(), + hash_buckets->size(), exp_bucket_size); + return false; + } } - bank_fgnhg_map->at(round_robin_nh).push_back(hash_buckets->at(i)); /* Logic below ensure that # hash buckets assigned to a nh is equalized, - * we could have used simple round robin to reassign hash buckets to - * other available nhs, but for cases where # hash buckets is not + * we could have used simple round robin to reassign hash buckets to + * other available nhs, but for cases where # hash buckets is not * divisible by # of nhs, simple round robin can make the hash bucket * distribution non-ideal, thereby nhs can attract unequal traffic */ - if (num_nhs_with_one_more == 0) + if (bank_fgnhg_map->at(*it).size() == exp_bucket_size) { - if (bank_fgnhg_map->at(round_robin_nh).size() == exp_bucket_size) + if (num_nhs_with_one_more == 0) { - SWSS_LOG_INFO("%s reached %d, don't remove more buckets", - (bank_member_change.active_nhs[i % bank_member_change.active_nhs.size()]).to_string().c_str(), - exp_bucket_size); - bank_member_change.active_nhs.erase(bank_member_change.active_nhs.begin() + - (i % bank_member_change.active_nhs.size())); + + SWSS_LOG_INFO("%s already reached expected bucket size %d, don't add more buckets, bkt_idx(%d)", + (*it).to_string().c_str(), exp_bucket_size, bkt_idx); + remove_nh = true; } - else if (bank_fgnhg_map->at(round_robin_nh).size() > exp_bucket_size) + else { - SWSS_LOG_WARN("Unexpected bucket size for nh %s, size %zu, exp_size %d", - round_robin_nh.to_string().c_str(), bank_fgnhg_map->at(round_robin_nh).size(), - exp_bucket_size); + num_nhs_with_one_more--; + SWSS_LOG_INFO("%s reached %d, don't add more buckets after this one, num_nhs_with_one_more remaining=%d, bkt_idx(%d)", + (*it).to_string().c_str(), exp_bucket_size + 1, num_nhs_with_one_more, bkt_idx); + move_bkt = true; + remove_nh = true; } } else { - if (bank_fgnhg_map->at(round_robin_nh).size() == exp_bucket_size +1) - { + SWSS_LOG_INFO("%s has not reached min expected size of %d, keep adding to this nexthop, bkt_idx(%d)", + (*it).to_string().c_str(), exp_bucket_size, bkt_idx); + move_bkt = true; + } - SWSS_LOG_INFO("%s reached %d, don't remove more buckets num_nhs_with_one_more %d", - (bank_member_change.active_nhs[i %bank_member_change.active_nhs.size()]).to_string().c_str(), - exp_bucket_size +1, num_nhs_with_one_more -1); - bank_member_change.active_nhs.erase(bank_member_change.active_nhs.begin() + - (i % bank_member_change.active_nhs.size())); - num_nhs_with_one_more--; - } - else if (bank_fgnhg_map->at(round_robin_nh).size() > exp_bucket_size +1) + if (move_bkt) + { + if (!writeHashBucketChange(syncd_fg_route_entry, hash_buckets->at(bkt_idx), + nhopgroup_members_set[*it], ipPrefix, *it)) { - SWSS_LOG_WARN("Unexpected bucket size for nh %s, size %zu, exp_size %d", - round_robin_nh.to_string().c_str(), bank_fgnhg_map->at(round_robin_nh).size(), - exp_bucket_size + 1); + return false; } + bank_fgnhg_map->at(*it).push_back(hash_buckets->at(bkt_idx)); + bkt_idx++; + } + if (remove_nh) + { + // remove the nh from the active list + active_nh_list.erase(it); + } + else + { + it++; } } @@ -687,11 +717,16 @@ bool FgNhgOrch::setActiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_rou uint32_t num_nhs_with_eq_to_exp = total_nhs - num_nhs_with_one_more; uint32_t add_nh_exp_bucket_size = exp_bucket_size; - while(add_idx < bank_member_change.nhs_to_add.size()) + + auto active_nh_list = bank_member_change.active_nhs; + bool move_bkt; + bool remove_nh; + + while (add_idx < bank_member_change.nhs_to_add.size()) { - (*bank_fgnhg_map)[bank_member_change.nhs_to_add[add_idx]] = - std::vector(); - auto it = bank_member_change.active_nhs.begin(); + (*bank_fgnhg_map)[bank_member_change.nhs_to_add[add_idx]] = + HashBuckets(); + auto it = active_nh_list.begin(); if (num_nhs_with_eq_to_exp > 0) { num_nhs_with_eq_to_exp--; @@ -701,77 +736,80 @@ bool FgNhgOrch::setActiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_rou add_nh_exp_bucket_size = exp_bucket_size + 1; num_nhs_with_one_more--; } - - while(bank_fgnhg_map->at(bank_member_change.nhs_to_add[add_idx]).size() != add_nh_exp_bucket_size) + for (const auto &nh : active_nh_list) { - if (it == bank_member_change.active_nhs.end()) - { - it = bank_member_change.active_nhs.begin(); - } - vector *map_entry = &(bank_fgnhg_map->at(*it)); - if ((*map_entry).size() <= 1) - { - /* Case where the number of hash buckets for the nh is <= 1 */ - SWSS_LOG_WARN("Next-hop %s has %d entries, either number of buckets were less or we hit a bug", - (*it).to_string().c_str(), ((int)(*map_entry).size())); - return false; - } - else + SWSS_LOG_INFO("Active next-hop %s has %zu buckets assigned", + nh.to_string().c_str(), bank_fgnhg_map->at(nh).size()); + } + // add buckets to this nh until HashBuckets size has reached the expected size + while (bank_fgnhg_map->at(bank_member_change.nhs_to_add[add_idx]).size() != add_nh_exp_bucket_size) + { + move_bkt = false; + remove_nh = false; + if (it == active_nh_list.end()) { - uint32_t last_elem = map_entry->at((*map_entry).size() - 1); - - if (!writeHashBucketChange(syncd_fg_route_entry, last_elem, - nhopgroup_members_set[bank_member_change.nhs_to_add[add_idx]], - ipPrefix, bank_member_change.nhs_to_add[add_idx])) - { - return false; - } - - (*bank_fgnhg_map)[bank_member_change.nhs_to_add[add_idx]].push_back(last_elem); - (*map_entry).erase((*map_entry).end() - 1); + it = active_nh_list.begin(); } + // get the active nh's hashbuckets + HashBuckets *map_entry = &(bank_fgnhg_map->at(*it)); /* Logic below ensure that # hash buckets assigned to a nh is equalized, - * we could have used simple round robin to reassign hash buckets to - * other available nhs, but for cases where # hash buckets is not + * we could have used simple round robin to reassign hash buckets to + * other available nhs, but for cases where # hash buckets is not * divisible by # of nhs, simple round robin can make the hash bucket * distribution non-ideal, thereby nhs can attract unequal traffic */ - if (num_nhs_with_one_more == 0) + if (map_entry->size() > exp_bucket_size + 1) { - if (map_entry->size() == exp_bucket_size) - { - SWSS_LOG_INFO("%s reached %d, don't remove more buckets", it->to_string().c_str(), exp_bucket_size); - it = bank_member_change.active_nhs.erase(it); - } - else if (map_entry->size() < exp_bucket_size) + SWSS_LOG_INFO("Nexthop %s has %zu, continue to remove more buckets after this.", + it->to_string().c_str(), map_entry->size()); + move_bkt=true; + } + else if (map_entry->size() == exp_bucket_size + 1) + { + if (num_nhs_with_one_more == 0) { - SWSS_LOG_WARN("Unexpected bucket size for nh %s, size %zu, exp_size %d", - it->to_string().c_str(), map_entry->size(), exp_bucket_size); - it++; + SWSS_LOG_INFO("Nexthop %s has %zu, take one bucket from it and remove it from the list.", + it->to_string().c_str(), map_entry->size()); + remove_nh = true; + move_bkt = true; } else { - it++; + SWSS_LOG_INFO("Nexthop %s with %zu buckets is one of the nexthops with one more, remove it \ + from the list. remaining num_nhs_with_one_more = %d", + it->to_string().c_str(), map_entry->size(), num_nhs_with_one_more - 1); + remove_nh = true; + num_nhs_with_one_more--; } } else { - if (map_entry->size() == exp_bucket_size +1) - { - SWSS_LOG_INFO("%s reached %d, don't remove more buckets num_nhs_with_one_more %d", - it->to_string().c_str(), exp_bucket_size + 1, num_nhs_with_one_more -1); - it = bank_member_change.active_nhs.erase(it); - num_nhs_with_one_more--; - } - else if (map_entry->size() < exp_bucket_size) - { - SWSS_LOG_WARN("Unexpected bucket size for nh %s, size %zu, exp_size %d", - it->to_string().c_str(), map_entry->size(), exp_bucket_size + 1); - it++; - } - else + SWSS_LOG_WARN("Nexthop %s already has %zu, don't remove any buckets but remove this nexthop from the list", + it->to_string().c_str(), map_entry->size()); + remove_nh=true; + } + + // Replace the active nh's buckets from the end + if (move_bkt) + { + HashBucketIdx last_elem = map_entry->at((*map_entry).size() - 1); + if (!writeHashBucketChange(syncd_fg_route_entry, last_elem, + nhopgroup_members_set[bank_member_change.nhs_to_add[add_idx]], + ipPrefix, bank_member_change.nhs_to_add[add_idx])) { - it++; + return false; } + + (*bank_fgnhg_map)[bank_member_change.nhs_to_add[add_idx]].push_back(last_elem); + (*map_entry).erase((*map_entry).end() - 1); + } + + // If done with current nexthop from the active_nh_list, then remove it. Else simply move to next nexthop + // in the active_nh_list + if (remove_nh) + { + active_nh_list.erase(it); + } else { + it++; } } syncd_fg_route_entry->active_nexthops.insert(bank_member_change.nhs_to_add[add_idx]); @@ -878,10 +916,10 @@ bool FgNhgOrch::setInactiveBankToNextAvailableActiveBank(FGNextHopGroupEntry *sy * Eg: Lets assume prefix A had nhs 1, 2, 3, 4, 5, 6 with nhs being equally distributed over hash buckets * 0-59(10 per nh). Now there was a nh deletion of 1, 2, 3 which constituted bank 0(4, 5, 6 constituted bank 1) * This function will identify that all of bank 0's nh are down and re-assign all the hash buckets(0-29) for these nhs to - * nhs from bank 1, along with making local struct changes to track this for future route/neigh changes. + * nhs from bank 1, along with making local struct changes to track this for future route/neigh changes. */ bool FgNhgOrch::setInactiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, - uint32_t bank,std::vector &bank_member_changes, + uint32_t bank,std::vector &bank_member_changes, std::map &nhopgroup_members_set, const IpPrefix &ipPrefix) { SWSS_LOG_ENTER(); @@ -889,6 +927,7 @@ bool FgNhgOrch::setInactiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_r if (bank_member_changes[bank].nhs_to_add.size() > 0) { /* Previously inactive bank now transitions to active */ + SWSS_LOG_INFO("Previously inactive bank now transitions to active bank %u", bank); syncd_fg_route_entry->syncd_fgnhg_map[bank].clear(); for (uint32_t i = fgNhgEntry->hash_bucket_indices[bank].start_index; i <= fgNhgEntry->hash_bucket_indices[bank].end_index; i++) @@ -896,7 +935,7 @@ bool FgNhgOrch::setInactiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_r NextHopKey bank_nh_memb = bank_member_changes[bank]. nhs_to_add[i % bank_member_changes[bank].nhs_to_add.size()]; - if (!writeHashBucketChange(syncd_fg_route_entry, i, + if (!writeHashBucketChange(syncd_fg_route_entry, i, nhopgroup_members_set[bank_nh_memb], ipPrefix, bank_nh_memb)) { return false; @@ -907,12 +946,13 @@ bool FgNhgOrch::setInactiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_r } syncd_fg_route_entry->inactive_to_active_map[bank] = bank; - SWSS_LOG_NOTICE("Bank# %d of FG next-hops is up for prefix %s", + SWSS_LOG_NOTICE("Bank# %d of FG next-hops is up for prefix %s", bank, ipPrefix.to_string().c_str()); } else if (bank_member_changes[bank].nhs_to_del.size() > 0) { /* Previously active bank now transitions to inactive */ + SWSS_LOG_INFO("Previously active bank now transitions to inactive bank %u", bank); if (!setInactiveBankToNextAvailableActiveBank(syncd_fg_route_entry, fgNhgEntry, bank, bank_member_changes, nhopgroup_members_set, ipPrefix)) { @@ -925,28 +965,27 @@ bool FgNhgOrch::setInactiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_r syncd_fg_route_entry->active_nexthops.erase(memb); } - SWSS_LOG_NOTICE("Bank# %d of FG next-hops is down for prefix %s", bank, + SWSS_LOG_NOTICE("Bank# %d of FG next-hops is down for prefix %s", bank, ipPrefix.to_string().c_str()); } else { /* Previously inactive bank remains inactive */ uint32_t active_bank = syncd_fg_route_entry->inactive_to_active_map[bank]; + SWSS_LOG_INFO("Previously inactive bank remains inactive bank %u active bank %u", bank, active_bank); if (bank_member_changes[active_bank].active_nhs.size() == 0) { if (!setInactiveBankToNextAvailableActiveBank(syncd_fg_route_entry, fgNhgEntry, bank, bank_member_changes, nhopgroup_members_set, ipPrefix)) { - SWSS_LOG_INFO("Failed to map to active_bank and set nh in SAI"); return false; } } else { - if (!setActiveBankHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, - active_bank, bank, bank_member_changes, nhopgroup_members_set, ipPrefix)) + if (!setActiveBankHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, + bank, bank_member_changes[active_bank], nhopgroup_members_set, ipPrefix)) { - SWSS_LOG_INFO("Failed setActiveBankHashBucketChanges"); return false; } } @@ -955,8 +994,8 @@ bool FgNhgOrch::setInactiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_r } -bool FgNhgOrch::computeAndSetHashBucketChanges(FGNextHopGroupEntry *syncd_fg_route_entry, - FgNhgEntry *fgNhgEntry, std::vector &bank_member_changes, +bool FgNhgOrch::computeAndSetHashBucketChanges(FGNextHopGroupEntry *syncd_fg_route_entry, + FgNhgEntry *fgNhgEntry, std::vector &bank_member_changes, std::map &nhopgroup_members_set, const IpPrefix &ipPrefix) { @@ -968,20 +1007,21 @@ bool FgNhgOrch::computeAndSetHashBucketChanges(FGNextHopGroupEntry *syncd_fg_rou (bank_member_changes[bank_idx].nhs_to_add.size() != 0 && bank_member_changes[bank_idx].nhs_to_del.size() != 0)) { + SWSS_LOG_INFO("active nhs in bank %u", bank_idx); /* Active bank is is determined by there being active nhs on the bank OR * an edge case where all active_nhs went down(nhs_to_del > 0) BUT - * simultaneously, nhs were added(nhs_to_add > 0). + * simultaneously, nhs were added(nhs_to_add > 0). * Route this to fn which deals with active banks */ - if (!setActiveBankHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, - bank_idx, bank_idx, bank_member_changes, nhopgroup_members_set, ipPrefix)) + if (!setActiveBankHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, + bank_idx, bank_member_changes[bank_idx], nhopgroup_members_set, ipPrefix)) { return false; } } else { - if (!setInactiveBankHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, + if (!setInactiveBankHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, bank_idx, bank_member_changes, nhopgroup_members_set, ipPrefix)) { return false; @@ -993,126 +1033,166 @@ bool FgNhgOrch::computeAndSetHashBucketChanges(FGNextHopGroupEntry *syncd_fg_rou } -bool FgNhgOrch::setNewNhgMembers(FGNextHopGroupEntry &syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, +bool FgNhgOrch::setNewNhgMembers(FGNextHopGroupEntry &syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, std::vector &bank_member_changes, std::map &nhopgroup_members_set, const IpPrefix &ipPrefix) { SWSS_LOG_ENTER(); - sai_status_t status; - bool isWarmReboot = false; - auto nexthopsMap = m_recoveryMap.find(ipPrefix.to_string()); - for (uint32_t i = 0; i < fgNhgEntry->hash_bucket_indices.size(); i++) + auto num_banks = fgNhgEntry->hash_bucket_indices.size(); + std::vector active_banks; + std::vector inactive_banks; + + for (uint32_t i = 0; i < num_banks; i++) { - uint32_t bank = i; - syncd_fg_route_entry.inactive_to_active_map[bank] = bank; + if (bank_member_changes[i].nhs_to_add.size() != 0) + { + active_banks.push_back(i); + SWSS_LOG_INFO("active bank %d num nhs %zu", i, bank_member_changes[i].nhs_to_add.size()); + } + else + { + inactive_banks.push_back(i); + SWSS_LOG_INFO("inactive bank %d", i); + } + + // add a new fgnh group map if this is a new bank if (i + 1 > syncd_fg_route_entry.syncd_fgnhg_map.size()) { syncd_fg_route_entry.syncd_fgnhg_map.push_back(FGNextHopGroupMap()); } + } - if (bank_member_changes[i].nhs_to_add.size() == 0) - { - /* Case where bank is empty */ - for (uint32_t active_bank = 0; active_bank < bank_member_changes.size(); active_bank++) - { - if (bank_member_changes[active_bank].nhs_to_add.size() != 0) - { - bank = active_bank; - syncd_fg_route_entry.inactive_to_active_map[i] = active_bank; - break; - } - } - SWSS_LOG_NOTICE("Bank# %d of FG next-hops is down for prefix %s", - i, ipPrefix.to_string().c_str()); - } - if (bank_member_changes[bank].nhs_to_add.size() == 0) + /* Case where all banks are empty, we let retry logic(upon rv false) take care of this scenario */ + if (active_banks.size() == 0) + { + SWSS_LOG_INFO("Found no next-hops to add, skipping"); + return false; + } + // spray the nhs of active banks into active bank's buckets + for (auto active_bank : active_banks) + { + syncd_fg_route_entry.inactive_to_active_map[active_bank] = active_bank; + if (!sprayBankNhgMembers(syncd_fg_route_entry, ipPrefix, + fgNhgEntry->hash_bucket_indices[active_bank], fgNhgEntry, + active_bank, bank_member_changes[active_bank], + nhopgroup_members_set)) { - /* Case where all banks are empty, we let retry logic(upon rv false) take care of this scenario */ - SWSS_LOG_INFO("Found no next-hops to add, skipping"); return false; } + } - // recover state before warm reboot - if (nexthopsMap != m_recoveryMap.end()) + // spray the nhs of active banks into inactive bank's buckets + for (uint32_t i = 0; i < inactive_banks.size(); i++) + { + auto active_bank = active_banks[i % active_banks.size()]; + syncd_fg_route_entry.inactive_to_active_map[inactive_banks[i]] = + active_bank; + if(!sprayBankNhgMembers(syncd_fg_route_entry, ipPrefix, + fgNhgEntry->hash_bucket_indices[inactive_banks[i]], fgNhgEntry, + inactive_banks[i], bank_member_changes[active_bank], + nhopgroup_members_set)) { - isWarmReboot = true; + return false; } + SWSS_LOG_NOTICE("Bank# %d of FG next-hops is down for prefix %s", + inactive_banks[i], ipPrefix.to_string().c_str()); + } + + auto nexthopsMap = m_recoveryMap.find(ipPrefix.to_string()); + if (nexthopsMap != m_recoveryMap.end()) { + m_recoveryMap.erase(nexthopsMap); + } + + syncd_fg_route_entry.points_to_rif = false; + + return true; +} + +bool FgNhgOrch::sprayBankNhgMembers(FGNextHopGroupEntry &syncd_fg_route_entry, const IpPrefix &ipPrefix, + BankIndexRange hash_idx_range, FgNhgEntry *fgNhgEntry, + uint32_t bank, BankMemberChanges &bank_member_change, + std::map &nhopgroup_members_set) +{ + sai_status_t status; + bool isWarmReboot = false; + auto nexthopsMap = m_recoveryMap.find(ipPrefix.to_string()); - SWSS_LOG_INFO("Warm reboot is set to %d", isWarmReboot); + SWSS_LOG_ENTER(); - for (uint32_t j = fgNhgEntry->hash_bucket_indices[i].start_index; - j <= fgNhgEntry->hash_bucket_indices[i].end_index; j++) + // recover state before warm reboot + if (nexthopsMap != m_recoveryMap.end()) + { + isWarmReboot = true; + } + + SWSS_LOG_INFO("Warm reboot is set to %d, bank %d", isWarmReboot, bank); + + // fill the hash idx range with the nhs + for (uint32_t bucket_idx = hash_idx_range.start_index; + bucket_idx <= hash_idx_range.end_index; bucket_idx++) + { + NextHopKey nh_memb_key; + if (isWarmReboot) { - NextHopKey bank_nh_memb; - if (isWarmReboot) + nh_memb_key = nexthopsMap->second[bucket_idx]; + SWSS_LOG_INFO("Recovering nexthop %s with bucket_idx %d", nh_memb_key.ip_address.to_string().c_str(), bucket_idx); + // case nhps in bank are all down + if (fgNhgEntry->next_hops[nh_memb_key.ip_address].bank != bank) { - bank_nh_memb = nexthopsMap->second[j]; - SWSS_LOG_INFO("Recovering nexthop %s with bucket %d", bank_nh_memb.ip_address.to_string().c_str(), j); - // case nhps in bank are all down - if (fgNhgEntry->next_hops[bank_nh_memb.ip_address].bank != i) - { - syncd_fg_route_entry.inactive_to_active_map[i] = fgNhgEntry->next_hops[bank_nh_memb.ip_address].bank; - } - } - else - { - bank_nh_memb = bank_member_changes[bank].nhs_to_add[j % - bank_member_changes[bank].nhs_to_add.size()]; + syncd_fg_route_entry.inactive_to_active_map[bank] = fgNhgEntry->next_hops[nh_memb_key.ip_address].bank; } + } + else + { + nh_memb_key = bank_member_change.nhs_to_add[bucket_idx % + bank_member_change.nhs_to_add.size()]; + } - // Create a next hop group member - sai_attribute_t nhgm_attr; - vector nhgm_attrs; - nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; - nhgm_attr.value.oid = syncd_fg_route_entry.next_hop_group_id; - nhgm_attrs.push_back(nhgm_attr); + // Create a next hop group member + sai_attribute_t nhgm_attr; + vector nhgm_attrs; + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + nhgm_attr.value.oid = syncd_fg_route_entry.next_hop_group_id; + nhgm_attrs.push_back(nhgm_attr); - nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; - nhgm_attr.value.oid = nhopgroup_members_set[bank_nh_memb]; - nhgm_attrs.push_back(nhgm_attr); + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + nhgm_attr.value.oid = nhopgroup_members_set[nh_memb_key]; + nhgm_attrs.push_back(nhgm_attr); - nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_INDEX; - nhgm_attr.value.s32 = j; - nhgm_attrs.push_back(nhgm_attr); + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_INDEX; + nhgm_attr.value.s32 = bucket_idx; + nhgm_attrs.push_back(nhgm_attr); - sai_object_id_t next_hop_group_member_id; - status = sai_next_hop_group_api->create_next_hop_group_member( - &next_hop_group_member_id, - gSwitchId, - (uint32_t)nhgm_attrs.size(), - nhgm_attrs.data()); - if (status != SAI_STATUS_SUCCESS) + sai_object_id_t next_hop_group_member_id; + status = sai_next_hop_group_api->create_next_hop_group_member( + &next_hop_group_member_id, + gSwitchId, + (uint32_t)nhgm_attrs.size(), + nhgm_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create next hop group %" PRIx64 " member %" PRIx64 ": %d", + syncd_fg_route_entry.next_hop_group_id, next_hop_group_member_id, status); + if (!removeFineGrainedNextHopGroup(&syncd_fg_route_entry)) { - SWSS_LOG_ERROR("Failed to create next hop group %" PRIx64 " member %" PRIx64 ": %d", - syncd_fg_route_entry.next_hop_group_id, next_hop_group_member_id, status); - - if (!removeFineGrainedNextHopGroup(&syncd_fg_route_entry)) - { - SWSS_LOG_ERROR("Failed to clean-up after next-hop member creation failure"); - } - - task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } + SWSS_LOG_ERROR("Failed to clean-up after next-hop member creation failure"); } - setStateDbRouteEntry(ipPrefix, j, bank_nh_memb); - syncd_fg_route_entry.syncd_fgnhg_map[i][bank_nh_memb].push_back(j); - syncd_fg_route_entry.active_nexthops.insert(bank_nh_memb); - syncd_fg_route_entry.nhopgroup_members.push_back(next_hop_group_member_id); - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } - } - if (isWarmReboot) - { - m_recoveryMap.erase(nexthopsMap); + setStateDbRouteEntry(ipPrefix, bucket_idx, nh_memb_key); + syncd_fg_route_entry.syncd_fgnhg_map[bank][nh_memb_key].push_back(bucket_idx); + syncd_fg_route_entry.active_nexthops.insert(nh_memb_key); + syncd_fg_route_entry.nhopgroup_members[bucket_idx] = next_hop_group_member_id; + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); } - syncd_fg_route_entry.points_to_rif = false; return true; } @@ -1121,9 +1201,11 @@ bool FgNhgOrch::setNewNhgMembers(FGNextHopGroupEntry &syncd_fg_route_entry, FgNh bool FgNhgOrch::isRouteFineGrained(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const NextHopGroupKey &nextHops) { SWSS_LOG_ENTER(); - + if (!isFineGrainedConfigured || (vrf_id != gVirtualRouterId)) { + SWSS_LOG_DEBUG("Route %s:%s vrf %" PRIx64 " default_vrf %" PRIx64 " NOT fine grained ECMP", + ipPrefix.to_string().c_str(), nextHops.to_string().c_str(), vrf_id, gVirtualRouterId); return false; } @@ -1156,13 +1238,15 @@ bool FgNhgOrch::isRouteFineGrained(sai_object_id_t vrf_id, const IpPrefix &ipPre */ if (fgNhgEntry != member_entry->second) { - SWSS_LOG_INFO("FG nh found across different FG_NH groups: %s expected %s, actual %s", + SWSS_LOG_INFO("FG nh found across different FG_NH groups: %s expected %s, actual %s", nhk.to_string().c_str(), fgNhgEntry->fg_nhg_name.c_str(), member_entry->second->fg_nhg_name.c_str()); return false; } } } } + SWSS_LOG_DEBUG("Route %s:%s vrf %" PRIx64 " default_vrf %" PRIx64 " IS fine grained ECMP", + ipPrefix.to_string().c_str(), nextHops.to_string().c_str(), vrf_id, gVirtualRouterId); return true; } @@ -1227,6 +1311,15 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const return true; } + if (!fgNhgEntry) + { + SWSS_LOG_ERROR("fgNhgOrch got a route addition %s:%s with %s", + ipPrefix.to_string().c_str(), nextHops.to_string().c_str(), + (prefix_entry == m_fgNhgPrefixes.end()) ? " no prefix and FG ECMP nexthop entries" : + "no FG ECMP entry for the prefix"); + return true; + } + if (m_syncdFGRouteTables.find(vrf_id) == m_syncdFGRouteTables.end()) { m_syncdFGRouteTables.emplace(vrf_id, FGRouteTable()); @@ -1244,11 +1337,72 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const { /* Only happens the 1st time when hash_bucket_indices are not initialized */ - for (auto it : fgNhgEntry->next_hops) + if (fgNhgEntry->match_mode == FGMatchMode::PREFIX_BASED) + { + bank_member_changes.resize(1, BankMemberChanges()); // prefix_based match_mode supports single bank + } + else + { + for (auto it : fgNhgEntry->next_hops) + { + while (bank_member_changes.size() <= it.second.bank) + { + bank_member_changes.push_back(BankMemberChanges()); + } + } + } + } + + // if match_mode==PREFIX_BASED, then initialize fgNhgEntry->next_hops with the list of IP addresses in nextHops + if (fgNhgEntry->match_mode == FGMatchMode::PREFIX_BASED) + { + for (NextHopKey nhk : next_hop_set) { - while (bank_member_changes.size() <= it.second.bank) + if (fgNhgEntry->next_hops.find(nhk.ip_address) == fgNhgEntry->next_hops.end()) { - bank_member_changes.push_back(BankMemberChanges()); + if(fgNhgEntry->next_hops.size() >= fgNhgEntry->max_next_hops) + { + SWSS_LOG_WARN("Next-hop %s exceeds max_next_hops %d for prefix %s, skipping", + nhk.to_string().c_str(), fgNhgEntry->max_next_hops, ipPrefix.to_string().c_str()); + continue; + } + FGNextHopInfo fg_nh_info = {0, "", LINK_DOWN}; // prefix_based match_mode uses single bank 0 + // find link for the next hop + if (m_neighOrch->hasNextHop(nhk)) + { + Port p; + if (gPortsOrch->getPort(nhk.alias, p)) + { + // track Physical links + if (p.m_type == Port::PHY) + { + fg_nh_info.link = p.m_alias; + if (p.m_oper_status == SAI_PORT_OPER_STATUS_UP) + { + fg_nh_info.link_oper_state = LINK_UP; + } + auto link_info = fgNhgEntry->links.find(p.m_alias); + if (link_info != fgNhgEntry->links.end()) + { + // add nhk.ip_address to the list of next-hops for the link + fgNhgEntry->links[p.m_alias].push_back(nhk.ip_address); + } + else { + // create a new list with nhk.ip_address + fgNhgEntry->links[p.m_alias] = {nhk.ip_address}; + } + } + } + else + { + SWSS_LOG_WARN("Next-hop %s alias %s prefix %s Link not found", + nhk.ip_address.to_string().c_str(), nhk.alias.c_str(), + ipPrefix.to_string().c_str()); + } + } + fgNhgEntry->next_hops[nhk.ip_address] = fg_nh_info; + SWSS_LOG_INFO("Next-hop %s alias %s added to Fine Grained next-hop group member list for prefix %s", + nhk.ip_address.to_string().c_str(), nhk.alias.c_str(), ipPrefix.to_string().c_str()); } } } @@ -1290,10 +1444,10 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const nhs_to_add.push_back(nhk); next_hop_to_add = true; } - else + else { FGNextHopGroupEntry *syncd_fg_route_entry = &(syncd_fg_route_entry_it->second); - if (syncd_fg_route_entry->active_nexthops.find(nhk) == + if (syncd_fg_route_entry->active_nexthops.find(nhk) == syncd_fg_route_entry->active_nexthops.end()) { bank_member_changes[fgNhgEntry->next_hops[nhk.ip_address].bank]. @@ -1344,7 +1498,7 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const } } - if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, + if (!computeAndSetHashBucketChanges(syncd_fg_route_entry, fgNhgEntry, bank_member_changes, nhopgroup_members_set, ipPrefix)) { return false; @@ -1384,7 +1538,7 @@ bool FgNhgOrch::setFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix, const m_syncdFGRouteTables[vrf_id][ipPrefix] = syncd_fg_route_entry; } - m_syncdFGRouteTables[vrf_id][ipPrefix].nhg_key = nextHops; + m_syncdFGRouteTables[vrf_id][ipPrefix].nhg_key = nextHops; for (uint32_t bank_idx = 0; bank_idx < bank_member_changes.size(); bank_idx++) { @@ -1420,7 +1574,7 @@ bool FgNhgOrch::removeFgNhg(sai_object_id_t vrf_id, const IpPrefix &ipPrefix) auto it_route_table = m_syncdFGRouteTables.find(vrf_id); if (it_route_table == m_syncdFGRouteTables.end()) { - SWSS_LOG_INFO("No route table found for %s, vrf_id 0x%" PRIx64, + SWSS_LOG_INFO("No route table found for %s, vrf_id 0x%" PRIx64, ipPrefix.to_string().c_str(), vrf_id); return true; } @@ -1503,7 +1657,7 @@ void FgNhgOrch::cleanupIpInLinkToIpMap(const string &link, const IpAddress &ip, SWSS_LOG_WARN("Unexpected case where structs are out of sync for %s", link.c_str()); return; - } + } for (auto ip_it = begin(link_entry->second); ip_it != end(link_entry->second); ip_it++) { if (*ip_it == ip) @@ -1521,9 +1675,10 @@ bool FgNhgOrch::doTaskFgNhg(const KeyOpFieldsValuesTuple & t) SWSS_LOG_ENTER(); string op = kfvOp(t); string key = kfvKey(t); - string fg_nhg_name = key; + string fg_nhg_name = key; auto fgNhg_entry = m_FgNhgs.find(fg_nhg_name); - FGMatchMode match_mode = ROUTE_BASED; + FGMatchMode match_mode = FGMatchMode::ROUTE_BASED; + uint32_t max_next_hops = 0; if (op == SET_COMMAND) { @@ -1539,7 +1694,11 @@ bool FgNhgOrch::doTaskFgNhg(const KeyOpFieldsValuesTuple & t) { if (fvValue(i) == "nexthop-based") { - match_mode = NEXTHOP_BASED; + match_mode = FGMatchMode::NEXTHOP_BASED; + } + else if (fvValue(i) == "prefix-based") + { + match_mode = FGMatchMode::PREFIX_BASED; } else if (fvValue(i) != "route-based") { @@ -1547,6 +1706,17 @@ bool FgNhgOrch::doTaskFgNhg(const KeyOpFieldsValuesTuple & t) fvValue(i).c_str()); } } + else if (fvField(i) == "max_next_hops") + { + + max_next_hops = stoi(fvValue(i)); + } + } + + // Generate an error if num_next_hops is not set for prefix_based match_mode + if (match_mode == FGMatchMode::PREFIX_BASED && max_next_hops == 0) + { + SWSS_LOG_ERROR("Received match_mode==prefix_based with max_next_hops 0, not a supported combination"); } if (bucket_size == 0) @@ -1555,7 +1725,7 @@ bool FgNhgOrch::doTaskFgNhg(const KeyOpFieldsValuesTuple & t) return true; } - if (fgNhg_entry != m_FgNhgs.end()) + if (fgNhg_entry != m_FgNhgs.end()) { SWSS_LOG_WARN("FG_NHG %s already exists, ignoring", fg_nhg_name.c_str()); } @@ -1565,8 +1735,9 @@ bool FgNhgOrch::doTaskFgNhg(const KeyOpFieldsValuesTuple & t) fgNhgEntry.configured_bucket_size = bucket_size; fgNhgEntry.fg_nhg_name = fg_nhg_name; fgNhgEntry.match_mode = match_mode; - SWSS_LOG_NOTICE("Added new FG_NHG entry with bucket_size %d, match_mode: %'" PRIu8, - bucket_size, match_mode); + fgNhgEntry.max_next_hops = max_next_hops; + SWSS_LOG_NOTICE("Added new FG_NHG entry %s with bucket_size %d, match_mode: %'" PRIu8, + fg_nhg_name.c_str(), bucket_size, match_mode); isFineGrainedConfigured = true; m_FgNhgs[fg_nhg_name] = fgNhgEntry; } @@ -1578,7 +1749,7 @@ bool FgNhgOrch::doTaskFgNhg(const KeyOpFieldsValuesTuple & t) SWSS_LOG_INFO("Received delete call for non-existent entry %s", fg_nhg_name.c_str()); } - else + else { /* Check if there are no child objects associated prior to deleting */ if (fgNhg_entry->second.prefixes.size() == 0 && fgNhg_entry->second.next_hops.size() == 0) @@ -1589,9 +1760,22 @@ bool FgNhgOrch::doTaskFgNhg(const KeyOpFieldsValuesTuple & t) } else { - SWSS_LOG_INFO("Child Prefix/Member entries are still associated with this FG_NHG %s", + /* in case of prefix-based match mode, members are not deleted by config, so delete them here */ + if (fgNhg_entry->second.prefixes.size() == 0 && + fgNhg_entry->second.match_mode == FGMatchMode::PREFIX_BASED && + fgNhg_entry->second.next_hops.size() != 0) + { + fgNhg_entry->second.next_hops.clear(); + m_FgNhgs.erase(fgNhg_entry); + SWSS_LOG_INFO("Clearing dynamic NH Member entries associated with this FG_NHG %s", + fg_nhg_name.c_str()); + } + else + { + SWSS_LOG_INFO("Child Prefix/Member entries are still associated with this FG_NHG %s", fg_nhg_name.c_str()); - return false; + return false; + } } if (m_FgNhgs.size() == 0) { @@ -1640,6 +1824,21 @@ bool FgNhgOrch::doTaskFgNhgPrefix(const KeyOpFieldsValuesTuple & t) return false; } + // In the prefix-based match mode, only 1 FG_NHG_PREFIX is allowed for each FG_NHG since we can't + // handle the case where multiple prefixes are attached to the same FG_NHG and we receive routes for + // these prefixes with disjoint nexthops. If ip_prefix doesn't match with the existing prefixe in + // the fgNhgEntry, then return false. + if (fgNhg_entry->second.match_mode == FGMatchMode::PREFIX_BASED) + { + if (fgNhg_entry->second.prefixes.size() != 0) + { + SWSS_LOG_NOTICE("FG_NHG %s already has a prefix %s, ignoring addition of new prefix %s", + fg_nhg_name.c_str(), fgNhg_entry->second.prefixes[0].to_string().c_str(), + ip_prefix.to_string().c_str()); + return true; + } + } + if (fgNhg_entry->second.match_mode == NEXTHOP_BASED) { SWSS_LOG_NOTICE("FG_NHG %s is configured as nexthop_based: FG_NHG_PREFIX is a no-op", @@ -1663,7 +1862,7 @@ bool FgNhgOrch::doTaskFgNhgPrefix(const KeyOpFieldsValuesTuple & t) { SWSS_LOG_INFO("Route exists in routeorch, deleting from APP_DB to begin migration"); m_fgPrefixAddCache[ip_prefix] = nhg; - m_routeTable.del(ip_prefix.to_string()); + m_routeTable->del(ip_prefix.to_string()); return false; } } @@ -1675,13 +1874,14 @@ bool FgNhgOrch::doTaskFgNhgPrefix(const KeyOpFieldsValuesTuple & t) SWSS_LOG_INFO("Route removed in routeorch, now do an APP_DB addition"); fgNhg_entry->second.prefixes.push_back(ip_prefix); m_fgNhgPrefixes[ip_prefix] = &(fgNhg_entry->second); - m_routeTable.set(ip_prefix.to_string(), generateRouteTableFromNhgKey(addCache->second)); + m_routeTable->set(ip_prefix.to_string(), generateRouteTableFromNhgKey(addCache->second)); m_fgPrefixAddCache.erase(addCache); SWSS_LOG_INFO("Performed APP_DB addition with prefix %s", ip_prefix.to_string().c_str()); } else { - SWSS_LOG_INFO("Route exists in routeorch, and APP_DB route was deleted, waiting for routeorch delete to complete"); + SWSS_LOG_INFO("Route(%s) ADD exists in routeorch, and APP_DB route was deleted, waiting for routeorch delete to complete", + ip_prefix.to_string().c_str()); return false; } } @@ -1703,14 +1903,16 @@ bool FgNhgOrch::doTaskFgNhgPrefix(const KeyOpFieldsValuesTuple & t) m_syncdFGRouteTables.at(vrf_id).find(ip_prefix) != m_syncdFGRouteTables.at(vrf_id).end()) { nhg = m_syncdFGRouteTables.at(vrf_id).at(ip_prefix).nhg_key; - } + } auto delCache = m_fgPrefixDelCache.find(ip_prefix); if (delCache == m_fgPrefixDelCache.end()) { if (nhg.getSize() == 0) { - SWSS_LOG_INFO("Route does not exist in fgnhgorch, proceed with deletion of local structures"); + SWSS_LOG_INFO("Route(%s) does not exist in fgnhgorch, proceed with deletion of local structures", + ip_prefix.to_string().c_str()); + for (uint32_t i = 0; i < fgNhg_entry->prefixes.size(); i++) { if(fgNhg_entry->prefixes[i] == ip_prefix) @@ -1720,13 +1922,13 @@ bool FgNhgOrch::doTaskFgNhgPrefix(const KeyOpFieldsValuesTuple & t) } } - m_fgNhgPrefixes.erase(ip_prefix); + m_fgNhgPrefixes.erase(ip_prefix); } else { - SWSS_LOG_INFO("Route exists in fgNhgOrch, deleting from APP_DB"); + SWSS_LOG_INFO("Route(%s) exists in fgNhgOrch, deleting from APP_DB", ip_prefix.to_string().c_str()); m_fgPrefixDelCache[ip_prefix] = nhg; - m_routeTable.del(ip_prefix.to_string()); + m_routeTable->del(ip_prefix.to_string()); return false; } } @@ -1735,7 +1937,7 @@ bool FgNhgOrch::doTaskFgNhgPrefix(const KeyOpFieldsValuesTuple & t) if (nhg.getSize() == 0) { /* Case where fgnhgorch route entry was present and the route delete was completed */ - SWSS_LOG_INFO("Route removed in fgNhgOrch, now do an APP_DB addition"); + SWSS_LOG_INFO("Route(%s) removed in fgNhgOrch, now do an APP_DB addition", ip_prefix.to_string().c_str()); for (uint32_t i = 0; i < fgNhg_entry->prefixes.size(); i++) { if (fgNhg_entry->prefixes[i] == ip_prefix) @@ -1744,14 +1946,16 @@ bool FgNhgOrch::doTaskFgNhgPrefix(const KeyOpFieldsValuesTuple & t) break; } } - m_fgNhgPrefixes.erase(ip_prefix); + m_fgNhgPrefixes.erase(ip_prefix); - m_routeTable.set(ip_prefix.to_string(), generateRouteTableFromNhgKey(delCache->second)); + m_routeTable->set(ip_prefix.to_string(), generateRouteTableFromNhgKey(delCache->second)); + m_fgPrefixDelCache.erase(delCache); SWSS_LOG_INFO("Perform APP_DB addition with prefix %s", ip_prefix.to_string().c_str()); } else { - SWSS_LOG_INFO("Route exists in fgNhgOrch, and APP_DB route was deleted, waiting for fgNhgOrch delete to complete"); + SWSS_LOG_INFO("Route(%s) DEL exists in fgNhgOrch size(%zu), and APP_DB route was deleted, waiting for fgNhgOrch delete to complete!!", + ip_prefix.to_string().c_str(), nhg.getSize()); return false; } } @@ -1803,6 +2007,12 @@ bool FgNhgOrch::doTaskFgNhgMember(const KeyOpFieldsValuesTuple & t) SWSS_LOG_INFO("FG_NHG entry not received yet, continue"); return false; } + // Flag error if FGNHG was created with match_mode as prefix_based and member is added + else if (fgNhg_entry->second.match_mode == FGMatchMode::PREFIX_BASED) + { + SWSS_LOG_ERROR("Received FG_NHG member for prefix-based match_mode, not a supported operation"); + return true; + } else { /* skip addition if next-hop already exists */ @@ -1911,9 +2121,9 @@ bool FgNhgOrch::doTaskFgNhgMember(const KeyOpFieldsValuesTuple & t) } return true; } - -void FgNhgOrch::doTask(Consumer& consumer) + +void FgNhgOrch::doTask(Consumer& consumer) { SWSS_LOG_ENTER(); const string & table_name = consumer.getTableName(); diff --git a/orchagent/fgnhgorch.h b/orchagent/fgnhgorch.h index b7ec844a590..8356ee35c44 100644 --- a/orchagent/fgnhgorch.h +++ b/orchagent/fgnhgorch.h @@ -6,6 +6,8 @@ #include "intfsorch.h" #include "neighorch.h" #include "producerstatetable.h" +#include "zmqclient.h" +#include "zmqproducerstatetable.h" #include "ipaddress.h" #include "ipaddresses.h" @@ -15,9 +17,10 @@ #include typedef uint32_t Bank; +typedef uint32_t HashBucketIdx; typedef std::set ActiveNextHops; typedef std::vector FGNextHopGroupMembers; -typedef std::vector HashBuckets; +typedef std::vector HashBuckets; typedef std::map FGNextHopGroupMap; typedef std::vector BankFGNextHopGroupMap; typedef std::map InactiveBankMapsToBank; @@ -56,8 +59,10 @@ typedef std::unordered_map> Links; enum FGMatchMode { ROUTE_BASED, - NEXTHOP_BASED + NEXTHOP_BASED, + PREFIX_BASED }; + /* Store the indices occupied by a bank */ typedef struct { @@ -70,6 +75,7 @@ typedef struct FgNhgEntry string fg_nhg_name; // Name of FG NHG group configured by user uint32_t configured_bucket_size; // Bucket size configured by user uint32_t real_bucket_size; // Real bucket size as queried from SAI + uint32_t max_next_hops; // For match_mode==prefix-based. Maximum number of next hops in the FG NHG NextHops next_hops; // The IP to Bank mapping configured by user Links links; // Link to IP map for oper changes std::vector prefixes; // Prefix which desires FG behavior @@ -78,9 +84,9 @@ typedef struct FgNhgEntry } FgNhgEntry; /* Map from IP prefix to user configured FG NHG entries */ -typedef std::map FgNhgPrefixes; +typedef std::map FgNhgPrefixes; /* Map from IP address to user configured FG NHG entries */ -typedef std::map FgNhgMembers; +typedef std::map FgNhgMembers; /* Main structure to hold user configuration */ typedef std::map FgNhgs; @@ -123,7 +129,9 @@ class FgNhgOrch : public Orch, public Observer bool isFineGrainedConfigured; Table m_stateWarmRestartRouteTable; - ProducerStateTable m_routeTable; + + std::shared_ptr m_zmqClient = nullptr; + std::shared_ptr m_routeTable = nullptr; FgPrefixOpCache m_fgPrefixAddCache; FgPrefixOpCache m_fgPrefixDelCache; @@ -133,13 +141,18 @@ class FgNhgOrch : public Orch, public Observer WarmBootRecoveryMap m_recoveryMap; bool setNewNhgMembers(FGNextHopGroupEntry &syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, - std::vector &bank_member_changes, + std::vector &bank_member_changes, std::map &nhopgroup_members_set, const IpPrefix&); + bool sprayBankNhgMembers(FGNextHopGroupEntry &syncd_fg_route_entry, const IpPrefix &ipPrefix, + BankIndexRange hash_idx_range, FgNhgEntry *fgNhgEntry, + uint32_t bank, BankMemberChanges &bank_member_change, + std::map &nhopgroup_members_set); + bool computeAndSetHashBucketChanges(FGNextHopGroupEntry *syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, std::vector &bank_member_changes, std::map &nhopgroup_members_set, const IpPrefix&); bool setActiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, - uint32_t bank, uint32_t syncd_bank, std::vector bank_member_changes, + uint32_t syncd_bank, BankMemberChanges bank_member_changes, std::map &nhopgroup_members_set, const IpPrefix&); bool setInactiveBankHashBucketChanges(FGNextHopGroupEntry *syncd_fg_route_entry, FgNhgEntry *fgNhgEntry, uint32_t bank,std::vector &bank_member_changes, diff --git a/orchagent/flex_counter/flex_counter_manager.cpp b/orchagent/flex_counter/flex_counter_manager.cpp index 95fb28171d0..d4350ace089 100644 --- a/orchagent/flex_counter/flex_counter_manager.cpp +++ b/orchagent/flex_counter/flex_counter_manager.cpp @@ -5,7 +5,6 @@ #include "schema.h" #include "rediscommand.h" #include "logger.h" -#include "sai_serialize.h" #include @@ -18,12 +17,15 @@ using swss::DBConnector; using swss::FieldValueTuple; using swss::ProducerTable; +extern sai_switch_api_t *sai_switch_api; + const string FLEX_COUNTER_ENABLE("enable"); const string FLEX_COUNTER_DISABLE("disable"); const unordered_map FlexCounterManager::stats_mode_lookup = { { StatsMode::READ, STATS_MODE_READ }, + { StatsMode::READ_AND_CLEAR, STATS_MODE_READ_AND_CLEAR }, }; const unordered_map FlexCounterManager::status_lookup = @@ -38,6 +40,8 @@ const unordered_map FlexCounterManager::counter_id_field_lo { CounterType::SWITCH_DEBUG, SWITCH_DEBUG_COUNTER_ID_LIST }, { CounterType::PORT, PORT_COUNTER_ID_LIST }, { CounterType::QUEUE, QUEUE_COUNTER_ID_LIST }, + { CounterType::QUEUE_ATTR, QUEUE_ATTR_ID_LIST }, + { CounterType::PRIORITY_GROUP, PG_COUNTER_ID_LIST }, { CounterType::MACSEC_SA_ATTR, MACSEC_SA_ATTR_ID_LIST }, { CounterType::MACSEC_SA, MACSEC_SA_COUNTER_ID_LIST }, { CounterType::MACSEC_FLOW, MACSEC_FLOW_COUNTER_ID_LIST }, @@ -45,6 +49,10 @@ const unordered_map FlexCounterManager::counter_id_field_lo { CounterType::TUNNEL, TUNNEL_COUNTER_ID_LIST }, { CounterType::HOSTIF_TRAP, FLOW_COUNTER_ID_LIST }, { CounterType::ROUTE, FLOW_COUNTER_ID_LIST }, + { CounterType::ENI, ENI_COUNTER_ID_LIST }, + { CounterType::DASH_METER, DASH_METER_COUNTER_ID_LIST }, + { CounterType::SRV6, SRV6_COUNTER_ID_LIST }, + { CounterType::SWITCH, SWITCH_COUNTER_ID_LIST }, }; FlexManagerDirectory g_FlexManagerDirectory; @@ -89,13 +97,13 @@ FlexCounterManager::FlexCounterManager( const uint polling_interval, const bool enabled, FieldValueTuple fv_plugin) : - FlexCounterManager("FLEX_COUNTER_DB", group_name, stats_mode, + FlexCounterManager(false, group_name, stats_mode, polling_interval, enabled, fv_plugin) { } FlexCounterManager::FlexCounterManager( - const string& db_name, + const bool is_gearbox, const string& group_name, const StatsMode stats_mode, const uint polling_interval, @@ -106,11 +114,7 @@ FlexCounterManager::FlexCounterManager( polling_interval(polling_interval), enabled(enabled), fv_plugin(fv_plugin), - flex_counter_db(new DBConnector(db_name, 0)), - flex_counter_group_table(new ProducerTable(flex_counter_db.get(), - FLEX_COUNTER_GROUP_TABLE)), - flex_counter_table(new ProducerTable(flex_counter_db.get(), - FLEX_COUNTER_TABLE)) + is_gearbox(is_gearbox) { SWSS_LOG_ENTER(); @@ -125,13 +129,10 @@ FlexCounterManager::~FlexCounterManager() for (const auto& counter: installed_counters) { - flex_counter_table->del(getFlexCounterTableKey(group_name, counter)); + stopFlexCounterPolling(counter.second, getFlexCounterTableKey(group_name, counter.first)); } - if (flex_counter_group_table != nullptr) - { - flex_counter_group_table->del(group_name); - } + delFlexCounterGroup(group_name, is_gearbox); SWSS_LOG_DEBUG("Deleted flex counter group '%s'.", group_name.c_str()); } @@ -140,19 +141,13 @@ void FlexCounterManager::applyGroupConfiguration() { SWSS_LOG_ENTER(); - vector field_values = - { - FieldValueTuple(STATS_MODE_FIELD, stats_mode_lookup.at(stats_mode)), - FieldValueTuple(POLL_INTERVAL_FIELD, std::to_string(polling_interval)), - FieldValueTuple(FLEX_COUNTER_STATUS_FIELD, status_lookup.at(enabled)) - }; - - if (!fvField(fv_plugin).empty()) - { - field_values.emplace_back(fv_plugin); - } - - flex_counter_group_table->set(group_name, field_values); + setFlexCounterGroupParameter(group_name, + std::to_string(polling_interval), + stats_mode_lookup.at(stats_mode), + fvField(fv_plugin), + fvValue(fv_plugin), + status_lookup.at(enabled), + is_gearbox); } void FlexCounterManager::updateGroupPollingInterval( @@ -160,11 +155,7 @@ void FlexCounterManager::updateGroupPollingInterval( { SWSS_LOG_ENTER(); - vector field_values = - { - FieldValueTuple(POLL_INTERVAL_FIELD, std::to_string(polling_interval)) - }; - flex_counter_group_table->set(group_name, field_values); + setFlexCounterGroupPollInterval(group_name, std::to_string(polling_interval), is_gearbox); SWSS_LOG_DEBUG("Set polling interval for flex counter group '%s' to %d ms.", group_name.c_str(), polling_interval); @@ -181,11 +172,7 @@ void FlexCounterManager::enableFlexCounterGroup() return; } - vector field_values = - { - FieldValueTuple(FLEX_COUNTER_STATUS_FIELD, FLEX_COUNTER_ENABLE) - }; - flex_counter_group_table->set(group_name, field_values); + setFlexCounterGroupOperation(group_name, FLEX_COUNTER_ENABLE, is_gearbox); enabled = true; SWSS_LOG_DEBUG("Enabling flex counters for group '%s'.", @@ -203,11 +190,7 @@ void FlexCounterManager::disableFlexCounterGroup() return; } - vector field_values = - { - FieldValueTuple(FLEX_COUNTER_STATUS_FIELD, FLEX_COUNTER_DISABLE) - }; - flex_counter_group_table->set(group_name, field_values); + setFlexCounterGroupOperation(group_name, FLEX_COUNTER_DISABLE, is_gearbox); enabled = false; SWSS_LOG_DEBUG("Disabling flex counters for group '%s'.", @@ -219,7 +202,8 @@ void FlexCounterManager::disableFlexCounterGroup() void FlexCounterManager::setCounterIdList( const sai_object_id_t object_id, const CounterType counter_type, - const unordered_set& counter_stats) + const unordered_set& counter_stats, + const sai_object_id_t switch_id) { SWSS_LOG_ENTER(); @@ -231,12 +215,12 @@ void FlexCounterManager::setCounterIdList( return; } - std::vector field_values = - { - FieldValueTuple(counter_type_it->second, serializeCounterStats(counter_stats)) - }; - flex_counter_table->set(getFlexCounterTableKey(group_name, object_id), field_values); - installed_counters.insert(object_id); + auto key = getFlexCounterTableKey(group_name, object_id); + auto counter_ids = serializeCounterStats(counter_stats); + auto effective_switch_id = switch_id == SAI_NULL_OBJECT_ID ? gSwitchId : switch_id; + + startFlexCounterPolling(effective_switch_id, key, counter_ids, counter_type_it->second); + installed_counters[object_id] = effective_switch_id; SWSS_LOG_DEBUG("Updated flex counter id list for object '%" PRIu64 "' in group '%s'.", object_id, @@ -252,13 +236,14 @@ void FlexCounterManager::clearCounterIdList(const sai_object_id_t object_id) auto counter_it = installed_counters.find(object_id); if (counter_it == installed_counters.end()) { - SWSS_LOG_WARN("No counters found on object '%" PRIu64 "' in group '%s'.", + SWSS_LOG_INFO("No counters found on object '%" PRIu64 "' in group '%s'.", object_id, group_name.c_str()); return; } - flex_counter_table->del(getFlexCounterTableKey(group_name, object_id)); + auto key = getFlexCounterTableKey(group_name, object_id); + stopFlexCounterPolling(installed_counters[object_id], key); installed_counters.erase(counter_it); SWSS_LOG_DEBUG("Cleared flex counter id list for object '%" PRIu64 "' in group '%s'.", @@ -272,12 +257,12 @@ string FlexCounterManager::getFlexCounterTableKey( { SWSS_LOG_ENTER(); - return group_name + flex_counter_table->getTableNameSeparator() + sai_serialize_object_id(object_id); + return group_name + ":" + sai_serialize_object_id(object_id); } // serializeCounterStats turns a set of stats into a format suitable for FLEX_COUNTER_DB. string FlexCounterManager::serializeCounterStats( - const unordered_set& counter_stats) const + const unordered_set& counter_stats) { SWSS_LOG_ENTER(); diff --git a/orchagent/flex_counter/flex_counter_manager.h b/orchagent/flex_counter/flex_counter_manager.h index 38bf8290585..f3810296f8c 100644 --- a/orchagent/flex_counter/flex_counter_manager.h +++ b/orchagent/flex_counter/flex_counter_manager.h @@ -9,6 +9,10 @@ #include "producertable.h" #include "table.h" #include +#include +#include "sai_serialize.h" +#include "saihelper.h" +#include extern "C" { #include "sai.h" @@ -16,13 +20,16 @@ extern "C" { enum class StatsMode { - READ + READ, + READ_AND_CLEAR }; enum class CounterType { PORT, QUEUE, + QUEUE_ATTR, + PRIORITY_GROUP, PORT_DEBUG, SWITCH_DEBUG, MACSEC_SA_ATTR, @@ -32,8 +39,16 @@ enum class CounterType TUNNEL, HOSTIF_TRAP, ROUTE, + ENI, + DASH_METER, + SRV6, + SWITCH, }; +extern bool gTraditionalFlexCounter; +extern sai_object_id_t gSwitchId; + +struct CachedObjects; // FlexCounterManager allows users to manage a group of flex counters. // // TODO: FlexCounterManager doesn't currently support the full range of @@ -41,6 +56,7 @@ enum class CounterType // counters and support for plugins needs to be added. class FlexCounterManager { + friend struct CachedObjects; public: FlexCounterManager( const std::string& group_name, @@ -53,7 +69,7 @@ class FlexCounterManager {} FlexCounterManager( - const std::string& db_name, + const bool is_gearbox, const std::string& group_name, const StatsMode stats_mode, const uint polling_interval, @@ -68,11 +84,12 @@ class FlexCounterManager void enableFlexCounterGroup(); void disableFlexCounterGroup(); - void setCounterIdList( + virtual void setCounterIdList( const sai_object_id_t object_id, const CounterType counter_type, - const std::unordered_set& counter_stats); - void clearCounterIdList(const sai_object_id_t object_id); + const std::unordered_set& counter_stats, + const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID); + virtual void clearCounterIdList(const sai_object_id_t object_id); const std::string& getGroupName() const { @@ -97,29 +114,263 @@ class FlexCounterManager protected: void applyGroupConfiguration(); - private: std::string getFlexCounterTableKey( const std::string& group_name, const sai_object_id_t object_id) const; - std::string serializeCounterStats( - const std::unordered_set& counter_stats) const; std::string group_name; StatsMode stats_mode; uint polling_interval; bool enabled; swss::FieldValueTuple fv_plugin; - std::unordered_set installed_counters; + std::unordered_map installed_counters; + bool is_gearbox; - std::shared_ptr flex_counter_db = nullptr; - std::shared_ptr flex_counter_group_table = nullptr; - std::shared_ptr flex_counter_table = nullptr; + static std::string serializeCounterStats( + const std::unordered_set& counter_stats); static const std::unordered_map stats_mode_lookup; static const std::unordered_map status_lookup; static const std::unordered_map counter_id_field_lookup; }; +struct CachedObjects +{ + struct PendingMapKey + { + std::unordered_set counter_stats; + CounterType counter_type; + sai_object_id_t switch_id; + + bool operator==(const PendingMapKey& other) const { + return counter_stats == other.counter_stats && + counter_type == other.counter_type && + switch_id == other.switch_id; + } + }; + + struct PendingMapHash { + size_t operator()(const PendingMapKey& key) const { + size_t seed = 0; + std::vector sorted_stats(key.counter_stats.begin(), key.counter_stats.end()); + std::sort(sorted_stats.begin(), sorted_stats.end()); + boost::hash_combine(seed, boost::hash_range(sorted_stats.begin(), sorted_stats.end())); + boost::hash_combine(seed, key.counter_type); + boost::hash_combine(seed, key.switch_id); + return seed; + } + }; + + std::unordered_map, PendingMapHash> pending_objects_map; + + void cache(const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + sai_object_id_t switch_id) + { + PendingMapKey key{counter_stats, counter_type, switch_id}; + pending_objects_map[key].emplace(object_id); + } + + void flush(const std::string &group_name) + { + if (pending_objects_map.empty()) + { + return; + } + + for (const auto& entry : pending_objects_map) + { + const auto& counter_stats = entry.first.counter_stats; + const auto& counter_type = entry.first.counter_type; + const auto& switch_id = entry.first.switch_id; + const auto& pending_sai_objects = entry.second; + + if (pending_sai_objects.empty()) + { + continue; + } + + auto counter_ids = FlexCounterManager::serializeCounterStats(counter_stats); + auto counter_type_it = FlexCounterManager::counter_id_field_lookup.find(counter_type); + + auto counter_keys = group_name + ":"; + for (const auto& oid: pending_sai_objects) + { + counter_keys += sai_serialize_object_id(oid) + ","; + } + counter_keys.pop_back(); + + startFlexCounterPolling(switch_id, counter_keys, counter_ids, counter_type_it->second); + } + + /* Clear all cached entries after flush */ + pending_objects_map.clear(); + } +}; + +class FlexCounterCachedManager : public FlexCounterManager +{ + public: + FlexCounterCachedManager( + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")) : + FlexCounterManager(group_name, stats_mode, polling_interval, enabled, fv_plugin) + { + } + + virtual void flush() + { + } + + protected: + void flush(const std::string &group_name, struct CachedObjects &cached_objects) + { + cached_objects.flush(group_name); + } + + void setCounterIdList( + struct CachedObjects &cached_objects, + const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID) + { + if (gTraditionalFlexCounter) + { + // Unable to cache an object and initialize in bulk in traditional flex counter mode + FlexCounterManager::setCounterIdList(object_id, counter_type, counter_stats, switch_id); + return; + } + + auto effective_switch_id = switch_id == SAI_NULL_OBJECT_ID ? gSwitchId : switch_id; + installed_counters[object_id] = effective_switch_id; + cached_objects.cache(object_id, counter_type, counter_stats, effective_switch_id); + } + + void clearCounterIdList( + struct CachedObjects &cached_objects, + const sai_object_id_t object_id) + { + bool found = false; + for (auto entry = cached_objects.pending_objects_map.begin(); entry != cached_objects.pending_objects_map.end(); ) + { + if (entry->second.find(object_id) != entry->second.end()) + { + found = true; + installed_counters.erase(object_id); + entry->second.erase(object_id); + } + + if (found && entry->second.empty()) + { + entry = cached_objects.pending_objects_map.erase(entry); + break; + } + else + { + ++entry; + } + } + + if (!found) + { + /* If the object is not found in the cached objects, clear the counter id list assuming it is already installed */ + FlexCounterManager::clearCounterIdList(object_id); + } + } +}; + +template +class FlexCounterTaggedCachedManager : public FlexCounterCachedManager +{ + public: + FlexCounterTaggedCachedManager( + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")) : + FlexCounterCachedManager(group_name, stats_mode, polling_interval, enabled, fv_plugin) + { + } + + void flush() + { + FlexCounterCachedManager::flush(group_name, cached_objects); + } + + virtual void setCounterIdList( + const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID) + { + FlexCounterCachedManager::setCounterIdList(cached_objects, + object_id, + counter_type, + counter_stats); + } + + virtual void clearCounterIdList( + const sai_object_id_t object_id) + { + FlexCounterCachedManager::clearCounterIdList(cached_objects, object_id); + } + + private: + struct CachedObjects cached_objects; +}; + +template +class FlexCounterTaggedCachedManager::value>> : public FlexCounterCachedManager +{ + public: + FlexCounterTaggedCachedManager( + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")) : + FlexCounterCachedManager(group_name, stats_mode, polling_interval, enabled, fv_plugin) + { + } + + void flush() + { + for(auto &it : cached_objects) + { + FlexCounterCachedManager::flush(group_name, it.second); + } + } + + void setCounterIdList( + const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + const TagType tag, + const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID) + { + FlexCounterCachedManager::setCounterIdList(cached_objects[tag], + object_id, + counter_type, + counter_stats); + } + + void clearCounterIdList( + const sai_object_id_t object_id, + const TagType tag) + { + FlexCounterCachedManager::clearCounterIdList(cached_objects[tag], object_id); + } + + private: + std::map cached_objects; +}; + class FlexManagerDirectory { public: diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index bc974181f1f..834838d7a5d 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -1,21 +1,32 @@ #include -#include "portsorch.h" -#include "fabricportsorch.h" -#include "select.h" + +#include +#include +#include +#include + #include "notifier.h" -#include "sai_serialize.h" -#include "pfcwdorch.h" -#include "bufferorch.h" -#include "flexcounterorch.h" -#include "debugcounterorch.h" #include "directory.h" + +#include "bufferorch.h" #include "copporch.h" -#include -#include "routeorch.h" #include "macsecorch.h" -#include "flowcounterrouteorch.h" +#include "portsorch.h" +#include "pfcwdorch.h" +#include "routeorch.h" +#include "srv6orch.h" +#include "switchorch.h" +#include "debugcounterorch.h" +#include "fabricportsorch.h" + +#include "dash/dashorch.h" +#include "dash/dashmeterorch.h" +#include "flex_counter/flowcounterrouteorch.h" + +#include "flexcounterorch.h" extern sai_port_api_t *sai_port_api; +extern sai_switch_api_t *sai_switch_api; extern PortsOrch *gPortsOrch; extern FabricPortsOrch *gFabricPortsOrch; @@ -24,6 +35,11 @@ extern BufferOrch *gBufferOrch; extern Directory gDirectory; extern CoppOrch *gCoppOrch; extern FlowCounterRouteOrch *gFlowCounterRouteOrch; +extern Srv6Orch *gSrv6Orch; +extern SwitchOrch *gSwitchOrch; +extern sai_object_id_t gSwitchId; + +int gFlexCounterDelaySec; #define BUFFER_POOL_WATERMARK_KEY "BUFFER_POOL_WATERMARK" #define PORT_KEY "PORT" @@ -37,11 +53,18 @@ extern FlowCounterRouteOrch *gFlowCounterRouteOrch; #define TUNNEL_KEY "TUNNEL" #define FLOW_CNT_TRAP_KEY "FLOW_CNT_TRAP" #define FLOW_CNT_ROUTE_KEY "FLOW_CNT_ROUTE" +#define ENI_KEY "ENI" +#define DASH_METER_KEY "DASH_METER" +#define WRED_QUEUE_KEY "WRED_ECN_QUEUE" +#define WRED_PORT_KEY "WRED_ECN_PORT" +#define SRV6_KEY "SRV6" +#define SWITCH_KEY "SWITCH" unordered_map flexCounterGroupMap = { {"PORT", PORT_STAT_COUNTER_FLEX_COUNTER_GROUP}, {"PORT_RATES", PORT_RATE_COUNTER_FLEX_COUNTER_GROUP}, + {"DEBUG_MONITOR_COUNTER", DEBUG_DROP_MONITOR_FLEX_COUNTER_GROUP}, {"PORT_BUFFER_DROP", PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP}, {"QUEUE", QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP}, {"PFCWD", PFC_WD_FLEX_COUNTER_GROUP}, @@ -59,21 +82,52 @@ unordered_map flexCounterGroupMap = {"MACSEC_SA", COUNTERS_MACSEC_SA_GROUP}, {"MACSEC_SA_ATTR", COUNTERS_MACSEC_SA_ATTR_GROUP}, {"MACSEC_FLOW", COUNTERS_MACSEC_FLOW_GROUP}, + {"ENI", ENI_STAT_COUNTER_FLEX_COUNTER_GROUP}, + {"DASH_METER", METER_STAT_COUNTER_FLEX_COUNTER_GROUP}, + {"WRED_ECN_PORT", WRED_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP}, + {"WRED_ECN_QUEUE", WRED_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP}, + {SRV6_KEY, SRV6_STAT_COUNTER_FLEX_COUNTER_GROUP}, + {SWITCH_KEY, SWITCH_STAT_COUNTER_FLEX_COUNTER_GROUP} }; FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector &tableNames): Orch(db, tableNames), - m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), - m_deviceMetadataConfigTable(db, CFG_DEVICE_METADATA_TABLE_NAME), - m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), - m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), - m_gbflexCounterDb(new DBConnector("GB_FLEX_COUNTER_DB", 0)), - m_gbflexCounterGroupTable(new ProducerTable(m_gbflexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)) + m_deviceMetadataConfigTable(db, CFG_DEVICE_METADATA_TABLE_NAME) { SWSS_LOG_ENTER(); + + // Read create_only_config_db_buffers configuration once during initialization + std::string createOnlyConfigDbBuffersValue; + try + { + if (m_deviceMetadataConfigTable.hget("localhost", "create_only_config_db_buffers", createOnlyConfigDbBuffersValue)) + { + if (createOnlyConfigDbBuffersValue == "true") + { + m_createOnlyConfigDbBuffers = true; + } + } + } + catch(const std::system_error& e) + { + SWSS_LOG_ERROR("System error reading create_only_config_db_buffers: %s", e.what()); + } + + SWSS_LOG_NOTICE("Counter delay is %d seconds", gFlexCounterDelaySec); + if (gFlexCounterDelaySec > 0) + { + m_delayTimer = new SelectableTimer(timespec{.tv_sec = static_cast(gFlexCounterDelaySec), .tv_nsec = 0}); + auto delayExecutor = new ExecutableTimer(m_delayTimer, this, "FLEX_COUNTER_DELAY"); + Orch::addExecutor(delayExecutor); + m_delayTimer->start(); + } + else + { + m_delayTimerExpired = true; + } } FlexCounterOrch::~FlexCounterOrch(void) @@ -85,7 +139,21 @@ void FlexCounterOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); + // Handle DEVICE_METADATA table changes for create_only_config_db_buffers + if (consumer.getTableName() == CFG_DEVICE_METADATA_TABLE_NAME) + { + handleDeviceMetadataTable(consumer); + return; + } + + if (!m_delayTimerExpired) + { + return; + } + VxlanTunnelOrch* vxlan_tunnel_orch = gDirectory.get(); + DashOrch* dash_orch = gDirectory.get(); + DashMeterOrch* dash_meter_orch = gDirectory.get(); if (gPortsOrch && !gPortsOrch->allPortsReady()) { return; @@ -114,13 +182,9 @@ void FlexCounterOrch::doTask(Consumer &consumer) if (op == SET_COMMAND) { - auto itDelay = std::find(std::begin(data), std::end(data), FieldValueTuple(FLEX_COUNTER_DELAY_STATUS_FIELD, "true")); + string bulk_chunk_size; + string bulk_chunk_size_per_counter; - if (itDelay != data.end()) - { - consumer.m_toSync.erase(it++); - continue; - } for (auto valuePair:data) { const auto &field = fvField(valuePair); @@ -128,17 +192,24 @@ void FlexCounterOrch::doTask(Consumer &consumer) if (field == POLL_INTERVAL_FIELD) { - vector fieldValues; - fieldValues.emplace_back(POLL_INTERVAL_FIELD, value); - m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + setFlexCounterGroupPollInterval(flexCounterGroupMap[key], value); + if (gPortsOrch && gPortsOrch->isGearboxEnabled()) { if (key == PORT_KEY || key.rfind("MACSEC", 0) == 0) { - m_gbflexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + setFlexCounterGroupPollInterval(flexCounterGroupMap[key], value, true); } } } + else if (field == BULK_CHUNK_SIZE_FIELD) + { + bulk_chunk_size = value; + } + else if (field == BULK_CHUNK_SIZE_PER_PREFIX_FIELD) + { + bulk_chunk_size_per_counter = value; + } else if(field == FLEX_COUNTER_STATUS_FIELD) { // Currently, the counters are disabled for polling by default @@ -185,6 +256,17 @@ void FlexCounterOrch::doTask(Consumer &consumer) m_pg_watermark_enabled = true; gPortsOrch->addPriorityGroupWatermarkFlexCounters(getPgConfigurations()); } + else if(key == WRED_PORT_KEY) + { + gPortsOrch->generateWredPortCounterMap(); + m_wred_port_counter_enabled = true; + } + else if(key == WRED_QUEUE_KEY) + { + gPortsOrch->generateQueueMap(getQueueConfigurations()); + m_wred_queue_counter_enabled = true; + gPortsOrch->addWredQueueFlexCounters(getQueueConfigurations()); + } } if(gIntfsOrch && (key == RIF_KEY) && (value == "enable")) { @@ -202,6 +284,14 @@ void FlexCounterOrch::doTask(Consumer &consumer) { vxlan_tunnel_orch->generateTunnelCounterMap(); } + if (dash_orch && (key == ENI_KEY)) + { + dash_orch->handleFCStatusUpdate((value == "enable")); + } + if (dash_meter_orch && (key == DASH_METER_KEY)) + { + dash_meter_orch->handleMeterFCStatusUpdate((value == "enable")); + } if (gCoppOrch && (key == FLOW_CNT_TRAP_KEY)) { if (value == "enable") @@ -228,35 +318,68 @@ void FlexCounterOrch::doTask(Consumer &consumer) m_route_flow_counter_enabled = false; } } - vector fieldValues; - fieldValues.emplace_back(FLEX_COUNTER_STATUS_FIELD, value); - m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + if (gSrv6Orch && (key == SRV6_KEY)) + { + gSrv6Orch->setCountersState((value == "enable")); + } + if (gSwitchOrch && (key == SWITCH_KEY) && (value == "enable")) + { + gSwitchOrch->generateSwitchCounterIdList(); + } + + if (gPortsOrch) + { + gPortsOrch->flushCounters(); + } + + setFlexCounterGroupOperation(flexCounterGroupMap[key], value); if (gPortsOrch && gPortsOrch->isGearboxEnabled()) { if (key == PORT_KEY || key.rfind("MACSEC", 0) == 0) { - m_gbflexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + setFlexCounterGroupOperation(flexCounterGroupMap[key], value, true); } } } - else if(field == FLEX_COUNTER_DELAY_STATUS_FIELD) - { - // This field is ignored since it is being used before getting into this loop. - // If it is exist and the value is 'true' we need to skip the iteration in order to delay the counter creation. - // The field will clear out and counter will be created when enable_counters script is called. - } else { SWSS_LOG_NOTICE("Unsupported field %s", field.c_str()); } } + + if (!bulk_chunk_size.empty() || !bulk_chunk_size_per_counter.empty()) + { + m_groupsWithBulkChunkSize.insert(key); + setFlexCounterGroupBulkChunkSize(flexCounterGroupMap[key], + bulk_chunk_size.empty() ? "NULL" : bulk_chunk_size, + bulk_chunk_size_per_counter.empty() ? "NULL" : bulk_chunk_size_per_counter); + } + else if (m_groupsWithBulkChunkSize.find(key) != m_groupsWithBulkChunkSize.end()) + { + setFlexCounterGroupBulkChunkSize(flexCounterGroupMap[key], "NULL", "NULL"); + m_groupsWithBulkChunkSize.erase(key); + } } consumer.m_toSync.erase(it++); } } +void FlexCounterOrch::doTask(SelectableTimer&) +{ + SWSS_LOG_ENTER(); + + if (m_delayTimerExpired) + { + return; + } + + SWSS_LOG_NOTICE("Processing counters"); + m_delayTimer->stop(); + m_delayTimerExpired = true; +} + bool FlexCounterOrch::getPortCountersState() const { return m_port_counter_enabled; @@ -287,68 +410,69 @@ bool FlexCounterOrch::getPgWatermarkCountersState() const return m_pg_watermark_enabled; } -bool FlexCounterOrch::bake() +bool FlexCounterOrch::getWredQueueCountersState() const { - /* - * bake is called during warmreboot reconciling procedure. - * By default, it should fetch items from the tables the sub agents listen to, - * and then push them into m_toSync of each sub agent. - * The motivation is to make sub agents handle the saved entries first and then handle the upcoming entries. - */ - - std::deque entries; - vector keys; - m_flexCounterConfigTable.getKeys(keys); - for (const auto &key: keys) - { - if (!flexCounterGroupMap.count(key)) - { - SWSS_LOG_NOTICE("FlexCounterOrch: Invalid flex counter group intput %s is skipped during reconciling", key.c_str()); - continue; - } - - if (key == BUFFER_POOL_WATERMARK_KEY) - { - SWSS_LOG_NOTICE("FlexCounterOrch: Do not handle any FLEX_COUNTER table for %s update during reconciling", - BUFFER_POOL_WATERMARK_KEY); - continue; - } - - KeyOpFieldsValuesTuple kco; + return m_wred_queue_counter_enabled; +} - kfvKey(kco) = key; - kfvOp(kco) = SET_COMMAND; +bool FlexCounterOrch::getWredPortCountersState() const +{ + return m_wred_port_counter_enabled; +} - if (!m_flexCounterConfigTable.get(key, kfvFieldsValues(kco))) - { - continue; - } - entries.push_back(kco); - } - Consumer* consumer = dynamic_cast(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); - return consumer->addToSync(entries); +bool FlexCounterOrch::isCreateOnlyConfigDbBuffers() const +{ + return m_createOnlyConfigDbBuffers; } -static bool isCreateOnlyConfigDbBuffers(Table& deviceMetadataConfigTable) +void FlexCounterOrch::handleDeviceMetadataTable(Consumer &consumer) { - std::string createOnlyConfigDbBuffersValue; + SWSS_LOG_ENTER(); - try + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) { - if (deviceMetadataConfigTable.hget("localhost", "create_only_config_db_buffers", createOnlyConfigDbBuffersValue)) + KeyOpFieldsValuesTuple t = it->second; + string key = kfvKey(t); + string op = kfvOp(t); + auto data = kfvFieldsValues(t); + + // Only process localhost entries + if (key == "localhost" && op == SET_COMMAND) { - if (createOnlyConfigDbBuffersValue == "true") + for (auto valuePair : data) { - return true; + const auto &field = fvField(valuePair); + const auto &value = fvValue(valuePair); + + if (field == "create_only_config_db_buffers") + { + bool newValue = (value == "true"); + if (m_createOnlyConfigDbBuffers != newValue) + { + SWSS_LOG_NOTICE("Updating create_only_config_db_buffers from %s to %s", + m_createOnlyConfigDbBuffers ? "true" : "false", + value.c_str()); + m_createOnlyConfigDbBuffers = newValue; + } + } } } + consumer.m_toSync.erase(it++); } - catch(const std::system_error& e) - { - SWSS_LOG_ERROR("System error: %s", e.what()); - } +} + +bool FlexCounterOrch::bake() +{ + /* + * bake is called during warmreboot reconciling procedure. + * By default, it should fetch items from the tables the sub agents listen to, + * and then push them into m_toSync of each sub agent. + * The motivation is to make sub agents handle the saved entries first and then handle the upcoming entries. + * The FCs are not data plane configuration required during reconciling process, hence don't do anything in bake. + */ - return false; + return true; } map FlexCounterOrch::getQueueConfigurations() @@ -357,7 +481,7 @@ map FlexCounterOrch::getQueueConfigurations() map queuesStateVector; - if (!isCreateOnlyConfigDbBuffers(m_deviceMetadataConfigTable)) + if (!isCreateOnlyConfigDbBuffers()) { FlexCounterQueueStates flexCounterQueueState(0); queuesStateVector.insert(make_pair(createAllAvailableBuffersStr, flexCounterQueueState)); @@ -365,11 +489,11 @@ map FlexCounterOrch::getQueueConfigurations() } std::vector portQueueKeys; - m_bufferQueueConfigTable.getKeys(portQueueKeys); + gBufferOrch->getBufferObjectsWithNonZeroProfile(portQueueKeys, APP_BUFFER_QUEUE_TABLE_NAME); for (const auto& portQueueKey : portQueueKeys) { - auto toks = tokenize(portQueueKey, '|'); + auto toks = tokenize(portQueueKey, ':'); if (toks.size() != 2) { SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str()); @@ -403,6 +527,13 @@ map FlexCounterOrch::getQueueConfigurations() { queuesStateVector.at(configPortName).enableQueueCounter(startIndex); } + + Port port; + gPortsOrch->getPort(configPortName, port); + if (port.m_host_tx_queue_configured && port.m_host_tx_queue <= maxQueueIndex) + { + queuesStateVector.at(configPortName).enableQueueCounter(port.m_host_tx_queue); + } } catch (std::invalid_argument const& e) { SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str()); continue; @@ -419,7 +550,7 @@ map FlexCounterOrch::getPgConfigurations() map pgsStateVector; - if (!isCreateOnlyConfigDbBuffers(m_deviceMetadataConfigTable)) + if (!isCreateOnlyConfigDbBuffers()) { FlexCounterPgStates flexCounterPgState(0); pgsStateVector.insert(make_pair(createAllAvailableBuffersStr, flexCounterPgState)); @@ -427,11 +558,11 @@ map FlexCounterOrch::getPgConfigurations() } std::vector portPgKeys; - m_bufferPgConfigTable.getKeys(portPgKeys); + gBufferOrch->getBufferObjectsWithNonZeroProfile(portPgKeys, APP_BUFFER_PG_TABLE_NAME); for (const auto& portPgKey : portPgKeys) { - auto toks = tokenize(portPgKey, '|'); + auto toks = tokenize(portPgKey, ':'); if (toks.size() != 2) { SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str()); diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index 06a1ddadbc3..6d33dda11a3 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -4,12 +4,25 @@ #include "orch.h" #include "port.h" #include "producertable.h" +#include "selectabletimer.h" #include "table.h" extern "C" { #include "sai.h" } +// Delay in seconds before flex counter processing begins after orchagent startup. +// +// This delay improves boot time by prioritizing data plane configuration over +// counter initialization. Systems with many ports, priority groups (PGs), and +// queues require significant time to generate counter maps, which is not +// immediately necessary during boot. +// Value of 0 will process flex counters immediately. +// +// Configured via orchagent command line argument: -D +// +extern int gFlexCounterDelaySec; + const std::string createAllAvailableBuffersStr = "create_all_available_buffers"; class FlexCounterQueueStates @@ -40,6 +53,7 @@ class FlexCounterOrch: public Orch { public: void doTask(Consumer &consumer); + void doTask(SelectableTimer &timer); FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames); virtual ~FlexCounterOrch(void); bool getPortCountersState() const; @@ -52,13 +66,13 @@ class FlexCounterOrch: public Orch std::map getPgConfigurations(); bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;} bool getRouteFlowCountersState() const {return m_route_flow_counter_enabled;} + bool getWredQueueCountersState() const; + bool getWredPortCountersState() const; + bool isCreateOnlyConfigDbBuffers() const; bool bake() override; private: - std::shared_ptr m_flexCounterDb = nullptr; - std::shared_ptr m_flexCounterGroupTable = nullptr; - std::shared_ptr m_gbflexCounterDb = nullptr; - std::shared_ptr m_gbflexCounterGroupTable = nullptr; + void handleDeviceMetadataTable(Consumer &consumer); bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; bool m_queue_enabled = false; @@ -67,10 +81,16 @@ class FlexCounterOrch: public Orch bool m_pg_watermark_enabled = false; bool m_hostif_trap_counter_enabled = false; bool m_route_flow_counter_enabled = false; - Table m_flexCounterConfigTable; + bool m_delayTimerExpired = false; + bool m_wred_queue_counter_enabled = false; + bool m_wred_port_counter_enabled = false; Table m_bufferQueueConfigTable; Table m_bufferPgConfigTable; Table m_deviceMetadataConfigTable; + SelectableTimer* m_delayTimer; + std::unordered_set m_groupsWithBulkChunkSize; + + bool m_createOnlyConfigDbBuffers = false; }; #endif diff --git a/orchagent/high_frequency_telemetry/counternameupdater.cpp b/orchagent/high_frequency_telemetry/counternameupdater.cpp new file mode 100644 index 00000000000..acb82f90762 --- /dev/null +++ b/orchagent/high_frequency_telemetry/counternameupdater.cpp @@ -0,0 +1,88 @@ +#include "counternameupdater.h" +#include "hftelorch.h" + +#include +#include + +extern HFTelOrch *gHFTOrch; + +CounterNameMapUpdater::CounterNameMapUpdater(const std::string &db_name, const std::string &table_name) + : m_db_name(db_name), + m_table_name(table_name), + m_connector(m_db_name, 0), + m_counters_table(&m_connector, m_table_name) +{ + SWSS_LOG_ENTER(); +} + +void CounterNameMapUpdater::setCounterNameMap(const std::string &counter_name, sai_object_id_t oid) +{ + SWSS_LOG_ENTER(); + + if (gHFTOrch) + { + std::string unified_counter_name = unify_counter_name(counter_name); + Message msg{ + .m_table_name = m_table_name.c_str(), + .m_operation = OPERATION::SET, + .m_set{ + .m_counter_name = unified_counter_name.c_str(), + .m_oid = oid, + }, + }; + gHFTOrch->locallyNotify(msg); + } + + m_counters_table.hset("", counter_name, sai_serialize_object_id(oid)); +} + +void CounterNameMapUpdater::setCounterNameMap(const std::vector &counter_name_maps) +{ + SWSS_LOG_ENTER(); + + for (const auto& map : counter_name_maps) + { + const std::string& counter_name = fvField(map); + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + if (!fvValue(map).empty()) + { + sai_deserialize_object_id(fvValue(map), oid); + } + setCounterNameMap(counter_name, oid); + } +} + +void CounterNameMapUpdater::delCounterNameMap(const std::string &counter_name) +{ + SWSS_LOG_ENTER(); + + if (gHFTOrch) + { + std::string unified_counter_name = unify_counter_name(counter_name); + Message msg{ + .m_table_name = m_table_name.c_str(), + .m_operation = OPERATION::DEL, + .m_del{ + .m_counter_name = unified_counter_name.c_str(), + }, + }; + gHFTOrch->locallyNotify(msg); + } + + m_counters_table.hdel("", counter_name); +} + +std::string CounterNameMapUpdater::unify_counter_name(const std::string &counter_name) +{ + SWSS_LOG_ENTER(); + + std::string unify_counter_name = counter_name; + // Replace the separator ':' with '|' + auto pos = unify_counter_name.rfind(":"); + if (pos != std::string::npos) + { + unify_counter_name[pos] = '|'; + } + + return unify_counter_name; +} diff --git a/orchagent/high_frequency_telemetry/counternameupdater.h b/orchagent/high_frequency_telemetry/counternameupdater.h new file mode 100644 index 00000000000..820d0edcd01 --- /dev/null +++ b/orchagent/high_frequency_telemetry/counternameupdater.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +class CounterNameMapUpdater +{ +public: + + enum OPERATION + { + SET, + DEL, + }; + + struct SetPayload + { + const char* m_counter_name; + sai_object_id_t m_oid; + }; + + struct DelPayload + { + const char* m_counter_name; + }; + + struct Message + { + const char* m_table_name; + OPERATION m_operation; + union + { + SetPayload m_set; + DelPayload m_del; + }; + }; + + CounterNameMapUpdater(const std::string &db_name, const std::string &table_name); + ~CounterNameMapUpdater() = default; + + void setCounterNameMap(const std::string &counter_name, sai_object_id_t oid); + void setCounterNameMap(const std::vector &counter_name_maps); + void delCounterNameMap(const std::string &counter_name); + +private: + std::string m_db_name; + std::string m_table_name; + swss::DBConnector m_connector; + swss::Table m_counters_table; + + std::string unify_counter_name(const std::string &counter_name); +}; diff --git a/orchagent/high_frequency_telemetry/hftelgroup.cpp b/orchagent/high_frequency_telemetry/hftelgroup.cpp new file mode 100644 index 00000000000..ff8bbaa8279 --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelgroup.cpp @@ -0,0 +1,56 @@ +#include "hftelgroup.h" +#include "hftelutils.h" + +#include + +using namespace std; + +HFTelGroup::HFTelGroup(const string &group_name) : m_group_name(group_name) +{ + SWSS_LOG_ENTER(); +} + +void HFTelGroup::updateObjects(const set &object_names) +{ + SWSS_LOG_ENTER(); + + m_objects.clear(); + sai_uint16_t lable = 1; + for (auto &name : object_names) + { + m_objects[name] = lable++; + } +} + +void HFTelGroup::updateStatsIDs(const std::set &stats_ids) +{ + SWSS_LOG_ENTER(); + + m_stats_ids = move(stats_ids); +} + +bool HFTelGroup::isSameObjects(const std::set &object_names) const +{ + SWSS_LOG_ENTER(); + + if (m_objects.size() == object_names.size()) + { + for (const auto &name : object_names) + { + if (m_objects.find(name) == m_objects.end()) + { + return false; + } + } + return true; + } + + return false; +} + +bool HFTelGroup::isObjectInGroup(const string &object_name) const +{ + SWSS_LOG_ENTER(); + + return m_objects.find(object_name) != m_objects.end(); +} diff --git a/orchagent/high_frequency_telemetry/hftelgroup.h b/orchagent/high_frequency_telemetry/hftelgroup.h new file mode 100644 index 00000000000..7405ea3105c --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelgroup.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include +#include + +#include + + +class HFTelGroup +{ +public: + HFTelGroup() = delete; + HFTelGroup(const std::string& group_name); + ~HFTelGroup() = default; + void updateObjects(const std::set &object_names); + void updateStatsIDs(const std::set &stats_ids); + bool isSameObjects(const std::set &object_names) const; + bool isObjectInGroup(const std::string &object_name) const; + const std::unordered_map& getObjects() const { return m_objects; } + const std::set& getStatsIDs() const { return m_stats_ids; } + std::pair, std::vector> getObjectNamesAndLabels() const + { + std::vector names; + std::vector labels; + names.reserve(m_objects.size()); + labels.reserve(m_objects.size()); + for (const auto& obj : m_objects) + { + names.push_back(obj.first); + labels.push_back(std::to_string(obj.second)); + } + return {names, labels}; + } + +private: + std::string m_group_name; + // Object names and label IDs + std::unordered_map m_objects; + std::set m_stats_ids; +}; diff --git a/orchagent/high_frequency_telemetry/hftelorch.cpp b/orchagent/high_frequency_telemetry/hftelorch.cpp new file mode 100644 index 00000000000..d3a311bff95 --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelorch.cpp @@ -0,0 +1,729 @@ +#include "hftelorch.h" +#include "hftelutils.h" + +#include "notifications.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +using namespace std; +using namespace swss; + +#define CONSTANTS_FILE "/et/sonic/constants.yml" + +const unordered_map HFTelOrch::SUPPORT_COUNTER_TABLES = { + {COUNTERS_PORT_NAME_MAP, SAI_OBJECT_TYPE_PORT}, + {COUNTERS_BUFFER_POOL_NAME_MAP, SAI_OBJECT_TYPE_BUFFER_POOL}, + {COUNTERS_QUEUE_NAME_MAP, SAI_OBJECT_TYPE_QUEUE}, + {COUNTERS_PG_NAME_MAP, SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP}, +}; + +extern sai_object_id_t gSwitchId; +extern sai_switch_api_t *sai_switch_api; +extern sai_hostif_api_t *sai_hostif_api; +extern sai_tam_api_t *sai_tam_api; + +namespace swss +{ + + template <> + inline void lexical_convert(const string &buffer, sai_tam_tel_type_state_t &stage) + { + SWSS_LOG_ENTER(); + + if (buffer == "enabled") + { + stage = SAI_TAM_TEL_TYPE_STATE_START_STREAM; + } + else if (buffer == "disabled") + { + stage = SAI_TAM_TEL_TYPE_STATE_STOP_STREAM; + } + else + { + SWSS_LOG_THROW("Invalid stream state %s for high frequency telemetry", buffer.c_str()); + } + } + +} + +HFTelOrch::HFTelOrch( + DBConnector *cfg_db, + DBConnector *state_db, + const vector &tables) + : Orch(cfg_db, tables), + m_state_telemetry_session(state_db, STATE_HIGH_FREQUENCY_TELEMETRY_SESSION_TABLE_NAME), + m_asic_db("ASIC_DB", 0), + m_sai_hostif_obj(SAI_NULL_OBJECT_ID), + m_sai_hostif_trap_group_obj(SAI_NULL_OBJECT_ID), + m_sai_hostif_user_defined_trap_obj(SAI_NULL_OBJECT_ID), + m_sai_hostif_table_entry_obj(SAI_NULL_OBJECT_ID), + m_sai_tam_transport_obj(SAI_NULL_OBJECT_ID), + m_sai_tam_collector_obj(SAI_NULL_OBJECT_ID), + m_sai_tam_obj(SAI_NULL_OBJECT_ID) +{ + SWSS_LOG_ENTER(); + + createNetlinkChannel("sonic_stel", "ipfix"); + createTAM(); + + m_asic_notification_consumer = make_shared(&m_asic_db, "NOTIFICATIONS"); + auto notifier = new Notifier(m_asic_notification_consumer.get(), this, "TAM_TEL_TYPE_STATE"); + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_TAM_TEL_TYPE_CONFIG_CHANGE_NOTIFY; + attr.value.ptr = (void *)on_tam_tel_type_config_change; + if (sai_switch_api->set_switch_attribute(gSwitchId, &attr) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set SAI_SWITCH_ATTR_TAM_TEL_TYPE_CONFIG_CHANGE_NOTIFY"); + throw runtime_error("HFTelOrch initialization failure (failed to set tam tel type config change notify)"); + } + + Orch::addExecutor(notifier); +} + +HFTelOrch::~HFTelOrch() +{ + SWSS_LOG_ENTER(); + + m_name_profile_mapping.clear(); + m_type_profile_mapping.clear(); + + deleteTAM(); + deleteNetlinkChannel(); +} + +void HFTelOrch::locallyNotify(const CounterNameMapUpdater::Message &msg) +{ + SWSS_LOG_ENTER(); + + auto counter_itr = HFTelOrch::SUPPORT_COUNTER_TABLES.find(msg.m_table_name); + if (counter_itr == HFTelOrch::SUPPORT_COUNTER_TABLES.end()) + { + SWSS_LOG_WARN("The counter table %s is not supported by high frequency telemetry", msg.m_table_name); + return; + } + + SWSS_LOG_NOTICE("The counter table %s is updated, operation %d, object %s", + msg.m_table_name, + msg.m_operation, + msg.m_operation == CounterNameMapUpdater::SET ? msg.m_set.m_counter_name : msg.m_del.m_counter_name); + + // Update the local cache + if (msg.m_operation == CounterNameMapUpdater::SET) + { + m_counter_name_cache[counter_itr->second][msg.m_set.m_counter_name] = msg.m_set.m_oid; + } + else if (msg.m_operation == CounterNameMapUpdater::DEL) + { + m_counter_name_cache[counter_itr->second].erase(msg.m_del.m_counter_name); + } + + // Update the profile + auto type_itr = m_type_profile_mapping.find(counter_itr->second); + if (type_itr == m_type_profile_mapping.end()) + { + return; + } + for (auto profile_itr = type_itr->second.begin(); profile_itr != type_itr->second.end(); profile_itr++) + { + auto profile = *profile_itr; + const char *counter_name = msg.m_operation == CounterNameMapUpdater::SET ? msg.m_set.m_counter_name : msg.m_del.m_counter_name; + + if (!profile->canBeUpdated(counter_itr->second)) + { + // TODO: Here is a potential issue, we might need to retry the task. + // Because the Syncd is generating the configuration(template), + // we cannot update the monitor objects at this time. + SWSS_LOG_WARN("The high frequency telemetry profile %s is not ready to be updated, but the object %s want to be updated", profile->getProfileName().c_str(), counter_name); + continue; + } + + if (msg.m_operation == CounterNameMapUpdater::SET) + { + profile->setObjectSAIID(counter_itr->second, counter_name, msg.m_set.m_oid); + } + else if (msg.m_operation == CounterNameMapUpdater::DEL) + { + profile->delObjectSAIID(counter_itr->second, counter_name); + } + else + { + SWSS_LOG_THROW("Unknown operation type %d", msg.m_operation); + } + profile->tryCommitConfig(counter_itr->second); + } +} + +bool HFTelOrch::isSupportedHFTel(sai_object_id_t switch_id) +{ + SWSS_LOG_ENTER(); + + sai_stat_st_capability_list_t stats_st_capability; + stats_st_capability.count = 0; + stats_st_capability.list = nullptr; + sai_status_t status = sai_query_stats_st_capability(switch_id, SAI_OBJECT_TYPE_PORT, &stats_st_capability); + + return status == SAI_STATUS_SUCCESS || status == SAI_STATUS_BUFFER_OVERFLOW; +} + +task_process_status HFTelOrch::profileTableSet(const string &profile_name, const vector &values) +{ + SWSS_LOG_ENTER(); + auto profile = getProfile(profile_name); + + if (!profile->canBeUpdated()) + { + return task_process_status::task_need_retry; + } + + auto value_opt = fvsGetValue(values, "stream_state", true); + string stream_state = "disable"; + sai_tam_tel_type_state_t state = SAI_TAM_TEL_TYPE_STATE_STOP_STREAM; + if (value_opt) + { + lexical_convert(*value_opt, state); + profile->setStreamState(state); + stream_state = *value_opt; + } + + value_opt = fvsGetValue(values, "poll_interval", true); + uint32_t poll_interval = 0; + if (value_opt) + { + lexical_convert(*value_opt, poll_interval); + profile->setPollInterval(poll_interval); + } + + SWSS_LOG_NOTICE("The high frequency telemetry profile %s is set (stream_state: %s, poll_interval: %u)", + profile_name.c_str(), + state == SAI_TAM_TEL_TYPE_STATE_START_STREAM ? "enabled" : "disabled", + poll_interval); + + return task_process_status::task_success; +} + +task_process_status HFTelOrch::profileTableDel(const std::string &profile_name) +{ + SWSS_LOG_ENTER(); + + auto profile_itr = m_name_profile_mapping.find(profile_name); + if (profile_itr == m_name_profile_mapping.end()) + { + return task_process_status::task_success; + } + + if (!profile_itr->second->canBeUpdated()) + { + return task_process_status::task_need_retry; + } + + if (!profile_itr->second->isEmpty()) + { + return task_process_status::task_need_retry; + } + + m_name_profile_mapping.erase(profile_itr); + + SWSS_LOG_NOTICE("The high frequency telemetry profile %s is deleted", profile_name.c_str()); + + return task_process_status::task_success; +} + +task_process_status HFTelOrch::groupTableSet(const std::string &profile_name, const std::string &group_name, const std::vector &values) +{ + SWSS_LOG_ENTER(); + + auto profile = tryGetProfile(profile_name); + if (!profile) + { + return task_process_status::task_need_retry; + } + + auto type = HFTelUtils::group_name_to_sai_type(group_name); + + if (!profile->canBeUpdated(type)) + { + return task_process_status::task_need_retry; + } + + auto arg_object_names = fvsGetValue(values, "object_names", true); + if (arg_object_names && !arg_object_names->empty()) + { + vector buffer; + boost::split(buffer, *arg_object_names, boost::is_any_of(",")); + set object_names(buffer.begin(), buffer.end()); + profile->setObjectNames(group_name, move(object_names)); + } + + auto arg_object_counters = fvsGetValue(values, "object_counters", true); + if (arg_object_counters && !arg_object_counters->empty()) + { + vector buffer; + boost::split(buffer, *arg_object_counters, boost::is_any_of(",")); + set object_counters(buffer.begin(), buffer.end()); + profile->setStatsIDs(group_name, object_counters); + } + + if (profile->getStreamState(type) != SAI_TAM_TEL_TYPE_STATE_STOP_STREAM) + { + SWSS_LOG_WARN("The high frequency telemetry group %s:%s is not in the stop stream state, it means no new configuration needs to be applied", + profile_name.c_str(), + group_name.c_str()); + return task_process_status::task_success; + } + + profile->tryCommitConfig(type); + + m_type_profile_mapping[type].insert(profile); + + SWSS_LOG_NOTICE("The high frequency telemetry group %s with profile %s is set (object_names: %s, object_counters: %s)", + group_name.c_str(), + profile_name.c_str(), + arg_object_names ? arg_object_names->c_str() : "", + arg_object_counters ? arg_object_counters->c_str() : ""); + + return task_process_status::task_success; +} + +task_process_status HFTelOrch::groupTableDel(const std::string &profile_name, const std::string &group_name) +{ + SWSS_LOG_ENTER(); + + auto profile = tryGetProfile(profile_name); + + if (!profile) + { + SWSS_LOG_WARN("The high frequency telemetry profile %s is not found", profile_name.c_str()); + return task_process_status::task_success; + } + + auto type = HFTelUtils::group_name_to_sai_type(group_name); + + if (!profile->canBeUpdated(type)) + { + return task_process_status::task_need_retry; + } + + profile->clearGroup(group_name); + m_type_profile_mapping[type].erase(profile); + m_state_telemetry_session.del(profile_name + "|" + HFTelUtils::sai_type_to_group_name(type)); + + SWSS_LOG_NOTICE("The high frequency telemetry group %s with profile %s is deleted", group_name.c_str(), profile_name.c_str()); + + return task_process_status::task_success; +} + +shared_ptr HFTelOrch::getProfile(const string &profile_name) +{ + SWSS_LOG_ENTER(); + + if (m_name_profile_mapping.find(profile_name) == m_name_profile_mapping.end()) + { + m_name_profile_mapping.emplace( + profile_name, + make_shared( + profile_name, + m_sai_tam_obj, + m_sai_tam_collector_obj, + m_counter_name_cache)); + } + + return m_name_profile_mapping.at(profile_name); +} + +std::shared_ptr HFTelOrch::tryGetProfile(const std::string &profile_name) +{ + SWSS_LOG_ENTER(); + + auto itr = m_name_profile_mapping.find(profile_name); + if (itr != m_name_profile_mapping.end()) + { + return itr->second; + } + + return std::shared_ptr(); +} + +void HFTelOrch::doTask(swss::NotificationConsumer &consumer) +{ + SWSS_LOG_ENTER(); + + std::string op; + std::string data; + std::vector values; + + if (&consumer != m_asic_notification_consumer.get()) + { + SWSS_LOG_DEBUG("Is not TAM notification"); + return; + } + + consumer.pop(op, data, values); + + if (op != SAI_SWITCH_NOTIFICATION_NAME_TAM_TEL_TYPE_CONFIG_CHANGE) + { + SWSS_LOG_DEBUG("Unknown operation type %s for HFTel Orch", op.c_str()); + return; + } + + sai_object_id_t tam_tel_type_obj = SAI_NULL_OBJECT_ID; + + sai_deserialize_object_id(data, tam_tel_type_obj); + + if (tam_tel_type_obj == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("The TAM tel type object is not valid"); + return; + } + + for (auto &profile : m_name_profile_mapping) + { + auto type = profile.second->getObjectType(tam_tel_type_obj); + if (type == SAI_OBJECT_TYPE_NULL) + { + continue; + } + + // TODO: A potential optimization + // We need to notify Config Ready only when the message of State DB is delivered to the CounterSyncd + profile.second->notifyConfigReady(type); + + // Update state db + vector values; + auto state = profile.second->getTelemetryTypeState(type); + if (state == SAI_TAM_TEL_TYPE_STATE_START_STREAM) + { + values.emplace_back("stream_status", "enabled"); + } + else if (state == SAI_TAM_TEL_TYPE_STATE_STOP_STREAM) + { + values.emplace_back("stream_status", "disabled"); + } + else + { + SWSS_LOG_THROW("Unexpected state %d for high frequency telemetry", state); + } + + + values.emplace_back("object_names", boost::algorithm::join(profile.second->getObjectNames(type), ",")); + auto to_string = boost::adaptors::transformed([](sai_uint16_t n) + { return boost::lexical_cast(n); }); + values.emplace_back("object_ids", boost::algorithm::join(profile.second->getObjectLabels(type) | to_string, ",")); + + + values.emplace_back("session_type", "ipfix"); + + auto templates = profile.second->getTemplates(type); + values.emplace_back("session_config", string(templates.begin(), templates.end())); + + m_state_telemetry_session.set(profile.first + "|" + HFTelUtils::sai_type_to_group_name(type), values); + + SWSS_LOG_NOTICE("The high frequency telemetry group %s with profile %s is ready", + HFTelUtils::sai_type_to_group_name(type).c_str(), + profile.first.c_str()); + + return; + } + + SWSS_LOG_ERROR("The TAM tel type object %s is not found in the profile", sai_serialize_object_id(tam_tel_type_obj).c_str()); +} + +void HFTelOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + string table_name = consumer.getTableName(); + + auto itr = consumer.m_toSync.begin(); + while (itr != consumer.m_toSync.end()) + { + task_process_status status = task_process_status::task_failed; + KeyOpFieldsValuesTuple t = itr->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + if (table_name == CFG_HIGH_FREQUENCY_TELEMETRY_PROFILE_TABLE_NAME) + { + if (op == SET_COMMAND) + { + status = profileTableSet(key, kfvFieldsValues(t)); + } + else if (op == DEL_COMMAND) + { + status = profileTableDel(key); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); + } + } + else if (table_name == CFG_HIGH_FREQUENCY_TELEMETRY_GROUP_TABLE_NAME) + { + auto tokens = tokenize(key, '|'); + if (tokens.size() != 2) + { + SWSS_LOG_THROW("Invalid key %s in the %s", key.c_str(), table_name.c_str()); + } + if (op == SET_COMMAND) + { + status = groupTableSet(tokens[0], tokens[1], kfvFieldsValues(t)); + } + else if (op == DEL_COMMAND) + { + status = groupTableDel(tokens[0], tokens[1]); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); + } + } + else + { + SWSS_LOG_ERROR("Unknown table %s\n", table_name.c_str()); + } + + if (status == task_process_status::task_need_retry) + { + ++itr; + } + else + { + itr = consumer.m_toSync.erase(itr); + } + } +} + +void HFTelOrch::createNetlinkChannel(const string &genl_family, const string &genl_group) +{ + SWSS_LOG_ENTER(); + + // Delete the existing netlink channel + deleteNetlinkChannel(); + + vector attrs; + sai_attribute_t attr; + + // Create hostif object + attr.id = SAI_HOSTIF_ATTR_TYPE; + attr.value.s32 = SAI_HOSTIF_TYPE_GENETLINK; + attrs.push_back(attr); + + attr.id = SAI_HOSTIF_ATTR_OPER_STATUS; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_HOSTIF_ATTR_NAME; + strncpy(attr.value.chardata, genl_family.c_str(), sizeof(attr.value.chardata)); + attrs.push_back(attr); + + attr.id = SAI_HOSTIF_ATTR_GENETLINK_MCGRP_NAME; + strncpy(attr.value.chardata, genl_group.c_str(), sizeof(attr.value.chardata)); + attrs.push_back(attr); + + sai_hostif_api->create_hostif(&m_sai_hostif_obj, gSwitchId, static_cast(attrs.size()), attrs.data()); + + // // Create hostif trap group object + // sai_hostif_api->create_hostif_trap_group(&m_sai_hostif_trap_group_obj, gSwitchId, 0, nullptr); + + // Create hostif user defined trap object + attrs.clear(); + + attr.id = SAI_HOSTIF_USER_DEFINED_TRAP_ATTR_TYPE; + attr.value.s32 = SAI_HOSTIF_USER_DEFINED_TRAP_TYPE_TAM; + attrs.push_back(attr); + + // attr.id = SAI_HOSTIF_USER_DEFINED_TRAP_ATTR_TRAP_GROUP; + // attr.value.oid = m_sai_hostif_trap_group_obj; + // attrs.push_back(attr); + + sai_hostif_api->create_hostif_user_defined_trap(&m_sai_hostif_user_defined_trap_obj, gSwitchId, static_cast(attrs.size()), attrs.data()); + + // Create hostif table entry object + attrs.clear(); + + attr.id = SAI_HOSTIF_TABLE_ENTRY_ATTR_TYPE; + attr.value.s32 = SAI_HOSTIF_TABLE_ENTRY_TYPE_TRAP_ID; + attrs.push_back(attr); + + attr.id = SAI_HOSTIF_TABLE_ENTRY_ATTR_TRAP_ID; + attr.value.oid = m_sai_hostif_user_defined_trap_obj; + attrs.push_back(attr); + + attr.id = SAI_HOSTIF_TABLE_ENTRY_ATTR_CHANNEL_TYPE; + attr.value.s32 = SAI_HOSTIF_TABLE_ENTRY_CHANNEL_TYPE_GENETLINK; + attrs.push_back(attr); + + attr.id = SAI_HOSTIF_TABLE_ENTRY_ATTR_HOST_IF; + attr.value.oid = m_sai_hostif_obj; + attrs.push_back(attr); + + sai_hostif_api->create_hostif_table_entry(&m_sai_hostif_table_entry_obj, gSwitchId, static_cast(attrs.size()), attrs.data()); +} + +void HFTelOrch::deleteNetlinkChannel() +{ + SWSS_LOG_ENTER(); + + if (m_sai_hostif_table_entry_obj != SAI_NULL_OBJECT_ID) + { + sai_hostif_api->remove_hostif_table_entry(m_sai_hostif_table_entry_obj); + m_sai_hostif_table_entry_obj = SAI_NULL_OBJECT_ID; + } + if (m_sai_hostif_user_defined_trap_obj != SAI_NULL_OBJECT_ID) + { + sai_hostif_api->remove_hostif_user_defined_trap(m_sai_hostif_user_defined_trap_obj); + m_sai_hostif_user_defined_trap_obj = SAI_NULL_OBJECT_ID; + } + if (m_sai_hostif_trap_group_obj != SAI_NULL_OBJECT_ID) + { + sai_hostif_api->remove_hostif_trap_group(m_sai_hostif_trap_group_obj); + m_sai_hostif_trap_group_obj = SAI_NULL_OBJECT_ID; + } + if (m_sai_hostif_obj != SAI_NULL_OBJECT_ID) + { + sai_hostif_api->remove_hostif(m_sai_hostif_obj); + m_sai_hostif_obj = SAI_NULL_OBJECT_ID; + } +} + +void HFTelOrch::createTAM() +{ + SWSS_LOG_ENTER(); + + // Delete the existing TAM + deleteTAM(); + + vector attrs; + sai_attribute_t attr; + + // Create TAM transport object + attr.id = SAI_TAM_TRANSPORT_ATTR_TRANSPORT_TYPE; + attr.value.s32 = SAI_TAM_TRANSPORT_TYPE_NONE; + attrs.push_back(attr); + + handleSaiCreateStatus( + SAI_API_TAM, + sai_tam_api->create_tam_transport( + &m_sai_tam_transport_obj, + gSwitchId, + static_cast(attrs.size()), + attrs.data())); + + // Create TAM collector object + attrs.clear(); + + attr.id = SAI_TAM_COLLECTOR_ATTR_SRC_IP; + attr.value.ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + attr.value.ipaddr.addr.ip4 = 0; + attrs.push_back(attr); + + attr.id = SAI_TAM_COLLECTOR_ATTR_DST_IP; + attr.value.ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + attr.value.ipaddr.addr.ip4 = 0; + attrs.push_back(attr); + + attr.id = SAI_TAM_COLLECTOR_ATTR_TRANSPORT; + attr.value.oid = m_sai_tam_transport_obj; + attrs.push_back(attr); + + attr.id = SAI_TAM_COLLECTOR_ATTR_LOCALHOST; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_TAM_COLLECTOR_ATTR_HOSTIF_TRAP; + attr.value.oid = m_sai_hostif_user_defined_trap_obj; + attrs.push_back(attr); + + attr.id = SAI_TAM_COLLECTOR_ATTR_DSCP_VALUE; + attr.value.u8 = 0; + attrs.push_back(attr); + + handleSaiCreateStatus( + SAI_API_TAM, + sai_tam_api->create_tam_collector( + &m_sai_tam_collector_obj, + gSwitchId, + static_cast(attrs.size()), + attrs.data())); + + // Create TAM object + attrs.clear(); + attr.id = SAI_TAM_ATTR_TAM_BIND_POINT_TYPE_LIST; + vector bind_point_types = { + SAI_TAM_BIND_POINT_TYPE_SWITCH, + }; + attr.value.s32list.count = static_cast(bind_point_types.size()); + attr.value.s32list.list = bind_point_types.data(); + attrs.push_back(attr); + + handleSaiCreateStatus( + SAI_API_TAM, + sai_tam_api->create_tam( + &m_sai_tam_obj, + gSwitchId, + static_cast(attrs.size()), + attrs.data())); + + // Bind the TAM object to switch + // FIX: There is a bug for config reload + // WARNING #syncd: :- logViewObjectCount: object count for SAI_OBJECT_TYPE_TAM on current view 1 is different than on temporary view: 2 + // WARNING #syncd: :- performObjectSetTransition: Present current attr SAI_TAM_ATTR_TAM_BIND_POINT_TYPE_LIST:1:SAI_TAM_BIND_POINT_TYPE_SWITCH has default that CAN'T be set to 0:null since it's CREATE_ONLY + attr.id = SAI_SWITCH_ATTR_TAM_OBJECT_ID; + vector obj_list = {m_sai_tam_obj}; + attr.value.objlist.count = static_cast(obj_list.size()); + attr.value.objlist.list = obj_list.data(); + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set SAI_SWITCH_ATTR_TAM_OBJECT_ID, status: %s", + sai_serialize_status(status).c_str()); + throw runtime_error("HFTelOrch initialization failure (failed to set tam object id)"); + } +} + +void HFTelOrch::deleteTAM() +{ + SWSS_LOG_ENTER(); + + if (m_sai_tam_obj != SAI_NULL_OBJECT_ID) + { + // Unbind the TAM object from switch + HFTELUTILS_DEL_SAI_OBJECT_LIST( + gSwitchId, + SAI_SWITCH_ATTR_TAM_OBJECT_ID, + m_sai_tam_obj, + SAI_API_SWITCH, + switch, + switch); + handleSaiRemoveStatus( + SAI_API_TAM, + sai_tam_api->remove_tam(m_sai_tam_obj)); + m_sai_tam_obj = SAI_NULL_OBJECT_ID; + } + if (m_sai_tam_collector_obj != SAI_NULL_OBJECT_ID) + { + handleSaiRemoveStatus( + SAI_API_TAM, + sai_tam_api->remove_tam_collector(m_sai_tam_collector_obj)); + m_sai_tam_collector_obj = SAI_NULL_OBJECT_ID; + } + if (m_sai_tam_transport_obj != SAI_NULL_OBJECT_ID) + { + handleSaiRemoveStatus( + SAI_API_TAM, + sai_tam_api->remove_tam_transport(m_sai_tam_transport_obj)); + m_sai_tam_transport_obj = SAI_NULL_OBJECT_ID; + } +} diff --git a/orchagent/high_frequency_telemetry/hftelorch.h b/orchagent/high_frequency_telemetry/hftelorch.h new file mode 100644 index 00000000000..6000eeaf2f9 --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelorch.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +#include "counternameupdater.h" +#include "hftelprofile.h" + + +class HFTelOrch : public Orch +{ +public: + HFTelOrch( + swss::DBConnector *cfg_db, + swss::DBConnector *state_db, + const std::vector &tables); + ~HFTelOrch(); + HFTelOrch(const HFTelOrch &) = delete; + HFTelOrch &operator=(const HFTelOrch &) = delete; + HFTelOrch(HFTelOrch &&) = delete; + HFTelOrch &operator=(HFTelOrch &&) = delete; + + static const std::unordered_map SUPPORT_COUNTER_TABLES; + + void locallyNotify(const CounterNameMapUpdater::Message &msg); + static bool isSupportedHFTel(sai_object_id_t switch_id); + +private: + swss::Table m_state_telemetry_session; + swss::DBConnector m_asic_db; + std::shared_ptr m_asic_notification_consumer; + + std::unordered_map> m_name_profile_mapping; + std::unordered_map>> m_type_profile_mapping; + CounterNameCache m_counter_name_cache; + + task_process_status profileTableSet(const std::string &profile_name, const std::vector &values); + task_process_status profileTableDel(const std::string &profile_name); + task_process_status groupTableSet(const std::string &profile_name, const std::string &group_name, const std::vector &values); + task_process_status groupTableDel(const std::string &profile_name, const std::string &group_name); + std::shared_ptr getProfile(const std::string &profile_name); + std::shared_ptr tryGetProfile(const std::string &profile_name); + + void doTask(swss::NotificationConsumer &consumer); + void doTask(Consumer &consumer); + + // SAI objects + sai_object_id_t m_sai_hostif_obj; + sai_object_id_t m_sai_hostif_trap_group_obj; + sai_object_id_t m_sai_hostif_user_defined_trap_obj; + sai_object_id_t m_sai_hostif_table_entry_obj; + sai_object_id_t m_sai_tam_transport_obj; + sai_object_id_t m_sai_tam_collector_obj; + sai_object_id_t m_sai_tam_obj; + + // SAI calls + void createNetlinkChannel(const std::string &genl_family, const std::string &genl_group); + void deleteNetlinkChannel(); + void createTAM(); + void deleteTAM(); +}; diff --git a/orchagent/high_frequency_telemetry/hftelprofile.cpp b/orchagent/high_frequency_telemetry/hftelprofile.cpp new file mode 100644 index 00000000000..2d25f4be858 --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelprofile.cpp @@ -0,0 +1,988 @@ +#include "hftelprofile.h" +#include "hftelutils.h" +#include "saihelper.h" + +#include +#include +#include + +#include +#include + +using namespace std; +using namespace swss; + +extern sai_object_id_t gSwitchId; +extern sai_tam_api_t *sai_tam_api; + +HFTelProfile::HFTelProfile( + const string &profile_name, + sai_object_id_t sai_tam_obj, + sai_object_id_t sai_tam_collector_obj, + const CounterNameCache &cache) + : m_profile_name(profile_name), + m_setting_state(SAI_TAM_TEL_TYPE_STATE_STOP_STREAM), + m_poll_interval(0), + m_counter_name_cache(cache), + m_sai_tam_obj(sai_tam_obj), + m_sai_tam_collector_obj(sai_tam_collector_obj) +{ + SWSS_LOG_ENTER(); + + if (m_sai_tam_obj == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_THROW("The SAI TAM object is not valid"); + } + if (m_sai_tam_collector_obj == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_THROW("The SAI TAM collector object is not valid"); + } + + initTelemetry(); +} + +HFTelProfile::~HFTelProfile() +{ + SWSS_LOG_ENTER(); +} + +const string &HFTelProfile::getProfileName() const +{ + SWSS_LOG_ENTER(); + + return m_profile_name; +} + +void HFTelProfile::setStreamState(sai_tam_tel_type_state_t state) +{ + SWSS_LOG_ENTER(); + m_setting_state = state; + + for (const auto &item : m_sai_tam_tel_type_objs) + { + setStreamState(item.first, state); + } +} + +void HFTelProfile::setStreamState(sai_object_type_t type, sai_tam_tel_type_state_t state) +{ + SWSS_LOG_ENTER(); + + auto type_itr = m_sai_tam_tel_type_objs.find(type); + if (type_itr == m_sai_tam_tel_type_objs.end()) + { + return; + } + + auto stats = m_sai_tam_tel_type_states.find(type_itr->second); + if (stats == m_sai_tam_tel_type_states.end()) + { + return; + } + + if (stats->second == state) + { + return; + } + + do + { + if (stats->second == SAI_TAM_TEL_TYPE_STATE_STOP_STREAM) + { + if (state == SAI_TAM_TEL_TYPE_STATE_CREATE_CONFIG) + { + if (!isMonitoringObjectReady(type)) + { + return; + } + // Clearup the previous templates + m_sai_tam_tel_type_templates.erase(type); + } + else if (state == SAI_TAM_TEL_TYPE_STATE_START_STREAM) + { + if (m_sai_tam_tel_type_templates.find(type) == m_sai_tam_tel_type_templates.end()) + { + // The template isn't ready + return; + } + if (!isMonitoringObjectReady(type)) + { + return; + } + } + else + { + break; + } + } + else if (stats->second == SAI_TAM_TEL_TYPE_STATE_START_STREAM) + { + if (state == SAI_TAM_TEL_TYPE_STATE_STOP_STREAM) + { + // Nothing to do + } + else if (state == SAI_TAM_TEL_TYPE_STATE_CREATE_CONFIG) + { + // TODO: Implement the transition from started to config generating in Phase2 + SWSS_LOG_THROW("Transfer from start to create config hasn't been implemented yet"); + } + else + { + break; + } + } + else if (stats->second == SAI_TAM_TEL_TYPE_STATE_CREATE_CONFIG) + { + if (state == SAI_TAM_TEL_TYPE_STATE_STOP_STREAM) + { + // Nothing to do + } + else if (state == SAI_TAM_TEL_TYPE_STATE_START_STREAM) + { + // Nothing to do + } + else + { + break; + } + } + else + { + SWSS_LOG_THROW("Unknown state %d", stats->second); + } + + sai_attribute_t attr; + attr.id = SAI_TAM_TEL_TYPE_ATTR_STATE; + attr.value.s32 = state; + auto status = sai_tam_api->set_tam_tel_type_attribute(*type_itr->second, &attr); + if (status != SAI_STATUS_SUCCESS) + { + handleSaiSetStatus(SAI_API_TAM, status); + } + + stats->second = state; + return; + + } while(false); + + SWSS_LOG_THROW("Invalid state transfer from %d to %d", stats->second, state); +} + +sai_tam_tel_type_state_t HFTelProfile::getStreamState(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + auto itr = m_sai_tam_tel_type_objs.find(object_type); + if (itr == m_sai_tam_tel_type_objs.end()) + { + return SAI_TAM_TEL_TYPE_STATE_STOP_STREAM; + } + auto state_itr = m_sai_tam_tel_type_states.find(itr->second); + if (state_itr == m_sai_tam_tel_type_states.end()) + { + return SAI_TAM_TEL_TYPE_STATE_STOP_STREAM; + } + return state_itr->second; +} + +void HFTelProfile::notifyConfigReady(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + auto itr = m_sai_tam_tel_type_objs.find(object_type); + if (itr == m_sai_tam_tel_type_objs.end()) + { + return; + } + + updateTemplates(*itr->second); + setStreamState(object_type, m_setting_state); +} + +sai_tam_tel_type_state_t HFTelProfile::getTelemetryTypeState(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + + auto itr = m_sai_tam_tel_type_objs.find(object_type); + if (itr == m_sai_tam_tel_type_objs.end()) + { + return SAI_TAM_TEL_TYPE_STATE_STOP_STREAM; + } + auto state_itr = m_sai_tam_tel_type_states.find(itr->second); + if (state_itr == m_sai_tam_tel_type_states.end()) + { + return SAI_TAM_TEL_TYPE_STATE_STOP_STREAM; + } + return state_itr->second; +} + +HFTelProfile::sai_guard_t HFTelProfile::getTAMTelTypeGuard(sai_object_id_t tam_tel_type_obj) const +{ + SWSS_LOG_ENTER(); + + for (const auto &item : m_sai_tam_tel_type_objs) + { + if (*item.second == tam_tel_type_obj) + { + return item.second; + } + } + + return sai_guard_t(); +} + +sai_object_type_t HFTelProfile::getObjectType(sai_object_id_t tam_tel_type_obj) const +{ + SWSS_LOG_ENTER(); + + auto guard = getTAMTelTypeGuard(tam_tel_type_obj); + if (guard) + { + for (const auto &item : m_sai_tam_tel_type_objs) + { + if (item.second == guard) + { + return item.first; + } + } + } + + return SAI_OBJECT_TYPE_NULL; +} + +void HFTelProfile::setPollInterval(uint32_t poll_interval) +{ + SWSS_LOG_ENTER(); + + if (poll_interval == m_poll_interval) + { + return; + } + m_poll_interval = poll_interval; + + for (const auto &report : m_sai_tam_report_objs) + { + sai_attribute_t attr; + attr.id = SAI_TAM_REPORT_ATTR_REPORT_INTERVAL; + attr.value.u32 = m_poll_interval; + sai_status_t status = sai_tam_api->set_tam_report_attribute(*report.second, &attr); + if (status != SAI_STATUS_SUCCESS) + { + handleSaiSetStatus(SAI_API_TAM, status); + } + } +} + +void HFTelProfile::setObjectNames(const string &group_name, set &&object_names) +{ + SWSS_LOG_ENTER(); + + sai_object_type_t sai_object_type = HFTelUtils::group_name_to_sai_type(group_name); + + auto itr = m_groups.lower_bound(sai_object_type); + + if (itr == m_groups.end() || itr->first != sai_object_type) + { + HFTelGroup group(group_name); + group.updateObjects(object_names); + m_groups.insert(itr, {sai_object_type, move(group)}); + } + else + { + if (itr->second.isSameObjects(object_names)) + { + return; + } + for (const auto &obj : itr->second.getObjects()) + { + delObjectSAIID(sai_object_type, obj.first.c_str()); + } + itr->second.updateObjects(object_names); + } + loadCounterNameCache(sai_object_type); + + // TODO: In the phase 2, we don't need to stop the stream before update the object names + setStreamState(sai_object_type, SAI_TAM_TEL_TYPE_STATE_STOP_STREAM); +} + +void HFTelProfile::setStatsIDs(const string &group_name, const set &object_counters) +{ + SWSS_LOG_ENTER(); + + sai_object_type_t sai_object_type = HFTelUtils::group_name_to_sai_type(group_name); + auto itr = m_groups.lower_bound(sai_object_type); + set stats_ids_set = HFTelUtils::object_counters_to_stats_ids(group_name, object_counters); + + if (itr == m_groups.end() || itr->first != sai_object_type) + { + HFTelGroup group(group_name); + group.updateStatsIDs(stats_ids_set); + m_groups.insert(itr, {sai_object_type, move(group)}); + } + else + { + if (itr->second.getStatsIDs() == stats_ids_set) + { + return; + } + itr->second.updateStatsIDs(stats_ids_set); + } + + // TODO: In the phase 2, we don't need to stop the stream before update the stats + setStreamState(sai_object_type, SAI_TAM_TEL_TYPE_STATE_STOP_STREAM); + + deployCounterSubscriptions(sai_object_type); +} + +void HFTelProfile::setObjectSAIID(sai_object_type_t object_type, const char *object_name, sai_object_id_t object_id) +{ + SWSS_LOG_ENTER(); + + if (!isObjectTypeInProfile(object_type, object_name)) + { + return; + } + + auto &objs = m_name_sai_map[object_type]; + auto itr = objs.find(object_name); + if (itr != objs.end()) + { + if (itr->second == object_id) + { + return; + } + } + objs[object_name] = object_id; + + SWSS_LOG_DEBUG("Set object %s with ID %s in the name sai map", object_name, sai_serialize_object_id(object_id).c_str()); + + // TODO: In the phase 2, we don't need to stop the stream before update the object + setStreamState(object_type, SAI_TAM_TEL_TYPE_STATE_STOP_STREAM); + + // Update the counter subscription + deployCounterSubscriptions(object_type, object_id, m_groups.at(object_type).getObjects().at(object_name)); +} + +void HFTelProfile::delObjectSAIID(sai_object_type_t object_type, const char *object_name) +{ + SWSS_LOG_ENTER(); + + if (!isObjectTypeInProfile(object_type, object_name)) + { + return; + } + + auto &objs = m_name_sai_map[object_type]; + auto itr = objs.find(object_name); + if (itr == objs.end()) + { + return; + } + + // TODO: In the phase 2, we don't need to stop the stream before removing the object + setStreamState(object_type, SAI_TAM_TEL_TYPE_STATE_STOP_STREAM); + + // Remove all counters bounded to the object + auto counter_itr = m_sai_tam_counter_subscription_objs.find(object_type); + if (counter_itr != m_sai_tam_counter_subscription_objs.end()) + { + counter_itr->second.erase(itr->second); + if (counter_itr->second.empty()) + { + m_sai_tam_counter_subscription_objs.erase(counter_itr); + } + } + + objs.erase(itr); + if (objs.empty()) + { + m_name_sai_map.erase(object_type); + SWSS_LOG_DEBUG("Delete object %s from the name sai map", object_name); + } +} + +bool HFTelProfile::canBeUpdated() const +{ + SWSS_LOG_ENTER(); + + for (const auto &group : m_groups) + { + if (!canBeUpdated(group.first)) + { + return false; + } + } + + return true; +} + +bool HFTelProfile::canBeUpdated(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + + + if (getTelemetryTypeState(object_type) == SAI_TAM_TEL_TYPE_STATE_CREATE_CONFIG) + { + return false; + } + + return true; +} + +bool HFTelProfile::isEmpty() const +{ + SWSS_LOG_ENTER(); + + return m_groups.empty(); +} + +void HFTelProfile::clearGroup(const std::string &group_name) +{ + SWSS_LOG_ENTER(); + + sai_object_type_t sai_object_type = HFTelUtils::group_name_to_sai_type(group_name); + + auto itr = m_groups.find(sai_object_type); + if (itr != m_groups.end()) + { + for (const auto &obj : itr->second.getObjects()) + { + delObjectSAIID(sai_object_type, obj.first.c_str()); + } + m_groups.erase(itr); + } + m_sai_tam_tel_type_templates.erase(sai_object_type); + m_sai_tam_tel_type_states.erase(m_sai_tam_tel_type_objs[sai_object_type]); + m_sai_tam_tel_type_objs.erase(sai_object_type); + m_sai_tam_report_objs.erase(sai_object_type); + m_sai_tam_counter_subscription_objs.erase(sai_object_type); + m_name_sai_map.erase(sai_object_type); + + SWSS_LOG_NOTICE("Cleared high frequency telemetry group %s with no objects", group_name.c_str()); +} + +const vector &HFTelProfile::getTemplates(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + + return m_sai_tam_tel_type_templates.at(object_type); +} + +const vector HFTelProfile::getObjectNames(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + + vector object_names; + auto group = m_groups.find(object_type); + if (group != m_groups.end()) + { + object_names.resize(group->second.getObjects().size()); + transform(group->second.getObjects().begin(), group->second.getObjects().end(), object_names.begin(), + [](const auto &pair) + { return pair.first; }); + } + + return object_names; +} + +const vector HFTelProfile::getObjectLabels(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + + vector object_labels; + auto group = m_groups.find(object_type); + if (group != m_groups.end()) + { + object_labels.resize(group->second.getObjects().size()); + transform(group->second.getObjects().begin(), group->second.getObjects().end(), object_labels.begin(), + [](const auto &pair) + { return pair.second; }); + } + return object_labels; +} + +pair, vector> HFTelProfile::getObjectNamesAndLabels(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + + auto group = m_groups.find(object_type); + if (group == m_groups.end()) + { + return {vector(), vector()}; + } + + return group->second.getObjectNamesAndLabels(); +} + +vector HFTelProfile::getObjectTypes() const +{ + vector types; + types.reserve(m_groups.size()); + + for (const auto &group : m_groups) + { + types.push_back(group.first); + } + + return types; +} + +void HFTelProfile::loadCounterNameCache(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + auto itr = m_counter_name_cache.find(object_type); + if (itr == m_counter_name_cache.end()) + { + return; + } + auto group = m_groups.find(object_type); + if (group == m_groups.end()) + { + return; + } + const auto &sai_objs = itr->second; + for (const auto &obj : group->second.getObjects()) + { + auto sai_obj = sai_objs.find(obj.first); + if (sai_obj != sai_objs.end()) + { + setObjectSAIID(object_type, obj.first.c_str(), sai_obj->second); + } + } +} + +bool HFTelProfile::tryCommitConfig(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + if (!canBeUpdated(object_type)) + { + return false; + } + + if (m_setting_state == SAI_TAM_TEL_TYPE_STATE_CREATE_CONFIG) + { + SWSS_LOG_THROW("Cannot commit the configuration in the state %d", m_setting_state); + } + + auto group = m_groups.find(object_type); + if (group == m_groups.end()) + { + return false; + } + if (group->second.getObjects().empty()) + { + // TODO: If the object names are empty, implicitly select all objects of the group + return true; + } + if (!isMonitoringObjectReady(object_type)) + { + deployCounterSubscriptions(object_type); + if (!isMonitoringObjectReady(object_type)) + { + // There are some objects still not ready + return false; + } + } + setStreamState(object_type, SAI_TAM_TEL_TYPE_STATE_CREATE_CONFIG); + return true; +} + +bool HFTelProfile::isObjectTypeInProfile(sai_object_type_t object_type, const string &object_name) const +{ + SWSS_LOG_ENTER(); + + auto group = m_groups.find(object_type); + if (group == m_groups.end()) + { + return false; + } + if (!group->second.isObjectInGroup(object_name)) + { + return false; + } + + return true; +} + +bool HFTelProfile::isMonitoringObjectReady(sai_object_type_t object_type) const +{ + SWSS_LOG_ENTER(); + + auto group = m_groups.find(object_type); + if (group == m_groups.end()) + { + SWSS_LOG_THROW("The high frequency telemetry group for object type %s is not found", sai_serialize_object_type(object_type).c_str()); + } + + auto counters = m_sai_tam_counter_subscription_objs.find(object_type); + + if (counters == m_sai_tam_counter_subscription_objs.end() || group->second.getObjects().size() != counters->second.size()) + { + // The monitoring counters are not ready + return false; + } + + return true; +} + +sai_object_id_t HFTelProfile::getTAMReportObjID(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + auto itr = m_sai_tam_report_objs.find(object_type); + if (itr != m_sai_tam_report_objs.end()) + { + return *itr->second; + } + + sai_object_id_t sai_object; + vector attrs; + sai_attribute_t attr; + + // Create TAM report object + attr.id = SAI_TAM_REPORT_ATTR_TYPE; + attr.value.s32 = SAI_TAM_REPORT_TYPE_IPFIX; + attrs.push_back(attr); + + attr.id = SAI_TAM_REPORT_ATTR_REPORT_MODE; + attr.value.s32 = SAI_TAM_REPORT_MODE_BULK; + attrs.push_back(attr); + + attr.id = SAI_TAM_REPORT_ATTR_TEMPLATE_REPORT_INTERVAL; + // Don't push the template, Because we hope the template can be proactively queried by orchagent + attr.value.u32 = 0; + attrs.push_back(attr); + + if (m_poll_interval != 0) + { + attr.id = SAI_TAM_REPORT_ATTR_REPORT_INTERVAL; + attr.value.u32 = m_poll_interval; + attrs.push_back(attr); + } + + attr.id = SAI_TAM_REPORT_ATTR_REPORT_INTERVAL_UNIT; + attr.value.s32 = SAI_TAM_REPORT_INTERVAL_UNIT_USEC; + + handleSaiCreateStatus( + SAI_API_TAM, + sai_tam_api->create_tam_report( + &sai_object, + gSwitchId, + static_cast(attrs.size()), + attrs.data())); + + m_sai_tam_report_objs[object_type] = move( + sai_guard_t( + new sai_object_id_t(sai_object), + [this](sai_object_id_t *p) + { + handleSaiRemoveStatus( + SAI_API_TAM, + sai_tam_api->remove_tam_report(*p)); + delete p; + })); + + return sai_object; +} + +sai_object_id_t HFTelProfile::getTAMTelTypeObjID(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + auto itr = m_sai_tam_tel_type_objs.find(object_type); + if (itr != m_sai_tam_tel_type_objs.end()) + { + return *itr->second; + } + + sai_object_id_t sai_object; + vector attrs; + sai_attribute_t attr; + + // Create TAM telemetry type object + + attr.id = SAI_TAM_TEL_TYPE_ATTR_TAM_TELEMETRY_TYPE; + attr.value.s32 = SAI_TAM_TELEMETRY_TYPE_COUNTER_SUBSCRIPTION; + attrs.push_back(attr); + + attr.id = SAI_TAM_TEL_TYPE_ATTR_SWITCH_ENABLE_PORT_STATS; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_TAM_TEL_TYPE_ATTR_SWITCH_ENABLE_PORT_STATS_INGRESS; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_TAM_TEL_TYPE_ATTR_SWITCH_ENABLE_PORT_STATS_EGRESS; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_TAM_TEL_TYPE_ATTR_SWITCH_ENABLE_MMU_STATS; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_TAM_TEL_TYPE_ATTR_SWITCH_ENABLE_OUTPUT_QUEUE_STATS; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_TAM_TEL_TYPE_ATTR_MODE ; + attr.value.s32 = SAI_TAM_TEL_TYPE_MODE_SINGLE_TYPE; + attrs.push_back(attr); + + attr.id = SAI_TAM_TEL_TYPE_ATTR_REPORT_ID; + attr.value.oid = getTAMReportObjID(object_type); + attrs.push_back(attr); + + handleSaiCreateStatus( + SAI_API_TAM, + sai_tam_api->create_tam_tel_type( + &sai_object, + gSwitchId, + static_cast(attrs.size()), + attrs.data())); + + m_sai_tam_tel_type_objs[object_type] = move( + sai_guard_t( + new sai_object_id_t(sai_object), + [this](sai_object_id_t *p) + { + HFTELUTILS_DEL_SAI_OBJECT_LIST( + *this->m_sai_tam_telemetry_obj, + SAI_TAM_TELEMETRY_ATTR_TAM_TYPE_LIST, + *p, + SAI_API_TAM, + tam, + tam_telemetry); + + handleSaiRemoveStatus( + SAI_API_TAM, + sai_tam_api->remove_tam_tel_type(*p)); + delete p; + })); + m_sai_tam_tel_type_states[m_sai_tam_tel_type_objs[object_type]] = SAI_TAM_TEL_TYPE_STATE_STOP_STREAM; + + HFTELUTILS_ADD_SAI_OBJECT_LIST( + *m_sai_tam_telemetry_obj, + SAI_TAM_TELEMETRY_ATTR_TAM_TYPE_LIST, + sai_object, + SAI_API_TAM, + tam, + tam_telemetry); + + return sai_object; +} + +void HFTelProfile::initTelemetry() +{ + SWSS_LOG_ENTER(); + + sai_object_id_t sai_object; + vector attrs; + sai_attribute_t attr; + sai_object_id_t sai_tam_collector_obj = m_sai_tam_collector_obj; + + // Create TAM telemetry object + attr.id = SAI_TAM_TELEMETRY_ATTR_COLLECTOR_LIST; + attr.value.objlist.count = 1; + attr.value.objlist.list = &sai_tam_collector_obj; + attrs.push_back(attr); + + handleSaiCreateStatus( + SAI_API_TAM, + sai_tam_api->create_tam_telemetry( + &sai_object, + gSwitchId, static_cast(attrs.size()), + attrs.data())); + + HFTELUTILS_ADD_SAI_OBJECT_LIST( + m_sai_tam_obj, + SAI_TAM_ATTR_TELEMETRY_OBJECTS_LIST, + sai_object, + SAI_API_TAM, + tam, + tam); + + m_sai_tam_telemetry_obj = move( + sai_guard_t( + new sai_object_id_t(sai_object), + [=](sai_object_id_t *p) + { + HFTELUTILS_DEL_SAI_OBJECT_LIST( + m_sai_tam_obj, + SAI_TAM_ATTR_TELEMETRY_OBJECTS_LIST, + *p, + SAI_API_TAM, + tam, + tam); + + handleSaiRemoveStatus( + SAI_API_TAM, + sai_tam_api->remove_tam_telemetry(*p)); + delete p; + })); +} + +void HFTelProfile::deployCounterSubscription(sai_object_type_t object_type, sai_object_id_t sai_obj, sai_stat_id_t stat_id, uint16_t label) +{ + SWSS_LOG_ENTER(); + + vector attrs; + sai_attribute_t attr; + + auto itr = m_sai_tam_counter_subscription_objs[object_type].find(sai_obj); + if (itr != m_sai_tam_counter_subscription_objs[object_type].end()) + { + auto itr2 = itr->second.find(stat_id); + if (itr2 != itr->second.end()) + { + return; + } + } + + attr.id = SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_TEL_TYPE; + attr.value.oid = getTAMTelTypeObjID(object_type); + attrs.push_back(attr); + + attr.id = SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_OBJECT_ID; + attr.value.oid = sai_obj; + attrs.push_back(attr); + + attr.id = SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_STAT_ID; + attr.value.oid = stat_id; + attrs.push_back(attr); + + attr.id = SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_LABEL; + attr.value.u64 = static_cast(label); + attrs.push_back(attr); + + attr.id = SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_STATS_MODE; + attr.value.s32 = HFTelUtils::get_stats_mode(object_type, stat_id); + attrs.push_back(attr); + + sai_object_id_t counter_id; + + handleSaiCreateStatus( + SAI_API_TAM, + sai_tam_api->create_tam_counter_subscription( + &counter_id, + gSwitchId, + static_cast(attrs.size()), + attrs.data())); + + m_sai_tam_counter_subscription_objs[object_type][sai_obj][stat_id] = move( + sai_guard_t( + new sai_object_id_t(counter_id), + [](sai_object_id_t *p) + { + handleSaiRemoveStatus( + SAI_API_TAM, + sai_tam_api->remove_tam_counter_subscription(*p)); + delete p; + })); +} + +void HFTelProfile::deployCounterSubscriptions(sai_object_type_t object_type, sai_object_id_t sai_obj, std::uint16_t label) +{ + SWSS_LOG_ENTER(); + + // TODO: Bulk create the counter subscriptions + auto group = m_groups.find(object_type); + if (group == m_groups.end()) + { + return; + } + + for (const auto &stat_id : group->second.getStatsIDs()) + { + deployCounterSubscription(object_type, sai_obj, stat_id, label); + } +} + +void HFTelProfile::deployCounterSubscriptions(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + // TODO: Bulk create the counter subscriptions + + auto group = m_groups.find(object_type); + if (group == m_groups.end()) + { + return; + } + for (const auto &obj : group->second.getObjects()) + { + auto itr = m_name_sai_map[object_type].find(obj.first); + if (itr == m_name_sai_map[object_type].end()) + { + continue; + } + for (const auto &stat_id : group->second.getStatsIDs()) + { + deployCounterSubscription(object_type, itr->second, stat_id, obj.second); + } + } +} + +void HFTelProfile::undeployCounterSubscriptions(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + // TODO: Bulk remove the counter subscriptions + m_sai_tam_counter_subscription_objs.erase(object_type); +} + +void HFTelProfile::updateTemplates(sai_object_id_t tam_tel_type_obj) +{ + SWSS_LOG_ENTER(); + + auto object_type = getObjectType(tam_tel_type_obj); + if (object_type == SAI_OBJECT_TYPE_NULL) + { + SWSS_LOG_THROW("The object type is not found"); + } + + // Estimate the template size + auto counters = m_sai_tam_counter_subscription_objs.find(object_type); + if (counters == m_sai_tam_counter_subscription_objs.end()) + { + SWSS_LOG_THROW("The counter subscription object is not found"); + } + size_t counters_count = 0; + for (const auto &item : counters->second) + { + counters_count += item.second.size(); + } + + const size_t COUNTER_SIZE (8LLU); + const size_t IPFIX_TEMPLATE_MAX_SIZE (0xffffLLU); + const size_t IPFIX_HEADER_SIZE (16LLU); + const size_t IPFIX_TEMPLATE_METADATA_SIZE (12LLU); + const size_t IPFIX_TEMPLATE_MAX_STATS_COUNT (((IPFIX_TEMPLATE_MAX_SIZE - IPFIX_HEADER_SIZE - IPFIX_TEMPLATE_METADATA_SIZE) / COUNTER_SIZE) - 1LLU); + size_t estimated_template_size = (counters_count / IPFIX_TEMPLATE_MAX_STATS_COUNT + 1) * IPFIX_TEMPLATE_MAX_SIZE; + + vector buffer(estimated_template_size, 0); + + sai_attribute_t attr; + attr.id = SAI_TAM_TEL_TYPE_ATTR_IPFIX_TEMPLATES; + attr.value.u8list.count = static_cast(buffer.size()); + attr.value.u8list.list = buffer.data(); + + auto status = sai_tam_api->get_tam_tel_type_attribute(tam_tel_type_obj, 1, &attr); + if (status == SAI_STATUS_BUFFER_OVERFLOW) + { + buffer.resize(attr.value.u8list.count); + attr.value.u8list.list = buffer.data(); + status = sai_tam_api->get_tam_tel_type_attribute(tam_tel_type_obj, 1, &attr); + } + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_THROW("Failed to get the TAM telemetry type object %s attributes: %d", + sai_serialize_object_id(tam_tel_type_obj).c_str(), status); + } + + buffer.resize(attr.value.u8list.count); + + m_sai_tam_tel_type_templates[object_type] = move(buffer); +} diff --git a/orchagent/high_frequency_telemetry/hftelprofile.h b/orchagent/high_frequency_telemetry/hftelprofile.h new file mode 100644 index 00000000000..0127d6d0dc8 --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelprofile.h @@ -0,0 +1,111 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hftelgroup.h" + + +using CounterNameCache = std::unordered_map>; + +class HFTelProfile +{ +public: + HFTelProfile( + const std::string &profile_name, + sai_object_id_t sai_tam_obj, + sai_object_id_t sai_tam_collector_obj, + const CounterNameCache &cache); + ~HFTelProfile(); + HFTelProfile(const HFTelProfile &) = delete; + HFTelProfile &operator=(const HFTelProfile &) = delete; + HFTelProfile(HFTelProfile &&) = delete; + HFTelProfile &operator=(HFTelProfile &&) = delete; + + using sai_guard_t = std::shared_ptr; + + const std::string& getProfileName() const; + void setStreamState(sai_tam_tel_type_state_t state); + void setStreamState(sai_object_type_t object_type, sai_tam_tel_type_state_t state); + sai_tam_tel_type_state_t getStreamState(sai_object_type_t object_type) const; + void notifyConfigReady(sai_object_type_t object_type); + sai_tam_tel_type_state_t getTelemetryTypeState(sai_object_type_t object_type) const; + sai_guard_t getTAMTelTypeGuard(sai_object_id_t tam_tel_type_obj) const; + sai_object_type_t getObjectType(sai_object_id_t tam_tel_type_obj) const; + void setPollInterval(std::uint32_t poll_interval); + void setBulkSize(std::uint32_t bulk_size); + void setObjectNames(const std::string &group_name, std::set &&object_names); + void setStatsIDs(const std::string &group_name, const std::set &object_counters); + void setObjectSAIID(sai_object_type_t object_type, const char *object_name, sai_object_id_t object_id); + void delObjectSAIID(sai_object_type_t object_type, const char *object_name); + bool canBeUpdated() const; + bool canBeUpdated(sai_object_type_t object_type) const; + bool isEmpty() const; + void clearGroup(const std::string &group_name); + + const std::vector &getTemplates(sai_object_type_t object_type) const; + const std::vector getObjectNames(sai_object_type_t object_type) const; + const std::vector getObjectLabels(sai_object_type_t object_type) const; + std::pair, std::vector> getObjectNamesAndLabels(sai_object_type_t object_type) const; + std::vector getObjectTypes() const; + + void loadCounterNameCache(sai_object_type_t object_type); + bool tryCommitConfig(sai_object_type_t object_type); + +private: + // Configuration parameters + const std::string m_profile_name; + sai_tam_tel_type_state_t m_setting_state; + std::uint32_t m_poll_interval; + std::map m_groups; + + // Runtime parameters + const CounterNameCache &m_counter_name_cache; + + std::unordered_map< + sai_object_type_t, + std::unordered_map< + std::string, + sai_object_id_t>> + m_name_sai_map; + + // SAI objects + const sai_object_id_t m_sai_tam_obj; + const sai_object_id_t m_sai_tam_collector_obj; + std::unordered_map< + sai_object_type_t, + std::unordered_map< + sai_object_id_t, + std::unordered_map< + sai_stat_id_t, + sai_guard_t>>> + m_sai_tam_counter_subscription_objs; + sai_guard_t m_sai_tam_telemetry_obj; + std::unordered_map m_sai_tam_tel_type_states; + std::unordered_map m_sai_tam_tel_type_objs; + std::unordered_map m_sai_tam_report_objs; + std::unordered_map> m_sai_tam_tel_type_templates; + + bool isObjectTypeInProfile(sai_object_type_t object_type, const std::string &object_name) const; + bool isMonitoringObjectReady(sai_object_type_t object_type) const; + + // SAI calls + sai_object_id_t getTAMReportObjID(sai_object_type_t object_type); + sai_object_id_t getTAMTelTypeObjID(sai_object_type_t object_type); + void initTelemetry(); + void deployCounterSubscription(sai_object_type_t object_type, sai_object_id_t sai_obj, sai_stat_id_t stat_id, std::uint16_t label); + void deployCounterSubscriptions(sai_object_type_t object_type, sai_object_id_t sai_obj, std::uint16_t label); + void deployCounterSubscriptions(sai_object_type_t object_type); + void undeployCounterSubscriptions(sai_object_type_t object_type); + void updateTemplates(sai_object_id_t tam_tel_type_obj); +}; diff --git a/orchagent/high_frequency_telemetry/hftelutils.cpp b/orchagent/high_frequency_telemetry/hftelutils.cpp new file mode 100644 index 00000000000..ce96786cbb2 --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelutils.cpp @@ -0,0 +1,139 @@ +#include "hftelutils.h" + +#include +#include + +#include +#include + +using namespace std; + +#define OBJECT_TYPE_PREFIX "SAI_OBJECT_TYPE_" + +vector HFTelUtils::get_sai_object_list( + sai_object_id_t obj, + sai_attr_id_t attr_id, + sai_api_t api, + function get_attribute_handler) +{ + SWSS_LOG_ENTER(); + + vector obj_list(1024, SAI_NULL_OBJECT_ID); + sai_attribute_t attr; + + attr.id = attr_id; + attr.value.objlist.count = static_cast(obj_list.size()); + attr.value.objlist.list = obj_list.data(); + + auto status = get_attribute_handler( + obj, + 1, + &attr); + if (status != SAI_STATUS_SUCCESS) + { + handleSaiGetStatus( + api, + status); + } + assert(attr.value.objlist.count < obj_list.size()); + + obj_list.erase( + obj_list.begin() + attr.value.objlist.count, + obj_list.end()); + + return obj_list; +} + +sai_object_type_t HFTelUtils::group_name_to_sai_type(const string &group_name) +{ + SWSS_LOG_ENTER(); + + sai_object_type_t sai_object_type; + + sai_deserialize_object_type(string(OBJECT_TYPE_PREFIX) + boost::to_upper_copy(group_name), sai_object_type); + return sai_object_type; +} + +std::string HFTelUtils::sai_type_to_group_name(sai_object_type_t object_type) +{ + SWSS_LOG_ENTER(); + + std::string group_name = sai_serialize_object_type(object_type); + + group_name.erase(0, sizeof(OBJECT_TYPE_PREFIX) - 1); + + return group_name; +} + +set HFTelUtils::object_counters_to_stats_ids( + const string &group_name, + const set &object_counters) +{ + SWSS_LOG_ENTER(); + sai_object_type_t sai_object_type = HFTelUtils::group_name_to_sai_type(group_name); + set stats_ids_set; + + auto info = sai_metadata_get_object_type_info(sai_object_type); + if (info == nullptr) + { + SWSS_LOG_THROW("Failed to get the object type info for %s", group_name.c_str()); + } + + auto state_enum = info->statenum; + if (state_enum == nullptr) + { + SWSS_LOG_THROW("The object type %s does not support stats", group_name.c_str()); + } + + string type_prefix = "SAI_" + group_name + "_STAT_"; + + for (size_t i = 0; i < state_enum->valuescount; i++) + { + if (object_counters.find(state_enum->valuesshortnames[i]) != object_counters.end()) + { + SWSS_LOG_DEBUG("Found the object counter %s", state_enum->valuesshortnames[i]); + stats_ids_set.insert(state_enum->values[i]); + } + } + + if (stats_ids_set.size() != object_counters.size()) + { + SWSS_LOG_THROW("Failed to convert the object counters to stats ids for %s", group_name.c_str()); + } + + return stats_ids_set; +} + +sai_stats_mode_t HFTelUtils::get_stats_mode(sai_object_type_t object_type, sai_stat_id_t stat_id) +{ + SWSS_LOG_ENTER(); + + switch(object_type) + { + case SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP: + switch(stat_id) + { + case SAI_INGRESS_PRIORITY_GROUP_STAT_WATERMARK_BYTES: + case SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES: + case SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES: + return SAI_STATS_MODE_READ_AND_CLEAR; + default: + break; + } + break; + case SAI_OBJECT_TYPE_BUFFER_POOL: + switch(stat_id) + { + case SAI_BUFFER_POOL_STAT_WATERMARK_BYTES: + case SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES: + return SAI_STATS_MODE_READ_AND_CLEAR; + default: + break; + } + break; + default: + break; + } + + return SAI_STATS_MODE_READ; +} diff --git a/orchagent/high_frequency_telemetry/hftelutils.h b/orchagent/high_frequency_telemetry/hftelutils.h new file mode 100644 index 00000000000..73176315441 --- /dev/null +++ b/orchagent/high_frequency_telemetry/hftelutils.h @@ -0,0 +1,79 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +class HFTelUtils +{ +public: + HFTelUtils() = delete; + + static std::vector get_sai_object_list( + sai_object_id_t obj, + sai_attr_id_t attr_id, + sai_api_t api, + std::function get_attribute_handler); + static sai_object_type_t group_name_to_sai_type(const std::string &group_name); + static std::string sai_type_to_group_name(sai_object_type_t object_type); + static std::set object_counters_to_stats_ids( + const std::string &group_name, + const std::set &object_counters); + static sai_stats_mode_t get_stats_mode(sai_object_type_t object_type, sai_stat_id_t stat_id); +}; + +#define HFTELUTILS_ADD_SAI_OBJECT_LIST(obj, attr_id, inserted_obj, api_type_name, api_name, obj_type_name) \ + { \ + sai_attribute_t attr; \ + auto obj_list = HFTelUtils::get_sai_object_list( \ + obj, \ + attr_id, \ + api_type_name, \ + sai_##api_name##_api->get_##obj_type_name##_attribute); \ + obj_list.push_back(inserted_obj); \ + attr.id = attr_id; \ + attr.value.objlist.count = static_cast(obj_list.size()); \ + attr.value.objlist.list = obj_list.data(); \ + sai_status_t status = sai_##api_name##_api->set_##obj_type_name##_attribute( \ + obj, \ + &attr); \ + if (status != SAI_STATUS_SUCCESS) \ + { \ + handleSaiSetStatus( \ + api_type_name, \ + status); \ + } \ + } + +#define HFTELUTILS_DEL_SAI_OBJECT_LIST(obj, attr_id, removed_obj, api_type_name, api_name, obj_type_name) \ + { \ + sai_attribute_t attr; \ + auto obj_list = HFTelUtils::get_sai_object_list( \ + obj, \ + attr_id, \ + api_type_name, \ + sai_##api_name##_api->get_##obj_type_name##_attribute); \ + obj_list.erase( \ + std::remove( \ + obj_list.begin(), \ + obj_list.end(), \ + removed_obj), \ + obj_list.end()); \ + attr.id = attr_id; \ + attr.value.objlist.count = static_cast(obj_list.size()); \ + attr.value.objlist.list = obj_list.data(); \ + sai_status_t status = sai_##api_name##_api->set_##obj_type_name##_attribute( \ + obj, \ + &attr); \ + if (status != SAI_STATUS_SUCCESS) \ + { \ + handleSaiSetStatus( \ + api_type_name, \ + status); \ + } \ + } diff --git a/orchagent/icmporch.cpp b/orchagent/icmporch.cpp new file mode 100644 index 00000000000..29788136470 --- /dev/null +++ b/orchagent/icmporch.cpp @@ -0,0 +1,628 @@ +/* + * icmporch.cpp + * + * Created on: Feb 21, 2025 + * Author: Manas Kumar Mandal + */ + +#include "converter.h" +#include "swssnet.h" +#include "notifier.h" +#include "sai_serialize.h" +#include "directory.h" +#include "notifications.h" +#include "icmporch.h" +#include "switchorch.h" +#include + +using namespace std; +using namespace swss; + +extern SwitchOrch *gSwitchOrch; + +const uint32_t IcmpOrch::m_max_sessions = 1024; + +const std::map IcmpOrch::m_session_state_lkup = +{ + {SAI_ICMP_ECHO_SESSION_STATE_DOWN, "Down"}, + {SAI_ICMP_ECHO_SESSION_STATE_UP, "Up"} +}; + +const std::map IcmpOrch::m_session_state_str_lkup = +{ + {"Down", SAI_ICMP_ECHO_SESSION_STATE_DOWN}, + {"Up", SAI_ICMP_ECHO_SESSION_STATE_UP} +}; + +IcmpOrch::IcmpOrch(DBConnector *db, string tableName, TableConnector stateDbIcmpSessionTable): + Orch(db, tableName), + m_stateIcmpSessionTable(stateDbIcmpSessionTable.first, stateDbIcmpSessionTable.second), + m_register_state_change_notif{false} +{ + SWSS_LOG_ENTER(); + + // check the capability + std::string offload_capable; + gSwitchOrch->get_switch_capability("ICMP_OFFLOAD_CAPABLE", offload_capable); + if (offload_capable != "true") + { + SWSS_LOG_NOTICE("ICMP offload not supported"); + return; + } + + DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); + m_icmpStateNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + + // Clean up state database ICMP entries + vector keys; + + m_stateIcmpSessionTable.getKeys(keys); + + for (auto alias : keys) + { + m_stateIcmpSessionTable.del(alias); + } + + auto icmpStateNotifier = new Notifier(m_icmpStateNotificationConsumer, this, "ICMP_STATE_NOTIFICATIONS"); + Orch::addExecutor(icmpStateNotifier); +} + +IcmpOrch::~IcmpOrch(void) +{ + // do nothing, just log + SWSS_LOG_ENTER(); +} + +void IcmpOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + auto data = kfvFieldsValues(t); + + if (op == SET_COMMAND) + { + if (m_icmp_session_map.find(key) != m_icmp_session_map.end()) + { + if (!update_icmp_session(key, data)) + { + it++; + continue; + } + } else { + if (!create_icmp_session(key, data)) + { + it++; + continue; + } + } + } + else if (op == DEL_COMMAND) + { + if (!remove_icmp_session(key)) + { + it++; + continue; + } + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s\n", op.c_str()); + } + + it = consumer.m_toSync.erase(it); + } +} + +void IcmpOrch::doTask(NotificationConsumer &consumer) +{ + SWSS_LOG_ENTER(); + + std::string op; + std::string data; + std::vector values; + + consumer.pop(op, data, values); + + if (&consumer != m_icmpStateNotificationConsumer) + { + return; + } + + if (op == "icmp_echo_session_state_change") + { + uint32_t count = 0; + sai_icmp_echo_session_state_notification_t *icmpSessionState = nullptr; + + sai_deserialize_icmp_echo_session_state_ntf(data, count, &icmpSessionState); + + for (uint32_t i = 0; i < count; i++) + { + sai_object_id_t id = icmpSessionState[i].icmp_echo_session_id; + sai_icmp_echo_session_state_t state = icmpSessionState[i].session_state; + + SWSS_LOG_INFO("Got ICMP session state change notification id:%" PRIx64 " state: %s", id, m_session_state_lkup.at(state).c_str()); + + if (m_icmp_session_lookup.find(id) == m_icmp_session_lookup.end()) + { + SWSS_LOG_NOTICE("ICMP session missing for state change notification id:%" PRIx64 " state: %s", id, m_session_state_lkup.at(state).c_str()); + continue; + } + + // handle state update + if (state != m_icmp_session_lookup[id].state || m_icmp_session_lookup[id].init_state) + { + auto key = m_icmp_session_lookup[id].db_key; + vector fvVector; + m_stateIcmpSessionTable.get(key, fvVector); + + fvVector.push_back({IcmpSaiSessionHandler::m_state_fname, m_session_state_lkup.at(state)}); + + m_stateIcmpSessionTable.set(key, fvVector); + + SWSS_LOG_NOTICE("ICMP session state for %s changed from %s to %s", key.c_str(), + m_session_state_lkup.at(m_icmp_session_lookup[id].state).c_str(), m_session_state_lkup.at(state).c_str()); + + m_icmp_session_lookup[id].state = state; + m_icmp_session_lookup[id].init_state = false; + } + } + + sai_deserialize_free_icmp_echo_session_state_ntf(count, icmpSessionState); + } +} + +bool IcmpOrch::create_icmp_session(const string& key, const vector& data) +{ + IcmpSaiSessionHandler sai_session_handler(*this); + + if (m_num_sessions == m_max_sessions) + { + SWSS_LOG_ERROR("ICMP session creation failed, limit (%u) reached", m_num_sessions); + // return false to retry + return false; + } + + // initialize the sai session handler + auto init_status = sai_session_handler.init(sai_icmp_echo_api, key); + if (init_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + SWSS_LOG_INFO("ICMP session creation failed key(%s), init_status(%s)", key.c_str(), + SaiOffloadStatusStrMap.at(init_status).c_str()); + return true; + } + + if (!m_register_state_change_notif) + { + if (!sai_session_handler.register_state_change_notification()) + { + // return false to retry registration + return false; + } + m_register_state_change_notif = true; + } + + auto create_status = sai_session_handler.create(data); + if (create_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + SWSS_LOG_INFO("ICMP session creation failed key(%s), create_status(%s)", key.c_str(), + SaiOffloadStatusStrMap.at(create_status).c_str()); + // do not consume the entry for retries + bool skip_entry = create_status != SaiOffloadHandlerStatus::RETRY_VALID_ENTRY; + return skip_entry; + } + + // update the STATE DB and local session maps + auto& fvVector = sai_session_handler.get_fv_vector(); + auto& state_db_key = sai_session_handler.get_state_db_key(); + + m_stateIcmpSessionTable.set(state_db_key, fvVector); + auto session_id = sai_session_handler.get_session_id(); + IcmpSessionDataCache session_cache{session_id, sai_session_handler.get_fv_map()}; + m_icmp_session_map[key] = session_cache; + m_icmp_session_lookup[session_id] = {state_db_key, SAI_ICMP_ECHO_SESSION_STATE_DOWN, true}; + + m_num_sessions++; + + SWSS_LOG_NOTICE("Created ICMP offload session key(%s)", key.c_str()); + return true; +} + +bool IcmpOrch::update_icmp_session(const string& key, const fv_vector_t& data) +{ + IcmpSaiSessionHandler sai_session_handler(*this); + + // initialize the sai session handler + auto init_status = sai_session_handler.init(sai_icmp_echo_api, key); + if (init_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + SWSS_LOG_INFO("ICMP session update failed key(%s), init_status(%s)", key.c_str(), + SaiOffloadStatusStrMap.at(init_status).c_str()); + return true; + } + + auto session_id = m_icmp_session_map[key].session_id; + auto& fv_map = m_icmp_session_map[key].fv_map; + auto update_status = sai_session_handler.update(session_id, data, fv_map); + if (update_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + SWSS_LOG_INFO("ICMP session update failed key(%s), update_status(%s)", key.c_str(), + SaiOffloadStatusStrMap.at(update_status).c_str()); + // do not consume the entry for retries + bool skip_entry = update_status != SaiOffloadHandlerStatus::RETRY_VALID_ENTRY; + return skip_entry; + } + + // update the STATE DB and local session maps + auto& fvVector = sai_session_handler.get_fv_vector(); + if (fvVector.size()) { + auto& state_db_key = sai_session_handler.get_state_db_key(); + auto& fv_map_upd = sai_session_handler.get_fv_map(); + m_stateIcmpSessionTable.set(state_db_key, fvVector); + IcmpSessionDataCache session_cache{session_id, fv_map_upd}; + m_icmp_session_map[key] = session_cache; + + SWSS_LOG_NOTICE("Updated ICMP offload session key(%s)", key.c_str()); + } + return true; +} + +bool IcmpOrch::remove_icmp_session(const string& key) +{ + if (m_icmp_session_map.find(key) == m_icmp_session_map.end()) + { + SWSS_LOG_ERROR("Request to remove non-existing ICMP session for %s", key.c_str()); + return true; + } + + IcmpSaiSessionHandler sai_session_handler(*this); + + // initialize the sai session handler + auto init_status = sai_session_handler.init(sai_icmp_echo_api, key); + if (init_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + SWSS_LOG_INFO("ICMP session removal failed key(%s), init_status(%s)", key.c_str(), + SaiOffloadStatusStrMap.at(init_status).c_str()); + return true; + } + + sai_object_id_t icmp_session_id = m_icmp_session_map[key].session_id; + auto remove_status = sai_session_handler.remove(icmp_session_id); + if ( remove_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + // do not consume the entry for retries + SWSS_LOG_INFO("ICMP session removal failed key(%s), remove_status(%s)", key.c_str(), + SaiOffloadStatusStrMap.at(remove_status).c_str()); + bool skip_entry = remove_status != SaiOffloadHandlerStatus::RETRY_VALID_ENTRY; + return skip_entry; + } + + // delete the session from state db and remove them from local maps + m_stateIcmpSessionTable.del(m_icmp_session_lookup[icmp_session_id].db_key); + + m_icmp_session_map.erase(key); + m_icmp_session_lookup.erase(icmp_session_id); + m_num_sessions--; + + SWSS_LOG_NOTICE("Removed ICMP offload session key(%s)", key.c_str()); + return true; +} + +const std::string IcmpSaiSessionHandler::m_name = "IcmpOffload"; + +const std::string IcmpSaiSessionHandler::m_tx_interval_fname = "tx_interval"; +const std::string IcmpSaiSessionHandler::m_rx_interval_fname = "rx_interval"; +const std::string IcmpSaiSessionHandler::m_src_ip_fname = "src_ip"; +const std::string IcmpSaiSessionHandler::m_dst_ip_fname = "dst_ip"; +const std::string IcmpSaiSessionHandler::m_src_mac_fname = "src_mac"; +const std::string IcmpSaiSessionHandler::m_dst_mac_fname = "dst_mac"; +const std::string IcmpSaiSessionHandler::m_tos_fname = "tos"; +const std::string IcmpSaiSessionHandler::m_ttl_fname = "ttl"; +const std::string IcmpSaiSessionHandler::m_state_fname = "state"; +const std::string IcmpSaiSessionHandler::m_session_cookie_fname = "session_cookie"; +const std::string IcmpSaiSessionHandler::m_session_guid_fname = "session_guid"; +const std::string IcmpSaiSessionHandler::m_hw_lookup_fname = "hw_lookup"; +const std::string IcmpSaiSessionHandler::m_nexthop_switchover_fname = "nexthop_switchover"; +const std::string IcmpSaiSessionHandler::m_session_type_normal = "NORMAL"; +const std::string IcmpSaiSessionHandler::m_session_type_rx = "RX"; + +const uint32_t IcmpSaiSessionHandler::m_max_tx_interval_usec = 1200000; +const uint32_t IcmpSaiSessionHandler::m_min_tx_interval_usec = 3000; +const uint32_t IcmpSaiSessionHandler::m_max_rx_interval_usec = 24000000; +const uint32_t IcmpSaiSessionHandler::m_min_rx_interval_usec = 9000; + + +const std::unordered_set IcmpSaiSessionHandler::m_update_fields = { + m_tx_interval_fname, + m_rx_interval_fname, + m_tos_fname, +}; + +void IcmpSaiSessionHandler::handle_tx_interval_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + val.u32 = time_msec_to_usec(to_uint(sval)); + if (val.u32) + { + if (val.u32 < m_min_tx_interval_usec) + { + SWSS_LOG_NOTICE("IcmpOrch resetting to min tx_interval (%u)", m_min_tx_interval_usec); + val.u32 = m_min_tx_interval_usec; + sval = to_string(m_min_tx_interval_usec/1000); + } + + if (val.u32 > m_max_tx_interval_usec) + { + SWSS_LOG_NOTICE("IcmpOrch resetting to max tx_interval (%u)", m_max_tx_interval_usec); + val.u32 = m_max_tx_interval_usec; + sval = to_string(m_max_tx_interval_usec/1000); + } + } + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL] = val; + fvVector.push_back({m_tx_interval_fname, sval}); +} + +void IcmpSaiSessionHandler::handle_rx_interval_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + val.u32 = time_msec_to_usec(to_uint(sval)); + + if (val.u32 < m_min_rx_interval_usec) + { + SWSS_LOG_NOTICE("IcmpOrch resetting to min rx_interval (%u)", m_min_rx_interval_usec); + val.u32 = m_min_rx_interval_usec; + sval = to_string(m_min_rx_interval_usec/1000); + } + + if (val.u32 > m_max_rx_interval_usec) + { + SWSS_LOG_NOTICE("IcmpOrch resetting to max rx_interval (%u)", m_max_rx_interval_usec); + val.u32 = m_max_rx_interval_usec; + sval = to_string(m_max_rx_interval_usec/1000); + } + + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL] = val; + fvVector.push_back({m_rx_interval_fname, sval}); +} + +void IcmpSaiSessionHandler::handle_src_ip_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + auto src_ip = IpAddress(sval); + swss::copy(val.ipaddr, src_ip); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS] = val; + fvVector.push_back({m_src_ip_fname, sval}); + sai_attribute_value_t hdr_type; + hdr_type.u8 = src_ip.isV4() ? 4 : 6; + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION] = hdr_type; +} + +void IcmpSaiSessionHandler::handle_dst_ip_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + swss::copy(val.ipaddr, IpAddress(sval)); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS] = val; + fvVector.push_back({m_dst_ip_fname, sval}); +} + +void IcmpSaiSessionHandler::handle_src_mac_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + auto mac = MacAddress(sval); + memcpy(val.mac, mac.getMac(), sizeof(sai_mac_t)); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_SRC_MAC_ADDRESS] = val; +} + +void IcmpSaiSessionHandler::handle_dst_mac_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + auto mac = MacAddress(sval); + memcpy(val.mac, mac.getMac(), sizeof(sai_mac_t)); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS] = val; +} + +void IcmpSaiSessionHandler::handle_tos_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + val.u8 = to_uint(sval); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_TOS] = val; +} + +void IcmpSaiSessionHandler::handle_ttl_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + val.u8 = to_uint(sval); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_TTL] = val; +} + +void IcmpSaiSessionHandler::handle_session_guid_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + val.u64 = to_uint(sval); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_GUID] = val; + fvVector.push_back({m_session_guid_fname, sval}); +} + +void IcmpSaiSessionHandler::handle_session_cookie_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + val.u32 = to_uint(sval); + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_COOKIE] = val; + fvVector.push_back({m_session_cookie_fname, sval}); +} + +void IcmpSaiSessionHandler::handle_hw_lookup_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, + fv_vector_t& fvVector) +{ + sai_attribute_value_t val; + val.booldata = (sval == "true") ? true : false; + id_val_map[SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID] = val; +} + +// ICMP Sai session attribute handlers +sai_attr_handler_map_t IcmpSaiSessionHandler::m_handler_map = { + + {m_tx_interval_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL, + IcmpSaiSessionHandler::handle_tx_interval_field)}, + {m_rx_interval_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL, + IcmpSaiSessionHandler::handle_rx_interval_field)}, + {m_src_ip_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS, + IcmpSaiSessionHandler::handle_src_ip_field)}, + {m_dst_ip_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS, + IcmpSaiSessionHandler::handle_dst_ip_field)}, + {m_src_mac_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_SRC_MAC_ADDRESS, + IcmpSaiSessionHandler::handle_src_mac_field)}, + {m_dst_mac_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS, + IcmpSaiSessionHandler::handle_dst_mac_field)}, + {m_tos_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_TOS, + IcmpSaiSessionHandler::handle_tos_field)}, + {m_ttl_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_TTL, + IcmpSaiSessionHandler::handle_ttl_field)}, + {m_session_guid_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_GUID, + IcmpSaiSessionHandler::handle_session_guid_field)}, + {m_session_cookie_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_COOKIE, + IcmpSaiSessionHandler::handle_session_cookie_field)}, + {m_hw_lookup_fname, std::make_tuple(SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID, + IcmpSaiSessionHandler::handle_hw_lookup_field)} +}; + +SaiOffloadHandlerStatus IcmpSaiSessionHandler::do_init(sai_icmp_echo_api_t *api) +{ + size_t vrf_pos = m_key.find(delimiter); + if (vrf_pos == string::npos) + { + SWSS_LOG_ERROR("%s, Failed to parse key %s, no vrf is given", m_name.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + size_t ifname_pos = m_key.find(delimiter, vrf_pos + 1); + if (ifname_pos == string::npos) + { + SWSS_LOG_ERROR("%s, Failed to parse key %s, no ifname is given", m_name.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + size_t guid_pos = m_key.find(delimiter, ifname_pos + 1); + if (guid_pos == string::npos) + { + SWSS_LOG_ERROR("%s, Failed to parse key %s, no guid is given", m_name.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + m_vrf_name = m_key.substr(0, vrf_pos); + m_alias = m_key.substr(vrf_pos + 1, ifname_pos - vrf_pos - 1); + m_guid = m_key.substr(ifname_pos + 1, guid_pos - ifname_pos - 1); + m_session_type = m_key.substr(guid_pos + 1); + if (m_session_type == "") + { + m_session_type = m_session_type_normal; + } + + m_state_db_key = IcmpOrch::get_state_db_key(m_vrf_name, m_alias, m_guid, m_session_type); + + // initialize the sai icmp echo session function pointers + sai_create_session = api->create_icmp_echo_session; + sai_remove_session = api->remove_icmp_echo_session; + sai_set_session_attrib = api->set_icmp_echo_session_attribute; + sai_get_session_attrib = api->get_icmp_echo_session_attribute; + + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +SaiOffloadHandlerStatus IcmpSaiSessionHandler::do_create() +{ + // updating the tx_interval to 0 for PEER sessions makes sure + // that hardware will not send echo requests for the PEER session + if (m_session_type == m_session_type_rx) + { + SWSS_LOG_NOTICE("%s, Tx interval being reset to 0 for RX session, %s", m_name.c_str(), m_key.c_str()); + sai_attribute_value_t val; + val.u32 = 0; + m_attr_val_map[SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL] = val; + m_fv_vector.push_back({m_tx_interval_fname, "0"}); + m_fv_map[m_tx_interval_fname] = "0"; + } + + // update the hw_lookup parameter in fv_vector + auto& hw_lookup_attr_val = m_attr_val_map[SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID]; + if (hw_lookup_attr_val.booldata) + { + m_fv_vector.push_back({m_hw_lookup_fname, "true"}); + } + else + { + m_fv_vector.push_back({m_hw_lookup_fname, "false"}); + } + + // set the guid that we got in key + auto hsearch = m_handler_map.find(m_session_guid_fname); + if (hsearch != m_handler_map.end()) + { + auto& htuple = hsearch->second; + auto& handler = std::get<1>(htuple); + handler(m_guid, m_attr_val_map, m_fv_vector); + m_fv_map[m_session_guid_fname] = m_guid; + } + else + { + // this should never happen + SWSS_LOG_ERROR("%s, GUID handler not found, %s", m_name.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_VALID_ENTRY; + } + + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +SaiOffloadHandlerStatus IcmpSaiSessionHandler::do_remove() +{ + // no special handling required + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +SaiOffloadHandlerStatus IcmpSaiSessionHandler::do_update() +{ + // do not update tx_interval for RX sessions + if ((m_fv_map.find(m_tx_interval_fname) != m_fv_map.end()) && + (m_session_type == m_session_type_rx)) + { + SWSS_LOG_NOTICE("%s, Not updating Tx interval for RX session, %s", m_name.c_str(), m_key.c_str()); + m_attr_val_map.erase(SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL); + + for (auto it = m_fv_vector.begin(); it != m_fv_vector.end();) + { + if (fvField(*it) == m_tx_interval_fname) + { + it = m_fv_vector.erase(it); + } else { + it++; + } + } + + m_fv_map[m_tx_interval_fname] = "0"; + } + + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +void IcmpSaiSessionHandler::on_state_change(uint32_t count, sai_icmp_echo_session_state_notification_t *data) +{ + // we do not use this registered notification handler + // as it is called in a separate thread of sairedis +} + diff --git a/orchagent/icmporch.h b/orchagent/icmporch.h new file mode 100644 index 00000000000..b43bca30951 --- /dev/null +++ b/orchagent/icmporch.h @@ -0,0 +1,345 @@ +/* + * icmporch.h + * + * Created on: Feb 21, 2025 + * Author: Manas Kumar Mandal + */ +#ifndef SWSS_ICMPORCH_H +#define SWSS_ICMPORCH_H + +#include "orch.h" +#include "observer.h" +#include "saioffloadsession.h" +#include +#include + +extern sai_icmp_echo_api_t* sai_icmp_echo_api; + +extern void sai_deserialize_icmp_session_state_ntf( const std::string& s, + uint32_t &count, + sai_icmp_echo_session_state_notification_t** bfd_session_state); +extern void sai_deserialize_free_icmp_session_state_ntf(uint32_t count, + sai_icmp_echo_session_state_notification_t* icmp_session_state); + +constexpr uint32_t time_msec_to_usec(const uint32_t val) +{ + return val * 1000; +} + +/** + *@struct IcmpUpdate + * + *@brief structure used for mapping icmp session id and state + */ +struct IcmpUpdate +{ + std::string db_key; + sai_icmp_echo_session_state_t state; + bool init_state; +}; + +/** + *@struct IcmpSessionDataCache + * + *@brief structure used for mapping icmp session key and session data + */ +struct IcmpSessionDataCache +{ + sai_object_id_t session_id; + fv_map_t fv_map; +}; + +// forward declaration of icmp sai handler +struct IcmpSaiSessionHandler; + +/** + *@class IcmpOrch + * + *@brief Orchestrator class that handles ICMP sessions + */ +class IcmpOrch: public Orch, public Subject +{ +public: + /** + *@method IcmpOrch + * + *@brief class constructor + * + *@param db(in) pointer to DBConnector object + *@param tableName(in) consumer table name + *@param stateDbIcmpSessionTable(in) producer state db table + */ + IcmpOrch(swss::DBConnector *db, std::string tableName, TableConnector stateDbIcmpSessionTable); + + /** + *@method ~IcmpOrch + * + *@brief class destructor + */ + virtual ~IcmpOrch(void); + + /** + *@method doTask + * + *@brief overriden method that consumes SET/DEL + * operations on consumer table entries + * + *@param consumer(in) reference to consumer + */ + void doTask(Consumer &consumer) override; + + /** + *@method doTask + * + *@brief overriden method that consumes notifications + * from asic db + * + *@param consumer(in) reference to notification consumer + */ + void doTask(swss::NotificationConsumer &consumer) override; + + // friend handler have access to IcmpOrch + friend struct IcmpSaiSessionHandler; + + static inline std::string get_state_db_key(const std::string& vrf, const std::string& alias, + const std::string& guid, const std::string& session_type) { + return vrf + state_db_key_delimiter + alias + state_db_key_delimiter + guid + + state_db_key_delimiter + session_type; + } + +private: + /** + *@method create_icmp_session + * + *@brief creates icmp echo sessions in hardware + * + *@param key(in) reference to session key + *@param data(in) vector of session parameters from APP_DB + * table as field value tuples + * + *@return false for retries + * true for all other cases where session entry is consumed + */ + bool create_icmp_session(const string& key, const vector& data); + + /** + *@method remove_icmp_session + * + *@brief removes icmp echo sessions from hardware + * + *@param key(in) reference to session key + * + *@return false for retries + * true for all other cases + */ + bool remove_icmp_session(const string& key); + + /** + *@method update_icmp_session + * + *@brief updates icmp echo sessions in hardware + * + *@param key(in) reference to session key + *@param data(in) vector of session parameters from APP_DB + * table as field value tuples + * + *@return false for retries + * true for all other cases where session entry is consumed + */ + bool update_icmp_session(const string& key, const vector& data); + + // map of session key to session data cache + std::map m_icmp_session_map; + // map of session object id to update data for handling notification from asic db + std::map m_icmp_session_lookup; + + // Icmp session state table produced by IcmpOrch + swss::Table m_stateIcmpSessionTable; + + // ASIC_DB ICMP state notification consumer + swss::NotificationConsumer* m_icmpStateNotificationConsumer; + // indicates notification registration is done + bool m_register_state_change_notif; + + // keeps track of number of sessions + uint32_t m_num_sessions = 0; + + // max number of sessions + static const uint32_t m_max_sessions; + + // map of sai icmp session state to string + static const std::map m_session_state_lkup; + // map of icmp session state string to sai icmp session state + static const std::map m_session_state_str_lkup; +}; + +/** + *@struct IcmpSaiSessionHandler + * + *@brief Sai session handler for ICMP sessions + */ +struct IcmpSaiSessionHandler : public SaiOffloadSessionHandler +{ + /** + *@method IcmpSaiSessionHandler + * + *@brief class default constructor + */ + IcmpSaiSessionHandler() = delete; + + /** + *@method IcmpSaiSessionHandler copy constructor + * + *@brief class copy constructor + * + *@param IcmpSaiSessionHandler(in) reference to IcmpSaiSessionHandler object to be copied + */ + IcmpSaiSessionHandler(const IcmpSaiSessionHandler &) = delete; + + /** + *@method IcmpSaiSessionHandler + * + *@brief class constructor + * + *@param IcmpOrch(in) reference to IcmpOrch object + */ + IcmpSaiSessionHandler(IcmpOrch& orch) : m_orch(orch) { } + + // enum that maps ICMP ECHO SESSION SAI attribute IDs to common SaiOffload IDs + enum class SAI_ATTR_ID { + HW_LOOKUP_ID = SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID, + VRF_ATTR_ID = SAI_ICMP_ECHO_SESSION_ATTR_VIRTUAL_ROUTER, + PORT_ID = SAI_ICMP_ECHO_SESSION_ATTR_PORT, + IPVER_ID = SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION, + TX_INTERVAL_ID = SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL, + RX_INTERVAL_ID = SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL, + SRC_IP_ID = SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS, + DST_IP_ID = SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS, + SRC_MAC_ID = SAI_ICMP_ECHO_SESSION_ATTR_SRC_MAC_ADDRESS, + DST_MAC_ID = SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS, + TOS_ID = SAI_ICMP_ECHO_SESSION_ATTR_TOS, + TTL_ID = SAI_ICMP_ECHO_SESSION_ATTR_TTL, + COUNT_MODE_ID = SAI_ICMP_ECHO_SESSION_ATTR_STATS_COUNT_MODE, + COUNTER_LIST_ID = SAI_ICMP_ECHO_SESSION_ATTR_SELECTIVE_COUNTER_LIST, + }; + + // enum that maps ICMP Notification attribute IDs to common SaiOffload notification IDs + enum class SAI_NOTIF_ATTR_ID { + STATE_CHANGE = SAI_SWITCH_ATTR_ICMP_ECHO_SESSION_STATE_CHANGE_NOTIFY, + AVAILABLE_SESSIONS = SAI_SWITCH_ATTR_AVAILABLE_ICMP_ECHO_SESSION + }; + + // enum that maps ICMP State to common SaiOffload State + enum class SESSION_STATE { + STATE_DOWN = SAI_ICMP_ECHO_SESSION_STATE_DOWN, + STATE_UP = SAI_ICMP_ECHO_SESSION_STATE_UP, + }; + + enum class SAI_API_TYPE { + API_TYPE = SAI_API_ICMP_ECHO + }; + + /** + *@method do_init + * + *@brief initializes the icmp sai session handler + * + *@param api(in) pointer to sai_icmp_echo_api_t + * + *@return SUCCESS_VALID_ENTRY when valid key and successfully initialized + * FAILED_INVALID_ENTRY when key is invalid + * FAILED_VALID_ENTRY when initialization fails for valid key + */ + SaiOffloadHandlerStatus do_init(sai_icmp_echo_api_t *api); + + /** + *@method do_create + * + *@brief auxilary create method for icmp echo session + * + *@return SUCCESS_VALID_ENTRY session parameters valid and created with success + * FAILED_INVALID_ENTRY session parameters are invalid + * FAILED_VALID_ENTRY session creation fails for valid key + * RETRY_VALID_ENTRY retry session creation for valid key + */ + SaiOffloadHandlerStatus do_create(); + + /** + *@method do_remove + * + *@brief auxilary remove method for icmp echo session + * + *@return SUCCESS_VALID_ENTRY session id found and removed + * FAILED_INVALID_ENTRY session id not found + * FAILED_VALID_ENTRY unable to remove session for a found id + * RETRY_VALID_ENTRY retry session removal for a found id + */ + SaiOffloadHandlerStatus do_remove(); + + /** + *@method do_update + * + *@brief auxilary update method for icmp echo session + * + *@return SUCCESS_VALID_ENTRY session parameters valid and updated with success + * FAILED_INVALID_ENTRY session parameters are invalid + * FAILED_VALID_ENTRY session update fails for valid key + * RETRY_VALID_ENTRY retry session update for valid key + */ + SaiOffloadHandlerStatus do_update(); + + // stored reference to the IcmpOrch + IcmpOrch& m_orch; + // icmp echo session type, NORMAL/RX + std::string m_session_type; + // icmp echo session guid string from key + std::string m_guid; + + // function registered for icmp session notification + static void on_state_change(uint32_t count, sai_icmp_echo_session_state_notification_t *data); + + // map of sai attributes and its handlers + static sai_attr_handler_map_t m_handler_map; + + // unordered set of fields that are updatable + static const std::unordered_set m_update_fields; + // name of the icmp orch + static const std::string m_name; + + // handlers for icmp echo session app_db fields + static void handle_tx_interval_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_rx_interval_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_src_ip_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_dst_ip_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_src_mac_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_dst_mac_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_tos_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_ttl_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_session_guid_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_session_cookie_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + static void handle_hw_lookup_field(std::string& sval, sai_attr_id_val_map_t& id_val_map, fv_vector_t& fvVector); + + // fieldname strings used by app_db and state_db for icmp echo sessions + static const std::string m_tx_interval_fname; + static const std::string m_rx_interval_fname; + static const std::string m_src_ip_fname; + static const std::string m_dst_ip_fname; + static const std::string m_src_mac_fname; + static const std::string m_dst_mac_fname; + static const std::string m_tos_fname; + static const std::string m_ttl_fname; + static const std::string m_state_fname; + static const std::string m_session_cookie_fname; + static const std::string m_session_guid_fname; + static const std::string m_hw_lookup_fname; + static const std::string m_nexthop_switchover_fname; + static const std::string m_session_type_normal; + static const std::string m_session_type_rx; + + static const uint32_t m_max_tx_interval_usec; + static const uint32_t m_min_tx_interval_usec; + static const uint32_t m_max_rx_interval_usec; + static const uint32_t m_min_rx_interval_usec; +}; + +#endif /* SWSS_ICMPORCH_H */ diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index 4363beb9ea0..b5361602734 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -37,10 +37,11 @@ extern bool gIsNatSupported; extern NeighOrch *gNeighOrch; extern string gMySwitchType; extern int32_t gVoqMySwitchId; +extern bool gTraditionalFlexCounter; +extern bool isChassisDbInUse(); const int intfsorch_pri = 35; -#define RIF_FLEX_STAT_COUNTER_POLL_MSECS "1000" #define UPDATE_MAPS_SEC 1 #define MGMT_VRF "mgmt" @@ -64,45 +65,41 @@ IntfsOrch::IntfsOrch(DBConnector *db, string tableName, VRFOrch *vrf_orch, DBCon /* Initialize DB connectors */ m_counter_db = shared_ptr(new DBConnector("COUNTERS_DB", 0)); - m_flex_db = shared_ptr(new DBConnector("FLEX_COUNTER_DB", 0)); m_asic_db = shared_ptr(new DBConnector("ASIC_DB", 0)); /* Initialize COUNTER_DB tables */ m_rifNameTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_RIF_NAME_MAP)); m_rifTypeTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_RIF_TYPE_MAP)); - m_vidToRidTable = unique_ptr
(new Table(m_asic_db.get(), "VIDTORID")); + if (gTraditionalFlexCounter) + { + m_vidToRidTable = unique_ptr
(new Table(m_asic_db.get(), "VIDTORID")); + } + auto intervT = timespec { .tv_sec = UPDATE_MAPS_SEC , .tv_nsec = 0 }; m_updateMapsTimer = new SelectableTimer(intervT); auto executorT = new ExecutableTimer(m_updateMapsTimer, this, "UPDATE_MAPS_TIMER"); Orch::addExecutor(executorT); - /* Initialize FLEX_COUNTER_DB tables */ - m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), FLEX_COUNTER_TABLE)); - m_flexCounterGroupTable = unique_ptr(new ProducerTable(m_flex_db.get(), FLEX_COUNTER_GROUP_TABLE)); - - vector fieldValues; - fieldValues.emplace_back(POLL_INTERVAL_FIELD, RIF_FLEX_STAT_COUNTER_POLL_MSECS); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ); - m_flexCounterGroupTable->set(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP, fieldValues); string rifRatePluginName = "rif_rates.lua"; + string rifRateSha; try { string rifRateLuaScript = swss::loadLuaScript(rifRatePluginName); - string rifRateSha = swss::loadRedisScript(m_counter_db.get(), rifRateLuaScript); - - vector fieldValues; - fieldValues.emplace_back(RIF_PLUGIN_FIELD, rifRateSha); - fieldValues.emplace_back(POLL_INTERVAL_FIELD, RIF_FLEX_STAT_COUNTER_POLL_MSECS); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ); - m_flexCounterGroupTable->set(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP, fieldValues); + rifRateSha = swss::loadRedisScript(m_counter_db.get(), rifRateLuaScript); } catch (const runtime_error &e) { SWSS_LOG_WARN("RIF flex counter group plugins was not set successfully: %s", e.what()); } - if(gMySwitchType == "voq") + setFlexCounterGroupParameter(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP, + RIF_FLEX_STAT_COUNTER_POLL_MSECS, + STATS_MODE_READ, + RIF_PLUGIN_FIELD, + rifRateSha); + + if(isChassisDbInUse()) { //Add subscriber to process VOQ system interface tableName = CHASSIS_APP_SYSTEM_INTERFACE_TABLE_NAME; @@ -368,6 +365,21 @@ bool IntfsOrch::setIntfVlanFloodType(const Port &port, sai_vlan_flood_control_ty } } + // Also set ipv6 multicast flood type + attr.id = SAI_VLAN_ATTR_UNKNOWN_MULTICAST_FLOOD_CONTROL_TYPE; + attr.value.s32 = vlan_flood_type; + + status = sai_vlan_api->set_vlan_attribute(port.m_vlan_info.vlan_oid, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set multicast flood type for VLAN %u, rv:%d", port.m_vlan_info.vlan_id, status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + return true; } @@ -704,7 +716,7 @@ void IntfsOrch::doTask(Consumer &consumer) bool mpls = false; string vlan = ""; string loopbackAction = ""; - + string oper_status =""; for (auto idx : data) { const auto &field = fvField(idx); @@ -796,6 +808,10 @@ void IntfsOrch::doTask(Consumer &consumer) { loopbackAction = value; } + else if (field == "oper_status") + { + oper_status = value; + } } if (alias == "eth0" || alias == "docker0") @@ -831,6 +847,19 @@ void IntfsOrch::doTask(Consumer &consumer) m_syncdIntfses[alias] = intfs_entry; m_vrfOrch->increaseVrfRefCount(vrf_id); } + else if (m_syncdIntfses[alias].vrf_id != vrf_id) + { + if (m_syncdIntfses[alias].ip_addresses.size() == 0) + { + m_vrfOrch->decreaseVrfRefCount(m_syncdIntfses[alias].vrf_id); + m_vrfOrch->increaseVrfRefCount(vrf_id); + m_syncdIntfses[alias].vrf_id = vrf_id; + } + else + { + SWSS_LOG_ERROR("Failed to set interface '%s' to VRF ID '%d' because it has IP addresses associated with it.", alias.c_str(), vrf_id); + } + } } else { @@ -849,7 +878,19 @@ void IntfsOrch::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); continue; } - + if(table_name == CHASSIS_APP_SYSTEM_INTERFACE_TABLE_NAME) + { + if(isRemoteSystemPortIntf(alias)) + { + SWSS_LOG_INFO("Handle remote systemport intf %s, oper status %s", alias.c_str(), oper_status.c_str()); + bool isUp = (oper_status == "up") ? true : false; + if (!gNeighOrch->ifChangeInformRemoteNextHop(alias, isUp)) + { + SWSS_LOG_WARN("Unable to update the nexthop for port %s, oper status %s", alias.c_str(), oper_status.c_str()); + } + + } + } //Voq Inband interface config processing if(inband_type.size() && !ip_prefix_in_key) { @@ -1270,7 +1311,7 @@ bool IntfsOrch::addRouterIntfs(sai_object_id_t vrf_id, Port &port, string loopba SWSS_LOG_NOTICE("Create router interface %s MTU %u", port.m_alias.c_str(), port.m_mtu); - if(gMySwitchType == "voq") + if(isChassisDbInUse()) { // Sync the interface of local port/LAG to the SYSTEM_INTERFACE table of CHASSIS_APP_DB voqSyncAddIntf(port.m_alias); @@ -1289,8 +1330,21 @@ bool IntfsOrch::removeRouterIntfs(Port &port) return false; } - const auto id = sai_serialize_object_id(port.m_rif_id); - removeRifFromFlexCounter(id, port.m_alias); + bool port_found = false; + for (auto it = m_rifsToAdd.begin(); it != m_rifsToAdd.end(); ++it) + { + if (it->m_rif_id == port.m_rif_id) + { + m_rifsToAdd.erase(it); + port_found = true; + break; + } + } + if (!port_found) + { + const auto id = sai_serialize_object_id(port.m_rif_id); + removeRifFromFlexCounter(id, port.m_alias); + } sai_status_t status = sai_router_intfs_api->remove_router_interface(port.m_rif_id); if (status != SAI_STATUS_SUCCESS) @@ -1310,7 +1364,7 @@ bool IntfsOrch::removeRouterIntfs(Port &port) SWSS_LOG_NOTICE("Remove router interface for port %s", port.m_alias.c_str()); - if(gMySwitchType == "voq") + if(isChassisDbInUse()) { // Sync the removal of interface of local port/LAG to the SYSTEM_INTERFACE table of CHASSIS_APP_DB voqSyncDelIntf(port.m_alias); @@ -1491,11 +1545,11 @@ void IntfsOrch::addRifToFlexCounter(const string &id, const string &name, const { counters_stream << sai_serialize_router_interface_stat(it) << comma; } + auto &&counters_str = counters_stream.str(); /* check the state of intf, if registering the intf to FC will result in runtime error */ - vector fieldValues; - fieldValues.emplace_back(RIF_COUNTER_ID_LIST, counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); + startFlexCounterPolling(gSwitchId, key, counters_str.c_str(), RIF_COUNTER_ID_LIST); + SWSS_LOG_DEBUG("Registered interface %s to Flex counter", name.c_str()); } @@ -1509,7 +1563,8 @@ void IntfsOrch::removeRifFromFlexCounter(const string &id, const string &name) /* remove it from FLEX_COUNTER_DB */ string key = getRifFlexCounterTableKey(id); - m_flexCounterTable->del(key); + stopFlexCounterPolling(gSwitchId, key); + SWSS_LOG_DEBUG("Unregistered interface %s from Flex counter", name.c_str()); } @@ -1569,7 +1624,7 @@ void IntfsOrch::doTask(SelectableTimer &timer) type = ""; break; } - if (m_vidToRidTable->hget("", id, value)) + if (!gTraditionalFlexCounter || m_vidToRidTable->hget("", id, value)) { SWSS_LOG_INFO("Registering %s it is ready", it->m_alias.c_str()); addRifToFlexCounter(id, it->m_alias, type); @@ -1644,7 +1699,15 @@ void IntfsOrch::voqSyncAddIntf(string &alias) return; } - FieldValueTuple nullFv ("NULL", "NULL"); + if(alias.empty()) + { + SWSS_LOG_ERROR("System Port/LAG alias is empty for %s!", port.m_alias.c_str()); + return; + } + + string oper_status = port.m_oper_status == SAI_PORT_OPER_STATUS_UP ? "up" : "down"; + + FieldValueTuple nullFv ("oper_status", oper_status); vector attrs; attrs.push_back(nullFv); @@ -1684,3 +1747,36 @@ void IntfsOrch::voqSyncDelIntf(string &alias) m_tableVoqSystemInterfaceTable->del(alias); } +void IntfsOrch::voqSyncIntfState(string &alias, bool isUp) +{ + Port port; + string port_alias; + if(gPortsOrch->getPort(alias, port)) + { + //if route interface is not created no need sync the state + if(port.m_rif_id == 0) + { + return; + } + if (port.m_type == Port::LAG) + { + if (port.m_system_lag_info.switch_id != gVoqMySwitchId) + { + return; + } + port_alias = port.m_system_lag_info.alias; + } + else + { + if(port.m_system_port_info.type == SAI_SYSTEM_PORT_TYPE_REMOTE) + { + return; + } + port_alias = port.m_system_port_info.alias; + } + SWSS_LOG_NOTICE("Syncing system interface state %s for port %s", isUp ? "up" : "down", port_alias.c_str()); + m_tableVoqSystemInterfaceTable->hset(port_alias, "oper_status", isUp ? "up" : "down"); + } + +} + diff --git a/orchagent/intfsorch.h b/orchagent/intfsorch.h index ea15ada14b3..aa5129bef45 100644 --- a/orchagent/intfsorch.h +++ b/orchagent/intfsorch.h @@ -18,6 +18,7 @@ extern MacAddress gMacAddress; #define RIF_STAT_COUNTER_FLEX_COUNTER_GROUP "RIF_STAT_COUNTER" #define RIF_RATE_COUNTER_FLEX_COUNTER_GROUP "RIF_RATE_COUNTER" +#define RIF_FLEX_STAT_COUNTER_POLL_MSECS "1000" struct IntfsEntry { @@ -71,6 +72,7 @@ class IntfsOrch : public Orch bool isRemoteSystemPortIntf(string alias); bool isLocalSystemPortIntf(string alias); + void voqSyncIntfState(string &alias, bool); private: @@ -84,13 +86,10 @@ class IntfsOrch : public Orch void doTask(SelectableTimer &timer); shared_ptr m_counter_db; - shared_ptr m_flex_db; shared_ptr m_asic_db; unique_ptr
m_rifNameTable; unique_ptr
m_rifTypeTable; unique_ptr
m_vidToRidTable; - unique_ptr m_flexCounterTable; - unique_ptr m_flexCounterGroupTable; std::set m_removingIntfses; diff --git a/orchagent/lagids.lua b/orchagent/lagids.lua index 93a546cad19..30c1fb5e08a 100644 --- a/orchagent/lagids.lua +++ b/orchagent/lagids.lua @@ -37,28 +37,52 @@ if op == "add" then end -- proposed lag id is different than that in database OR -- the portchannel does not exist in the database - -- If proposed lagid is available, return the same proposed lag id - if redis.call("sismember", "SYSTEM_LAG_ID_SET", tostring(plagid)) == 0 then - redis.call("sadd", "SYSTEM_LAG_ID_SET", tostring(plagid)) - redis.call("srem", "SYSTEM_LAG_ID_SET", tostring(dblagid)) - redis.call("hset", "SYSTEM_LAG_ID_TABLE", pcname, tostring(plagid)) - return plagid + -- If proposed lagid is not available, lpop the first availabe ID + local index = redis.call("lpos", "SYSTEM_LAG_IDS_FREE_LIST", tostring(plagid)) + if index ~= false then + if redis.call("sismember", "SYSTEM_LAG_ID_SET", tostring(plagid)) == 0 then + redis.call("lrem", "SYSTEM_LAG_IDS_FREE_LIST", 0, tostring(plagid)) + redis.call("hset", "SYSTEM_LAG_ID_TABLE", pcname, tostring(plagid)) + redis.call("sadd", "SYSTEM_LAG_ID_SET", tostring(plagid)) + if dblagid then + redis.call("srem", "SYSTEM_LAG_ID_SET", tostring(dblagid)) + if redis.call("lpos", "SYSTEM_LAG_IDS_FREE_LIST", tostring(dblagid)) == false then + redis.call("rpush", "SYSTEM_LAG_IDS_FREE_LIST", tostring(dblagid)) + end + end + return plagid + else + redis.call("lrem", "SYSTEM_LAG_IDS_FREE_LIST", 0, tostring(plagid)) + end end end - local lagid = lagid_start - while lagid <= lagid_end do - if redis.call("sismember", "SYSTEM_LAG_ID_SET", tostring(lagid)) == 0 then - redis.call("sadd", "SYSTEM_LAG_ID_SET", tostring(lagid)) - redis.call("srem", "SYSTEM_LAG_ID_SET", tostring(dblagid)) - redis.call("hset", "SYSTEM_LAG_ID_TABLE", pcname, tostring(lagid)) - return lagid + if redis.call("llen", "SYSTEM_LAG_IDS_FREE_LIST") <= 0 then + return -1 + end + local lagid = redis.call("lpop", "SYSTEM_LAG_IDS_FREE_LIST") + + -- check if the first one is in the SYSTEM_LAG_ID_SET (which could be set by LC which + -- is running previous image with the old allocation method + -- remove from free_list and check/get the next one from the free_list + while redis.call("sismember", "SYSTEM_LAG_ID_SET", tostring(lagid)) == 1 do + if redis.call("llen", "SYSTEM_LAG_IDS_FREE_LIST") <= 0 then + return -1 end - lagid = lagid + 1 + lagid = redis.call("lpop", "SYSTEM_LAG_IDS_FREE_LIST") end - return -1 + -- Remove it from free list in case there is duplicated one + redis.call("lrem", "SYSTEM_LAG_IDS_FREE_LIST", 0, lagid) + redis.call("hset", "SYSTEM_LAG_ID_TABLE", pcname, lagid) + redis.call("sadd", "SYSTEM_LAG_ID_SET", lagid) + if dblagid then + if redis.call("lpos", "SYSTEM_LAG_IDS_FREE_LIST", tostring(dblagid)) == false then + redis.call("rpush", "SYSTEM_LAG_IDS_FREE_LIST", tostring(dblagid)) + end + end + return tonumber(lagid) end if op == "del" then @@ -67,6 +91,9 @@ if op == "del" then local lagid = redis.call("hget", "SYSTEM_LAG_ID_TABLE", pcname) redis.call("srem", "SYSTEM_LAG_ID_SET", lagid) redis.call("hdel", "SYSTEM_LAG_ID_TABLE", pcname) + if redis.call("lpos", "SYSTEM_LAG_IDS_FREE_LIST", lagid) == false then + redis.call("rpush", "SYSTEM_LAG_IDS_FREE_LIST", lagid) + end return tonumber(lagid) end diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index 9a5e48f8832..dbce7eb1a8a 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -1,4 +1,6 @@ #include "macsecorch.h" +#include "macsecpost.h" +#include "notifier.h" #include #include @@ -38,6 +40,7 @@ extern sai_switch_api_t *sai_switch_api; constexpr bool DEFAULT_ENABLE_ENCRYPT = true; constexpr bool DEFAULT_SCI_IN_SECTAG = false; constexpr sai_macsec_cipher_suite_t DEFAULT_CIPHER_SUITE = SAI_MACSEC_CIPHER_SUITE_GCM_AES_128; +bool saiAclFieldSciMatchSupported = true; static const std::vector macsec_sa_attrs = { @@ -598,6 +601,7 @@ MACsecOrch::MACsecOrch( const std::vector &tables, PortsOrch *port_orch) : Orch(app_db, tables), m_port_orch(port_orch), + m_state_db(state_db), m_state_macsec_port(state_db, STATE_MACSEC_PORT_TABLE_NAME), m_state_macsec_egress_sc(state_db, STATE_MACSEC_EGRESS_SC_TABLE_NAME), m_state_macsec_ingress_sc(state_db, STATE_MACSEC_INGRESS_SC_TABLE_NAME), @@ -621,22 +625,80 @@ MACsecOrch::MACsecOrch( StatsMode::READ, MACSEC_STAT_POLLING_INTERVAL_MS, true), m_gb_macsec_sa_attr_manager( - "GB_FLEX_COUNTER_DB", + true, COUNTERS_MACSEC_SA_ATTR_GROUP, StatsMode::READ, MACSEC_STAT_XPN_POLLING_INTERVAL_MS, true), m_gb_macsec_sa_stat_manager( - "GB_FLEX_COUNTER_DB", + true, COUNTERS_MACSEC_SA_GROUP, StatsMode::READ, MACSEC_STAT_POLLING_INTERVAL_MS, true), m_gb_macsec_flow_stat_manager( - "GB_FLEX_COUNTER_DB", + true, COUNTERS_MACSEC_FLOW_GROUP, StatsMode::READ, MACSEC_STAT_POLLING_INTERVAL_MS, true) { SWSS_LOG_ENTER(); + sai_attr_capability_t capability; + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_ACL_TABLE, + SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI, + &capability) == SAI_STATUS_SUCCESS) + { + if (capability.create_implemented == false) + { + SWSS_LOG_DEBUG("SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI is not supported"); + saiAclFieldSciMatchSupported = false; + } + } + + // Add handler for POST completion callback/notification. + string post_state = getMacsecPostState(m_state_db); + if (post_state == "switch-level-post-in-progress" || + post_state == "macsec-level-post-in-progress" ) + { + m_notificationsDb = make_shared("ASIC_DB", 0); + m_postCompletionNotificationConsumer = new swss::NotificationConsumer(m_notificationsDb.get(), "NOTIFICATIONS"); + auto postCompletionNotificatier = new Notifier(m_postCompletionNotificationConsumer, this, "POST_COMPLETION__NOTIFICATIONS"); + Orch::addExecutor(postCompletionNotificatier); + } + + if (post_state == "switch-level-post-in-progress") + { + // POST was already enabled in switch init. The completion notification may have already been sent + // before MACSecOrch is initialized. So query if POST is completed or not. + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_MACSEC_POST_STATUS; + if (sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr) == SAI_STATUS_SUCCESS) + { + if (attr.value.s32 == SAI_SWITCH_MACSEC_POST_STATUS_PASS) + { + setMacsecPostState(m_state_db, "pass"); + SWSS_LOG_NOTICE("Switch MACSec POST passed"); + } + else if (attr.value.s32 == SAI_SWITCH_MACSEC_POST_STATUS_FAIL) + { + setMacsecPostState(m_state_db, "fail"); + SWSS_LOG_ERROR("Switch MACSec POST failed: oid %" PRIu64, gSwitchId); + } + else + { + SWSS_LOG_NOTICE("Switch MACSec POST status: %d", attr.value.s32); + } + } + else + { + SWSS_LOG_ERROR("Failed to get MACSec POST status"); + } + } + else if (post_state == "macsec-level-post-in-progress") + { + // POST is only supported in MACSec init. Create MACSec and enable POST. + m_enable_post = true; + initMACsecObject(gSwitchId); + SWSS_LOG_NOTICE("Init MACSec objects and enable POST"); + } } MACsecOrch::~MACsecOrch() @@ -649,6 +711,127 @@ MACsecOrch::~MACsecOrch() } } +void MACsecOrch::doTask(NotificationConsumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (&consumer != m_postCompletionNotificationConsumer) + { + return; + } + + std::deque entries; + consumer.pops(entries); + for (auto& entry : entries) + { + handleNotification(consumer, entry); + } +} + +void MACsecOrch::handleNotification(NotificationConsumer &consumer, KeyOpFieldsValuesTuple& entry) +{ + SWSS_LOG_ENTER(); + + if (&consumer != m_postCompletionNotificationConsumer) + { + return; + } + + auto op = kfvOp(entry); + auto data = kfvKey(entry); + SWSS_LOG_NOTICE("Received SAI notification: op %s, data %s", op.c_str(), data.c_str()); + + if (op == "switch_macsec_post_status") + { + sai_object_id_t switch_id; + sai_switch_macsec_post_status_t switch_macsec_post_status; + sai_deserialize_switch_macsec_post_status_ntf(data, switch_id, switch_macsec_post_status); + + string post_state = getMacsecPostState(m_state_db); + if (post_state == "switch-level-post-in-progress") + { + // MACSec POST was enabled in switch init. SAI enables POST in all HW MACSec engines. + // The returned POST status is the aggregated result for all HW MACSec engines. + + if (switch_macsec_post_status == SAI_SWITCH_MACSEC_POST_STATUS_PASS) + { + setMacsecPostState(m_state_db, "pass"); + SWSS_LOG_NOTICE("Switch MACSec POST passed"); + } + else if (switch_macsec_post_status == SAI_SWITCH_MACSEC_POST_STATUS_FAIL) + { + setMacsecPostState(m_state_db, "fail"); + SWSS_LOG_ERROR("Switch MACSec POST failed"); + } + } + else if (post_state == "macsec-level-post-in-progress") + { + SWSS_LOG_ERROR("POST enabled in MACSec init, but got notification from switch init"); + } + } + + if (op == "macsec_post_status") + { + sai_object_id_t macsec_id; + sai_macsec_post_status_t macsec_post_status; + sai_deserialize_macsec_post_status_ntf(data, macsec_id, macsec_post_status); + + if (m_enable_post) + { + // MACSec POST was enabled in MACSec object init. Since two MACSec objects were created + // (one for each direction), two POST status must be returend from SAI. POST is considered + // pass only if POST passes in both MACSec objects. + + string direction = "unknown"; + auto macsec_obj = m_macsec_objs.find(gSwitchId); + if (macsec_obj->second.m_ingress_id == macsec_id) + { + direction = "ingress"; + } + else if (macsec_obj->second.m_egress_id == macsec_id) + { + direction = "egress"; + } + + if (macsec_post_status == SAI_MACSEC_POST_STATUS_PASS) + { + if (direction == "ingress") + { + SWSS_LOG_NOTICE("Ingress MACSec POST passed"); + macsec_obj->second.m_ingress_post_passed = true; + } + else if (direction == "egress") + { + SWSS_LOG_NOTICE("Egress MACSec POST passed"); + macsec_obj->second.m_egress_post_passed = true; + } + + // Check if POST passed on both MACSec objects. + if (macsec_obj->second.m_ingress_post_passed && macsec_obj->second.m_egress_post_passed) + { + setMacsecPostState(m_state_db, "pass"); + SWSS_LOG_NOTICE("Ingress and egress MACSec POST passed"); + } + } + else if(macsec_post_status == SAI_MACSEC_POST_STATUS_FAIL) + { + if (direction == "ingress") + { + SWSS_LOG_ERROR("Ingress MACSec POST failed"); + } + else if (direction == "egress") + { + SWSS_LOG_ERROR("Egress MACSec POST failed"); + } + + // Consider POST failed since it failed on one MACSec object. + setMacsecPostState(m_state_db, "fail"); + SWSS_LOG_ERROR("MACSec POST failed"); + } + } + } +} + void MACsecOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -1032,6 +1215,13 @@ bool MACsecOrch::initMACsecObject(sai_object_id_t switch_id) attr.value.booldata = true; attrs.push_back(attr); + if (m_enable_post) + { + attr.id = SAI_MACSEC_ATTR_ENABLE_POST; + attr.value.booldata = true; + attrs.push_back(attr); + } + sai_status_t status = sai_macsec_api->create_macsec( &macsec_obj.first->second.m_egress_id, switch_id, @@ -1057,6 +1247,13 @@ bool MACsecOrch::initMACsecObject(sai_object_id_t switch_id) attr.value.booldata = true; attrs.push_back(attr); + if (m_enable_post) + { + attr.id = SAI_MACSEC_ATTR_ENABLE_POST; + attr.value.booldata = true; + attrs.push_back(attr); + } + status = sai_macsec_api->create_macsec( &macsec_obj.first->second.m_ingress_id, switch_id, @@ -2352,16 +2549,16 @@ void MACsecOrch::installCounter( switch(counter_type) { case CounterType::MACSEC_SA_ATTR: - MACsecSaAttrStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); + MACsecSaAttrStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats, *ctx.get_switch_id()); break; case CounterType::MACSEC_SA: - MACsecSaStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); + MACsecSaStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats, *ctx.get_switch_id()); MACsecCountersMap(ctx).hset("", obj_name, sai_serialize_object_id(obj_id)); break; case CounterType::MACSEC_FLOW: - MACsecFlowStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); + MACsecFlowStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats, *ctx.get_switch_id()); break; default: @@ -2570,9 +2767,12 @@ bool MACsecOrch::createMACsecACLTable( attr.value.booldata = true; attrs.push_back(attr); - attr.id = SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI; - attr.value.booldata = sci_in_sectag; - attrs.push_back(attr); + if (saiAclFieldSciMatchSupported == true) + { + attr.id = SAI_ACL_TABLE_ATTR_FIELD_MACSEC_SCI; + attr.value.booldata = sci_in_sectag; + attrs.push_back(attr); + } sai_status_t status = sai_acl_api->create_acl_table( &table_id, @@ -2738,7 +2938,7 @@ bool MACsecOrch::createMACsecACLDataEntry( attr.value.aclaction.parameter.s32 = SAI_PACKET_ACTION_DROP; attr.value.aclaction.enable = true; attrs.push_back(attr); - if (sci_in_sectag) + if ((saiAclFieldSciMatchSupported == true) && sci_in_sectag) { attr.id = SAI_ACL_ENTRY_ATTR_FIELD_MACSEC_SCI; attr.value.aclfield.enable = true; diff --git a/orchagent/macsecorch.h b/orchagent/macsecorch.h index 9c6e2be6366..6673f701018 100644 --- a/orchagent/macsecorch.h +++ b/orchagent/macsecorch.h @@ -40,6 +40,8 @@ class MACsecOrch : public Orch private: void doTask(Consumer &consumer); + void doTask(NotificationConsumer &consumer); + void handleNotification(NotificationConsumer &consumer, KeyOpFieldsValuesTuple& entry); public: using TaskArgs = std::vector; @@ -57,6 +59,11 @@ class MACsecOrch : public Orch task_process_status taskUpdateIngressSA(const std::string & port_sci_an, const TaskArgs & sa_attr); task_process_status taskDeleteIngressSA(const std::string & port_sci_an, const TaskArgs & sa_attr); + DBConnector * m_state_db; + shared_ptr m_notificationsDb; + NotificationConsumer* m_postCompletionNotificationConsumer; + bool m_enable_post; + PortsOrch * m_port_orch; Table m_state_macsec_port; @@ -117,6 +124,8 @@ class MACsecOrch : public Orch map > m_macsec_ports; bool m_sci_in_ingress_macsec_acl; sai_uint8_t m_max_sa_per_sc; + bool m_egress_post_passed; + bool m_ingress_post_passed; }; map m_macsec_objs; map > m_macsec_ports; diff --git a/orchagent/macsecpost.cpp b/orchagent/macsecpost.cpp new file mode 100644 index 00000000000..0ac72735134 --- /dev/null +++ b/orchagent/macsecpost.cpp @@ -0,0 +1,42 @@ +#include "dbconnector.h" +#include "macsecpost.h" +#include "redisutility.h" +#include "schema.h" +#include "table.h" + +namespace swss { + +void setMacsecPostState(DBConnector *stateDb, string postState) +{ + Table macsecPostStateTable = Table(stateDb, STATE_FIPS_MACSEC_POST_TABLE_NAME); + vector fvts; + FieldValueTuple postStateFvt("post_state", postState); + fvts.push_back(postStateFvt); + + auto now = std::chrono::system_clock::now(); + std::time_t now_c = std::chrono::system_clock::to_time_t(now); + char buffer[32]; + std::strftime(buffer, sizeof(buffer), "%a %b %d %H:%M:%S %Y", std::gmtime(&now_c)); + FieldValueTuple lastUpdateTimeFvt("last_update_time", buffer); + fvts.push_back(lastUpdateTimeFvt); + + macsecPostStateTable.set("sai", fvts); +} + +string getMacsecPostState(DBConnector *stateDb) +{ + std::string postState = ""; + std::vector fvts; + Table macsecPostStateTable = Table(stateDb, STATE_FIPS_MACSEC_POST_TABLE_NAME); + if (macsecPostStateTable.get("sai", fvts)) + { + auto state = fvsGetValue(fvts, "post_state", true); + if (state) + { + postState = *state; + } + } + return postState; +} + +} diff --git a/orchagent/macsecpost.h b/orchagent/macsecpost.h new file mode 100644 index 00000000000..d7b1b25fd66 --- /dev/null +++ b/orchagent/macsecpost.h @@ -0,0 +1,13 @@ +#ifndef ORCHAGENT_MACSECPOST_H +#define ORCHAGENT_MACSECPOST_H + +using namespace std; + +namespace swss { + +void setMacsecPostState(DBConnector *stateDb, string postState); +string getMacsecPostState(DBConnector *stateDb); + +} + +#endif // ORCHAGENT_MACSECPOST_H diff --git a/orchagent/main.cpp b/orchagent/main.cpp index 0add517a05d..9ff87f38d64 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -17,18 +17,22 @@ extern "C" { #include #include #include +#include +#include #include #include #include #include "orchdaemon.h" +#include "orch_zmq_config.h" #include "sai_serialize.h" #include "saihelper.h" #include "notifications.h" #include #include "warm_restart.h" #include "gearboxutils.h" +#include "macsecpost.h" using namespace std; using namespace swss; @@ -52,6 +56,7 @@ extern size_t gMaxBulkSize; #define DEFAULT_BATCH_SIZE 128 extern int gBatchSize; +bool gRingMode = false; bool gSyncMode = false; sai_redis_communication_mode_t gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_REDIS_ASYNC; string gAsicInstance; @@ -62,16 +67,28 @@ extern bool gIsNatSupported; #define SWSS_RECORD_ENABLE (0x1 << 1) #define RESPONSE_PUBLISHER_RECORD_ENABLE (0x1 << 2) +/* orchagent heart beat message interval */ +#define HEART_BEAT_INTERVAL_MSECS_DEFAULT 10 * 1000 + string gMySwitchType = ""; +string gMySwitchSubType = ""; int32_t gVoqMySwitchId = -1; int32_t gVoqMaxCores = 0; uint32_t gCfgSystemPorts = 0; string gMyHostName = ""; string gMyAsicName = ""; +bool gTraditionalFlexCounter = false; +uint32_t create_switch_timeout = 0; +bool gMultiAsicVoq = false; + +bool isChassisDbInUse() +{ + return gMultiAsicVoq; +} void usage() { - cout << "usage: orchagent [-h] [-r record_type] [-d record_location] [-f swss_rec_filename] [-j sairedis_rec_filename] [-b batch_size] [-m MAC] [-i INST_ID] [-s] [-z mode] [-k bulk_size] [-q zmq_server_address]" << endl; + cout << "usage: orchagent [-h] [-r record_type] [-d record_location] [-f swss_rec_filename] [-j sairedis_rec_filename] [-b batch_size] [-m MAC] [-i INST_ID] [-s] [-z mode] [-k bulk_size] [-q zmq_server_address] [-c mode] [-t create_switch_timeout] [-v VRF] [-I heart_beat_interval] [-R] [-M]" << endl; cout << " -h: display this message" << endl; cout << " -r record_type: record orchagent logs with type (default 3)" << endl; cout << " Bit 0: sairedis.rec, Bit 1: swss.rec, Bit 2: responsepublisher.rec. For example:" << endl; @@ -90,6 +107,13 @@ void usage() cout << " -j sairedis_rec_filename: sairedis record log filename(default sairedis.rec)" << endl; cout << " -k max bulk size in bulk mode (default 1000)" << endl; cout << " -q zmq_server_address: ZMQ server address (default disable ZMQ)" << endl; + cout << " -c counter mode (traditional|asic_db), default: asic_db" << endl; + cout << " -t Override create switch timeout, in sec" << endl; + cout << " -v vrf: VRF name (default empty)" << endl; + cout << " -I heart_beat_interval: Heart beat interval in millisecond (default 10)" << endl; + cout << " -R enable the ring thread feature" << endl; + cout << " -M enable SAI MACSec POST" << endl; + cout << " -D Delay in seconds before flex counter processing begins after orchagent startup (default 0)" << endl; } void sighup_handler(int signo) @@ -115,7 +139,8 @@ void syncd_apply_view() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to notify syncd APPLY_VIEW %d", status); - handleSaiFailure(true); + handleSaiFailure(SAI_API_SWITCH, "set", status); + return; } } @@ -155,7 +180,7 @@ void init_gearbox_phys(DBConnector *applDb) delete tmpGearboxTable; } -void getCfgSwitchType(DBConnector *cfgDb, string &switch_type) +void getCfgSwitchType(DBConnector *cfgDb, string &switch_type, string &switch_sub_type) { Table cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); @@ -179,6 +204,28 @@ void getCfgSwitchType(DBConnector *cfgDb, string &switch_type) //If configured switch type is none of the supported, assume regular switch switch_type = "switch"; } + + try + { + cfgDeviceMetaDataTable.hget("localhost", "subtype", switch_sub_type); + } + catch(const std::system_error& e) + { + SWSS_LOG_ERROR("System error in parsing switch subtype: %s", e.what()); + } + +} + +bool isChassisAppDbPresent() +{ + std::ifstream file("/etc/sonic/database_config.json"); + if (!file.is_open()) return false; + + nlohmann::json db_config; + file >> db_config; + + return db_config.contains("DATABASES") && + db_config["DATABASES"].contains("CHASSIS_APP_DB"); } bool getSystemPortConfigList(DBConnector *cfgDb, DBConnector *appDb, vector &sysportcfglist) @@ -339,12 +386,16 @@ int main(int argc, char **argv) string record_location = Recorder::DEFAULT_DIR; string swss_rec_filename = Recorder::SWSS_FNAME; string sairedis_rec_filename = Recorder::SAIREDIS_FNAME; - string zmq_server_address = "tcp://127.0.0.1:" + to_string(ORCH_ZMQ_PORT); - bool enable_zmq = false; + string zmq_server_address = ""; + string vrf; string responsepublisher_rec_filename = Recorder::RESPPUB_FNAME; int record_type = 3; // Only swss and sairedis recordings enabled by default. + long heartBeatInterval = HEART_BEAT_INTERVAL_MSECS_DEFAULT; - while ((opt = getopt(argc, argv, "b:m:r:f:j:d:i:hsz:k:q:")) != -1) + // Disable SAI MACSec POST by default. Use option -M to enable it. + bool macsec_post_enabled = false; + + while ((opt = getopt(argc, argv, "b:m:r:f:j:d:i:hsz:k:q:c:t:v:I:R:D:M")) != -1) { switch (opt) { @@ -395,6 +446,12 @@ int main(int argc, char **argv) case 'z': sai_deserialize_redis_communication_mode(optarg, gRedisCommunicationMode); break; + case 'c': + if (optarg == string("traditional")) + { + gTraditionalFlexCounter = true; + } + break; case 'f': if (optarg) @@ -426,9 +483,40 @@ int main(int argc, char **argv) if (optarg) { zmq_server_address = optarg; - enable_zmq = true; } break; + case 't': + create_switch_timeout = atoi(optarg); + break; + case 'v': + if (optarg) + { + vrf = optarg; + } + break; + case 'I': + if (optarg) + { + auto interval = atoi(optarg); + if (interval >= 0) + { + heartBeatInterval = interval; + SWSS_LOG_NOTICE("Setting heartbeat interval as %ld", heartBeatInterval); + } + else + { + heartBeatInterval = HEART_BEAT_INTERVAL_MSECS_DEFAULT; + SWSS_LOG_ERROR("Invalid input for heartbeat interval: %d. use default interval: %ld", interval, heartBeatInterval); + } + } + break; + case 'R': + gRingMode = true; + break; + case 'M': + macsec_post_enabled = true; + break; + case 'D': { gFlexCounterDelaySec = swss::to_int(optarg); } break; default: /* '?' */ exit(EXIT_FAILURE); } @@ -446,6 +534,7 @@ int main(int argc, char **argv) /* Initialize sairedis */ initSaiApi(); initSaiRedis(); + initFlexCounterTables(); /* Initialize remaining recorder parameters */ Recorder::Instance().swss.setRecord( @@ -470,18 +559,18 @@ int main(int argc, char **argv) // Instantiate ZMQ server shared_ptr zmq_server = nullptr; - if (enable_zmq) + if (zmq_server_address.empty()) { - SWSS_LOG_NOTICE("Instantiate ZMQ server : %s", zmq_server_address.c_str()); - zmq_server = make_shared(zmq_server_address.c_str()); + SWSS_LOG_NOTICE("The ZMQ channel on the northbound side of orchagent has been disabled."); } else { - SWSS_LOG_NOTICE("ZMQ disabled"); + SWSS_LOG_NOTICE("The ZMQ channel on the northbound side of orchagent has been initialized: %s, %s", zmq_server_address.c_str(), vrf.c_str()); + zmq_server = create_zmq_server(zmq_server_address); } // Get switch_type - getCfgSwitchType(&config_db, gMySwitchType); + getCfgSwitchType(&config_db, gMySwitchType, gMySwitchSubType); sai_attribute_t attr; vector attrs; @@ -490,13 +579,6 @@ int main(int argc, char **argv) attr.value.booldata = true; attrs.push_back(attr); - if (gMySwitchType != "dpu") - { - attr.id = SAI_SWITCH_ATTR_FDB_EVENT_NOTIFY; - attr.value.ptr = (void *)on_fdb_event; - attrs.push_back(attr); - } - attr.id = SAI_SWITCH_ATTR_PORT_STATE_CHANGE_NOTIFY; attr.value.ptr = (void *)on_port_state_change; attrs.push_back(attr); @@ -512,21 +594,6 @@ int main(int argc, char **argv) attrs.push_back(attr); } - // SAI_REDIS_SWITCH_ATTR_SYNC_MODE attribute only setBuffer and g_syncMode to true - // since it is not using ASIC_DB, we can execute it before create_switch - // when g_syncMode is set to true here, create_switch will wait the response from syncd - if (gSyncMode) - { - SWSS_LOG_WARN("sync mode is depreacated, use -z param"); - - gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_REDIS_SYNC; - } - - attr.id = SAI_REDIS_SWITCH_ATTR_REDIS_COMMUNICATION_MODE; - attr.value.s32 = gRedisCommunicationMode; - - sai_switch_api->set_switch_attribute(gSwitchId, &attr); - if (!gAsicInstance.empty()) { attr.id = SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO; @@ -570,7 +637,19 @@ int main(int argc, char **argv) //Connect to CHASSIS_APP_DB in redis-server in control/supervisor card as per //connection info in database_config.json - chassis_app_db = make_shared("CHASSIS_APP_DB", 0, true); + chassis_app_db = nullptr; + if (isChassisAppDbPresent()) + { + gMultiAsicVoq = true; + try + { + chassis_app_db = make_shared("CHASSIS_APP_DB", 0, true); + } + catch (const std::exception& e) + { + SWSS_LOG_NOTICE("CHASSIS_APP_DB not available, operating in standalone VOQ mode"); + } + } } else if (gMySwitchType == "fabric") { @@ -579,11 +658,56 @@ int main(int argc, char **argv) attr.value.u32 = SAI_SWITCH_TYPE_FABRIC; attrs.push_back(attr); + //Read switch_id from config_db. + Table cfgDeviceMetaDataTable(&config_db, CFG_DEVICE_METADATA_TABLE_NAME); + string value; + if (cfgDeviceMetaDataTable.hget("localhost", "switch_id", value)) + { + if (value.size()) + { + gVoqMySwitchId = stoi(value); + } + + if (gVoqMySwitchId < 0) + { + SWSS_LOG_ERROR("Invalid fabric switch id %d configured", gVoqMySwitchId); + exit(EXIT_FAILURE); + } + } + else + { + SWSS_LOG_ERROR("Fabric switch id is not configured"); + exit(EXIT_FAILURE); + } + attr.id = SAI_SWITCH_ATTR_SWITCH_ID; attr.value.u32 = gVoqMySwitchId; attrs.push_back(attr); } + string macsec_post_state; + if (gMySwitchType != "fabric" && macsec_post_enabled) + { + macsec_post_state = "switch-level-post-in-progress"; + + attr.id = SAI_SWITCH_ATTR_MACSEC_ENABLE_POST; + attr.value.booldata = true; + attrs.push_back(attr); + + attr.id = SAI_SWITCH_ATTR_SWITCH_MACSEC_POST_STATUS_NOTIFY; + attr.value.ptr = (void *)on_switch_macsec_post_status_notify; + attrs.push_back(attr); + + attr.id = SAI_SWITCH_ATTR_MACSEC_POST_STATUS_NOTIFY; + attr.value.ptr = (void *)on_macsec_post_status_notify; + attrs.push_back(attr); + } + else + { + macsec_post_state = "disabled"; + } + setMacsecPostState(&state_db, macsec_post_state); + /* Must be last Attribute */ attr.id = SAI_REDIS_SWITCH_ATTR_CONTEXT; attr.value.u64 = gSwitchId; @@ -598,7 +722,7 @@ int main(int argc, char **argv) delay_factor = 2; } - if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu" || asan_enabled) + if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu" || asan_enabled || create_switch_timeout) { /* We set this long timeout in order for orchagent to wait enough time for * response from syncd. It is needed since switch create takes more time @@ -606,7 +730,12 @@ int main(int argc, char **argv) * and systems ports to initialize */ - if (gMySwitchType == "voq" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu") + if (create_switch_timeout) + { + /* Convert timeout to milliseconds from seconds */ + attr.value.u64 = (create_switch_timeout * 1000); + } + else if (gMySwitchType == "voq" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu") { attr.value.u64 = (5 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); } @@ -637,11 +766,12 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create a switch, rv:%d", status); - handleSaiFailure(true); + handleSaiFailure(SAI_API_SWITCH, "create", status); + return EXIT_FAILURE; } SWSS_LOG_NOTICE("Create a switch, id:%" PRIu64, gSwitchId); - if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu") + if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet" || gMySwitchType == "dpu" || create_switch_timeout) { /* Set syncd response timeout back to the default value */ attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; @@ -668,7 +798,8 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to get MAC address from switch, rv:%d", status); - handleSaiFailure(true); + handleSaiFailure(SAI_API_SWITCH, "get", status); + return EXIT_FAILURE; } else { @@ -683,12 +814,43 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Fail to get switch virtual router ID %d", status); - handleSaiFailure(true); + handleSaiFailure(SAI_API_SWITCH, "get", status); + return EXIT_FAILURE; } gVirtualRouterId = attr.value.oid; SWSS_LOG_NOTICE("Get switch virtual router ID %" PRIx64, gVirtualRouterId); + /* Query MACSec POST capability and set POST state in state DB accordingly */ + if (macsec_post_enabled) + { + sai_attr_capability_t post_capability; + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_MACSEC_ENABLE_POST, + &post_capability) == SAI_STATUS_SUCCESS && + post_capability.create_implemented) + { + // POST is supported in switch init, and it was already enabled in switch init. + SWSS_LOG_NOTICE("MACSec POST enabled in switch init"); + } + else if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_MACSEC, + SAI_MACSEC_ATTR_ENABLE_POST, + &post_capability) == SAI_STATUS_SUCCESS && + post_capability.create_implemented) + { + // POST is only supported in MACSec init. Set POST state to notify MACSecOrch + // to perform POST. + setMacsecPostState(&state_db, "macsec-level-post-in-progress"); + SWSS_LOG_NOTICE("MACSec POST will be enabled in MACSec init"); + } + else + { + // POST is not supported by SAI. Don't declare that SAI POST fails. + setMacsecPostState(&state_db, "disabled"); + SWSS_LOG_ERROR("MACSec POST is not supported by SAI"); + } + } + /* Get the NAT supported info */ attr.id = SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY; @@ -725,7 +887,8 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create underlay router interface %d", status); - handleSaiFailure(true); + handleSaiFailure(SAI_API_ROUTER_INTERFACE, "create", status); + return EXIT_FAILURE; } SWSS_LOG_NOTICE("Created underlay router interface ID %" PRIx64, gUnderlayIfId); @@ -736,9 +899,29 @@ int main(int argc, char **argv) } shared_ptr orchDaemon; - if (gMySwitchType != "fabric") + DBConnector *chassis_db = nullptr; + if (chassis_app_db != nullptr) + { + chassis_db = chassis_app_db.get(); + } + + /* + * Declare shared pointers for dpu specific databases. + * These dpu databases exist on the npu for smartswitch. + */ + shared_ptr dpu_app_db; + shared_ptr dpu_app_state_db; + + if (gMySwitchType == "dpu") { - orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_app_db.get(), zmq_server.get()); + dpu_app_db = make_shared("DPU_APPL_DB", 0, true); + dpu_app_state_db = make_shared("DPU_APPL_STATE_DB", 0, true); + orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_app_db.get(), dpu_app_db.get(), dpu_app_state_db.get(), zmq_server.get()); + } + + else if (gMySwitchType != "fabric") + { + orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_db, zmq_server.get()); if (gMySwitchType == "voq") { orchDaemon->setFabricEnabled(true); @@ -748,7 +931,12 @@ int main(int argc, char **argv) } else { - orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_app_db.get(), zmq_server.get()); + orchDaemon = make_shared(&appl_db, &config_db, &state_db, chassis_db, zmq_server.get()); + } + + if (gRingMode) { + /* Initialize the ring before OrchDaemon initializing Orchs */ + orchDaemon->enableRingBuffer(); } if (!orchDaemon->init()) @@ -766,7 +954,15 @@ int main(int argc, char **argv) syncd_apply_view(); } - orchDaemon->start(); + if (zmq_server) + { + // To prevent message loss between ZmqServer's bind operation and the creation of ZmqProducerStateTable, + // use lazy binding and call bind() only after the handler has been registered. + zmq_server->bind(); + SWSS_LOG_NOTICE("ZMQ channel on the northbound side of Orchagent successfully bound: %s, %s", zmq_server_address.c_str(), vrf.c_str()); + } + + orchDaemon->start(heartBeatInterval); return 0; } diff --git a/orchagent/mirrororch.cpp b/orchagent/mirrororch.cpp index 5647d488791..35a58cb887b 100644 --- a/orchagent/mirrororch.cpp +++ b/orchagent/mirrororch.cpp @@ -34,7 +34,8 @@ #define MIRROR_SESSION_DEFAULT_VLAN_PRI 0 #define MIRROR_SESSION_DEFAULT_VLAN_CFI 0 -#define MIRROR_SESSION_DEFAULT_IP_HDR_VER 4 +#define MIRROR_SESSION_IP_HDR_VER_4 4 +#define MIRROR_SESSION_IP_HDR_VER_6 6 #define MIRROR_SESSION_DSCP_SHIFT 2 #define MIRROR_SESSION_DSCP_MIN 0 #define MIRROR_SESSION_DSCP_MAX 63 @@ -76,13 +77,14 @@ MirrorEntry::MirrorEntry(const string& platform) : } MirrorOrch::MirrorOrch(TableConnector stateDbConnector, TableConnector confDbConnector, - PortsOrch *portOrch, RouteOrch *routeOrch, NeighOrch *neighOrch, FdbOrch *fdbOrch, PolicerOrch *policerOrch) : + PortsOrch *portOrch, RouteOrch *routeOrch, NeighOrch *neighOrch, FdbOrch *fdbOrch, PolicerOrch *policerOrch, SwitchOrch *switchOrch) : Orch(confDbConnector.first, confDbConnector.second), m_portsOrch(portOrch), m_routeOrch(routeOrch), m_neighOrch(neighOrch), m_fdbOrch(fdbOrch), m_policerOrch(policerOrch), + m_switchOrch(switchOrch), m_mirrorTable(stateDbConnector.first, stateDbConnector.second) { sai_status_t status; @@ -329,7 +331,7 @@ bool MirrorOrch::validateSrcPortList(const string& srcPortList) vector portv; int portCount = 0; m_portsOrch->getLagMember(port, portv); - for (const auto p : portv) + for (const auto &p : portv) { if (checkPortExistsInSrcPortList(p.m_alias, srcPortList)) { @@ -380,6 +382,9 @@ task_process_status MirrorOrch::createEntry(const string& key, const vectorisPortIngressMirrorSupported()) + { + SWSS_LOG_ERROR("Port ingress mirror is not supported by the ASIC"); + return false; + } + if (!ingress && !m_switchOrch->isPortEgressMirrorSupported()) + { + SWSS_LOG_ERROR("Port egress mirror is not supported by the ASIC"); + return false; + } + sai_status_t status; sai_attribute_t port_attr; port_attr.id = ingress ? SAI_PORT_ATTR_INGRESS_MIRROR_SESSION: @@ -828,7 +843,7 @@ bool MirrorOrch::setUnsetPortMirror(Port port, { vector portv; m_portsOrch->getLagMember(port, portv); - for (const auto p : portv) + for (const auto &p : portv) { if (p.m_type != Port::PHY) { @@ -992,7 +1007,7 @@ bool MirrorOrch::activateSession(const string& name, MirrorEntry& session) attrs.push_back(attr); attr.id = SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION; - attr.value.u8 = MIRROR_SESSION_DEFAULT_IP_HDR_VER; + attr.value.u8 = session.dstIp.isV4() ? MIRROR_SESSION_IP_HDR_VER_4 : MIRROR_SESSION_IP_HDR_VER_6; attrs.push_back(attr); // TOS value format is the following: @@ -1341,7 +1356,7 @@ void MirrorOrch::updateNextHop(const NextHopUpdate& update) else { string alias = ""; - session.nexthopInfo.nexthop = NextHopKey("0.0.0.0", alias); + session.nexthopInfo.nexthop = session.dstIp.isV4() ? NextHopKey("0.0.0.0", alias) : NextHopKey("::", alias); } // Update State DB Nexthop diff --git a/orchagent/mirrororch.h b/orchagent/mirrororch.h index d498a7ef6c9..15bdc789629 100644 --- a/orchagent/mirrororch.h +++ b/orchagent/mirrororch.h @@ -77,7 +77,7 @@ class MirrorOrch : public Orch, public Observer, public Subject { public: MirrorOrch(TableConnector appDbConnector, TableConnector confDbConnector, - PortsOrch *portOrch, RouteOrch *routeOrch, NeighOrch *neighOrch, FdbOrch *fdbOrch, PolicerOrch *policerOrch); + PortsOrch *portOrch, RouteOrch *routeOrch, NeighOrch *neighOrch, FdbOrch *fdbOrch, PolicerOrch *policerOrch, SwitchOrch *switchOrch); bool bake() override; void update(SubjectType, void *); @@ -95,6 +95,7 @@ class MirrorOrch : public Orch, public Observer, public Subject NeighOrch *m_neighOrch; FdbOrch *m_fdbOrch; PolicerOrch *m_policerOrch; + SwitchOrch *m_switchOrch; // Maximum number of traffic classes starting at 0, thus queue can be 0 - m_maxNumTC-1 uint8_t m_maxNumTC; diff --git a/orchagent/mplsrouteorch.cpp b/orchagent/mplsrouteorch.cpp index 73dbbdb1944..cce3fe5ca2c 100644 --- a/orchagent/mplsrouteorch.cpp +++ b/orchagent/mplsrouteorch.cpp @@ -17,7 +17,7 @@ extern CrmOrch *gCrmOrch; extern NhgOrch *gNhgOrch; extern CbfNhgOrch *gCbfNhgOrch; -void RouteOrch::doLabelTask(Consumer& consumer) +void RouteOrch::doLabelTask(ConsumerBase& consumer) { SWSS_LOG_ENTER(); @@ -520,7 +520,8 @@ bool RouteOrch::addLabelRoute(LabelRouteBulkContext& ctx, const NextHopGroupKey m_neighOrch->isNeighborResolved(nexthop)) { /* since IP neighbor NH exists, neighbor is resolved, add MPLS NH */ - if (m_neighOrch->addNextHop(nexthop)) + NeighborContext ctx = NeighborContext(nexthop); + if (m_neighOrch->addNextHop(ctx)) { next_hop_id = m_neighOrch->getNextHopId(nexthop); } diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 72b1a32fa60..356002168f4 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -553,6 +553,10 @@ void MuxCable::rollbackStateChange() st_chg_in_progress_ = true; state_ = prev_state_; bool success = false; + + nbr_handler_->clearBulkers(); + gNeighOrch->clearBulkers(); + switch (prev_state_) { case MuxState::MUX_STATE_ACTIVE: @@ -631,6 +635,7 @@ bool MuxCable::nbrHandler(bool enable, bool update_rt) if (enable) { ret = nbr_handler_->enable(update_rt); + // Loop through all routes with nexthops through this mux cable when changing state updateRoutes(); } else @@ -641,6 +646,7 @@ bool MuxCable::nbrHandler(bool enable, bool update_rt) SWSS_LOG_INFO("Null NH object id, retry for %s", peer_ip4_.to_string().c_str()); return false; } + // Loop through all routes with nexthops through this mux cable when changing state updateRoutes(); ret = nbr_handler_->disable(tnh); } @@ -652,7 +658,6 @@ void MuxCable::updateNeighbor(NextHopKey nh, bool add) SWSS_LOG_NOTICE("Processing update on neighbor %s for mux %s, add %d, state %d", nh.ip_address.to_string().c_str(), mux_name_.c_str(), add, state_); sai_object_id_t tnh = mux_orch_->getNextHopTunnelId(MUX_TUNNEL, peer_ip4_); - nbr_handler_->update(nh, tnh, add, state_); if (add) { mux_orch_->addNexthop(nh, mux_name_); @@ -661,7 +666,8 @@ void MuxCable::updateNeighbor(NextHopKey nh, bool add) { mux_orch_->removeNexthop(nh); } - updateRoutes(); + nbr_handler_->update(nh, tnh, add, state_); + updateRoutesForNextHop(nh); } /** @@ -669,7 +675,6 @@ void MuxCable::updateNeighbor(NextHopKey nh, bool add) */ void MuxCable::updateRoutes() { - SWSS_LOG_INFO("Updating routes pointing to multiple mux nexthops"); MuxNeighbor neighbors = nbr_handler_->getNeighbors(); string alias = nbr_handler_->getAlias(); for (auto nh = neighbors.begin(); nh != neighbors.end(); nh ++) @@ -680,12 +685,32 @@ void MuxCable::updateRoutes() { for (auto rt = routes.begin(); rt != routes.end(); rt++) { - mux_orch_->updateRoute(rt->prefix, true); + SWSS_LOG_NOTICE("Checking route %s for multi-mux nexthops", + rt->prefix.to_string().c_str()); + mux_orch_->updateRoute(rt->prefix); } } } } +/** + * @brief updates routes for given nexthop if part of multi-mux route + * @param nh NextHopKey to search routes + */ +void MuxCable::updateRoutesForNextHop(NextHopKey nh) +{ + std::set routes; + if (gRouteOrch->getRoutesForNexthop(routes, nh)) + { + SWSS_LOG_NOTICE("Updating multi-mux routes with nexthop: %s", + nh.ip_address.to_string().c_str()); + for (auto rt = routes.begin(); rt != routes.end(); rt++) + { + mux_orch_->updateRoute(rt->prefix); + } + } +} + void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, MuxState state) { uint32_t num_routes = 0; @@ -716,9 +741,12 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu neighbors_[nh.ip_address] = gNeighOrch->getLocalNextHopId(nh); gNeighOrch->enableNeighbor(nh); gRouteOrch->updateNextHopRoutes(nh, num_routes); + gNeighOrch->increaseNextHopRefCount(nh, num_routes); break; case MuxState::MUX_STATE_STANDBY: neighbors_[nh.ip_address] = tunnelId; + gRouteOrch->updateNextHopRoutes(nh, num_routes); + gNeighOrch->decreaseNextHopRefCount(nh, num_routes); gNeighOrch->disableNeighbor(nh); updateTunnelRoute(nh, true); create_route(pfx, tunnelId); @@ -744,6 +772,8 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu bool MuxNbrHandler::enable(bool update_rt) { NeighborEntry neigh; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -751,13 +781,21 @@ bool MuxNbrHandler::enable(bool update_rt) SWSS_LOG_INFO("Enabling neigh %s on %s", it->first.to_string().c_str(), alias_.c_str()); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->enableNeighbor(neigh)) - { - SWSS_LOG_INFO("Enabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); + it++; + } + + if (!gNeighOrch->enableNeighbors(neigh_ctx_list)) + { + return false; + } + it = neighbors_.begin(); + while (it != neighbors_.end()) + { /* Update NH to point to learned neighbor */ + neigh = NeighborEntry(it->first, alias_); it->second = gNeighOrch->getLocalNextHopId(neigh); /* Reprogram route */ @@ -772,45 +810,29 @@ bool MuxNbrHandler::enable(bool update_rt) /* Increment ref count for new NHs */ gNeighOrch->increaseNextHopRefCount(nh_key, num_routes); - /* - * Invalidate current nexthop group and update with new NH - * Ref count update is not required for tunnel NH IDs (nh_removed) - */ - uint32_t nh_removed, nh_added; - if (!gRouteOrch->invalidnexthopinNextHopGroup(nh_key, nh_removed)) - { - SWSS_LOG_ERROR("Removing existing NH failed for %s", nh_key.ip_address.to_string().c_str()); - return false; - } - - if (!gRouteOrch->validnexthopinNextHopGroup(nh_key, nh_added)) - { - SWSS_LOG_ERROR("Adding NH failed for %s", nh_key.ip_address.to_string().c_str()); - return false; - } - - /* Increment ref count for ECMP NH members */ - gNeighOrch->increaseNextHopRefCount(nh_key, nh_added); - IpPrefix pfx = it->first.to_string(); if (update_rt) { - if (remove_route(pfx) != SAI_STATUS_SUCCESS) - { - return false; - } + route_ctx_list.push_back(MuxRouteBulkContext(pfx)); updateTunnelRoute(nh_key, false); } it++; } + if (update_rt && !removeRoutes(route_ctx_list)) + { + return false; + } + return true; } bool MuxNbrHandler::disable(sai_object_id_t tnh) { NeighborEntry neigh; + std::list neigh_ctx_list; + std::list route_ctx_list; auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -832,41 +854,28 @@ bool MuxNbrHandler::disable(sai_object_id_t tnh) /* Decrement ref count for old NHs */ gNeighOrch->decreaseNextHopRefCount(nh_key, num_routes); - /* Invalidate current nexthop group and update with new NH */ - uint32_t nh_removed, nh_added; - if (!gRouteOrch->invalidnexthopinNextHopGroup(nh_key, nh_removed)) - { - SWSS_LOG_ERROR("Removing existing NH failed for %s", nh_key.ip_address.to_string().c_str()); - return false; - } - - /* Decrement ref count for ECMP NH members */ - gNeighOrch->decreaseNextHopRefCount(nh_key, nh_removed); - - if (!gRouteOrch->validnexthopinNextHopGroup(nh_key, nh_added)) - { - SWSS_LOG_ERROR("Adding NH failed for %s", nh_key.ip_address.to_string().c_str()); - return false; - } - updateTunnelRoute(nh_key, true); IpPrefix pfx = it->first.to_string(); - if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) - { - return false; - } + route_ctx_list.push_back(MuxRouteBulkContext(pfx, it->second)); neigh = NeighborEntry(it->first, alias_); - if (!gNeighOrch->disableNeighbor(neigh)) - { - SWSS_LOG_INFO("Disabling neigh failed for %s", neigh.ip_address.to_string().c_str()); - return false; - } + // Create neighbor context with bulk_op enabled + neigh_ctx_list.push_back(NeighborContext(neigh, true)); it++; } + if (!addRoutes(route_ctx_list)) + { + return false; + } + + if (!gNeighOrch->disableNeighbors(neigh_ctx_list)) + { + return false; + } + return true; } @@ -881,6 +890,141 @@ sai_object_id_t MuxNbrHandler::getNextHopId(const NextHopKey nhKey) return SAI_NULL_OBJECT_ID; } +bool MuxNbrHandler::addRoutes(std::list& bulk_ctx_list) +{ + sai_status_t status; + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Adding route entry %s, nh %" PRIx64 " to bulker", ctx->pfx.getIp().to_string().c_str(), ctx->nh); + + object_statuses.emplace_back(); + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + attrs.push_back(attr); + + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = ctx->nh; + attrs.push_back(attr); + + status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)attrs.size(), attrs.data()); + } + + gRouteBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + auto it_status = object_statuses.begin(); + status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { + SWSS_LOG_INFO("Tunnel route to %s already exists", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", + ctx->pfx.getIp().to_string().c_str(), ctx->nh, status); + ret = false; + continue; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Created tunnel route to %s ", ctx->pfx.to_string().c_str()); + } + + gRouteBulker.clear(); + return ret; +} + +bool MuxNbrHandler::removeRoutes(std::list& bulk_ctx_list) +{ + sai_status_t status; + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + SWSS_LOG_INFO("Removing route entry %s, nh %" PRIx64 "", ctx->pfx.getIp().to_string().c_str(), ctx->nh); + + object_statuses.emplace_back(); + status = gRouteBulker.remove_entry(&object_statuses.back(), &route_entry); + } + + gRouteBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + auto& object_statuses = ctx->object_statuses; + auto it_status = object_statuses.begin(); + status = *it_status++; + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = gVirtualRouterId; + copy(route_entry.destination, ctx->pfx); + subnet(route_entry.destination, route_entry.destination); + + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) { + SWSS_LOG_INFO("Tunnel route to %s already removed", ctx->pfx.to_string().c_str()); + continue; + } + SWSS_LOG_ERROR("Failed to remove tunnel route %s, rv:%d", + ctx->pfx.getIp().to_string().c_str(), status); + ret = false; + continue; + } + + if (route_entry.destination.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + + SWSS_LOG_NOTICE("Removed tunnel route to %s ", ctx->pfx.to_string().c_str()); + } + + gRouteBulker.clear(); + return ret; +} + void MuxNbrHandler::updateTunnelRoute(NextHopKey nh, bool add) { MuxOrch* mux_orch = gDirectory.get(); @@ -1087,24 +1231,29 @@ sai_object_id_t MuxOrch::getNextHopTunnelId(std::string tunnelKey, IpAddress& ip return it->second.nh_id; } +sai_object_id_t MuxOrch::getTunnelNextHopId() +{ + if (!mux_peer_switch_.isZero()) + { + auto it = mux_tunnel_nh_.find(mux_peer_switch_); + if (it != mux_tunnel_nh_.end()) + { + return it->second.nh_id; + } + } + + return SAI_NULL_OBJECT_ID; +} + /** * @brief updates the given route to point to a single active NH or tunnel * @param pfx IpPrefix of route to update - * @param remove bool only true when route is getting removed */ -void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) +void MuxOrch::updateRoute(const IpPrefix &pfx) { NextHopGroupKey nhg_key; NextHopGroupEntry nhg_entry; - if (!add) - { - SWSS_LOG_INFO("Removing route %s from mux_multi_active_nh_table", - pfx.to_string().c_str()); - mux_multi_active_nh_table.erase(pfx); - return; - } - /* get nexthop group key from syncd */ nhg_key = gRouteOrch->getSyncdRouteNhgKey(gVirtualRouterId, pfx); @@ -1131,14 +1280,18 @@ void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) for (auto it = nextHops.begin(); it != nextHops.end(); it++) { NextHopKey nexthop = *it; - /* This will only work for configured MUX neighbors (most cases) - * TODO: add way to find MUX from neighbor - */ - MuxCable* cable = findMuxCableInSubnet(nexthop.ip_address); - auto standalone = standalone_tunnel_neighbors_.find(nexthop.ip_address); + NeighborEntry neighbor; + MacAddress mac; - if ((cable == nullptr && standalone == standalone_tunnel_neighbors_.end()) || - cable->isActive()) + if (!gNeighOrch->getNeighborEntry(nexthop, neighbor, mac)) + { + // Not able to get neighbor entry, so skip. + SWSS_LOG_NOTICE("Neighbor entry for nexthop %s not found.", + nexthop.to_string().c_str()); + continue; + } + + if (isNeighborActive(neighbor.ip_address, mac, neighbor.alias)) { /* Here we pull from local nexthop ID because neighbor update occurs during state change * before nexthopID is updated in neighorch. This ensures that if a neighbor is Active @@ -1150,12 +1303,11 @@ void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set route entry %s to nexthop %s", - pfx.to_string().c_str(), nexthop.to_string().c_str()); + pfx.to_string().c_str(), neighbor.to_string().c_str()); continue; } SWSS_LOG_NOTICE("setting route %s with nexthop %s %" PRIx64 "", - pfx.to_string().c_str(), nexthop.to_string().c_str(), next_hop_id); - mux_multi_active_nh_table[pfx] = nexthop; + pfx.to_string().c_str(), neighbor.to_string().c_str(), next_hop_id); active_found = true; break; } @@ -1173,7 +1325,6 @@ void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) SWSS_LOG_ERROR("Failed to set route entry %s to tunnel", pfx.getIp().to_string().c_str()); } - mux_multi_active_nh_table.erase(pfx); } } @@ -1375,6 +1526,7 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update) auto it = mux_nexthop_tb_.find(update.entry); if (it != mux_nexthop_tb_.end()) { + SWSS_LOG_INFO("port %s, nexthop %s", port.c_str(), it->second.c_str()); port = it->second; removeNexthop(update.entry); } @@ -1477,13 +1629,34 @@ void MuxOrch::update(SubjectType type, void *cntx) case SUBJECT_TYPE_NEIGH_CHANGE: { NeighborUpdate *update = static_cast(cntx); - updateNeighbor(*update); + if (enable_cache_neigh_updates_) + { + cached_neigh_updates_.push_back(*update); + } + else + { + try + { + updateNeighbor(*update); + } + catch (const std::exception& e) + { + SWSS_LOG_ERROR("Exception caught while updating neighbor. Error: %s", e.what()); + } + } break; } case SUBJECT_TYPE_FDB_CHANGE: { FdbUpdate *update = static_cast(cntx); - updateFdb(*update); + try + { + updateFdb(*update); + } + catch (const std::exception& e) + { + SWSS_LOG_ERROR("Exception caught while updating FDB. Error: %s", e.what()); + } break; } default: @@ -1563,6 +1736,26 @@ bool MuxOrch::handleMuxCfg(const Request& request) (MuxCable(port_name, srv_ip, srv_ip6, mux_peer_switch_, cable_type)); addSkipNeighbors(skip_neighbors); + // Add neighbors that were learned before this mux port was configured. + NeighborTable m_neighbors; + gNeighOrch->getMuxNeighborsForPort(port_name, m_neighbors); + for (const auto &entry : m_neighbors) + { + bool nexthop_found = containsNextHop(entry.first); + bool is_skip_neighbor = isSkipNeighbor(entry.first.ip_address); + if (!nexthop_found && !is_skip_neighbor) + { + SWSS_LOG_NOTICE("Neighbor %s on %s learned before mux port %s configured. updating...", + entry.first.ip_address.to_string().c_str(), + entry.second.mac.to_string().c_str(), + port_name.c_str() + ); + + NeighborUpdate neighbor_update = {entry.first, entry.second.mac, 1}; + updateNeighbor(neighbor_update); + } + } + SWSS_LOG_NOTICE("Mux entry for port '%s' was added, cable type %d", port_name.c_str(), cable_type); } else @@ -1714,6 +1907,30 @@ bool MuxOrch::isStandaloneTunnelRouteInstalled(const IpAddress& neighborIp) return standalone_tunnel_neighbors_.find(neighborIp) != standalone_tunnel_neighbors_.end(); } +void MuxOrch::updateCachedNeighbors() +{ + if (!enable_cache_neigh_updates_) + { + SWSS_LOG_NOTICE("Skip process cached neighbor updates"); + return; + } + if (mux_peer_switch_.isZero()) + { + SWSS_LOG_NOTICE("Skip process cached neighbor updates, no peer switch addr is configured"); + return; + } + + while (!cached_neigh_updates_.empty()) + { + const NeighborUpdate &update = cached_neigh_updates_.back(); + SWSS_LOG_NOTICE("Update cached neighbor %s, add %d", + update.entry.ip_address.to_string().c_str(), + update.add); + updateNeighbor(update); + cached_neigh_updates_.pop_back(); + } +} + MuxCableOrch::MuxCableOrch(DBConnector *db, DBConnector *sdb, const std::string& tableName): Orch2(db, tableName, request_), app_tunnel_route_table_(db, APP_TUNNEL_ROUTE_TABLE_NAME), diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index ce6a4d9b3f6..85acc2006c6 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -10,6 +10,7 @@ #include "tunneldecaporch.h" #include "aclorch.h" #include "neighorch.h" +#include "bulker.h" enum MuxState { @@ -35,6 +36,26 @@ enum MuxCableType ACTIVE_ACTIVE }; +struct MuxRouteBulkContext +{ + std::deque object_statuses; // Bulk statuses + IpPrefix pfx; // Route prefix + sai_object_id_t nh; // nexthop id + + MuxRouteBulkContext(IpPrefix pfx) + : pfx(pfx) + { + } + + MuxRouteBulkContext(IpPrefix pfx, sai_object_id_t nh) + : pfx(pfx), nh(nh) + { + } +}; + +extern size_t gMaxBulkSize; +extern sai_route_api_t* sai_route_api; + // Forward Declarations class MuxOrch; class MuxCableOrch; @@ -64,7 +85,7 @@ typedef std::map MuxNeighbor; class MuxNbrHandler { public: - MuxNbrHandler() = default; + MuxNbrHandler() : gRouteBulker(sai_route_api, gMaxBulkSize) {}; bool enable(bool update_rt); bool disable(sai_object_id_t); @@ -73,13 +94,18 @@ class MuxNbrHandler sai_object_id_t getNextHopId(const NextHopKey); MuxNeighbor getNeighbors() const { return neighbors_; }; string getAlias() const { return alias_; }; + void clearBulkers() { gRouteBulker.clear(); }; private: + bool removeRoutes(std::list& bulk_ctx_list); + bool addRoutes(std::list& bulk_ctx_list); + inline void updateTunnelRoute(NextHopKey, bool = true); private: MuxNeighbor neighbors_; string alias_; + EntityBulker gRouteBulker; }; // Mux Cable object @@ -105,6 +131,7 @@ class MuxCable bool isIpInSubnet(IpAddress ip); void updateNeighbor(NextHopKey nh, bool add); void updateRoutes(); + void updateRoutesForNextHop(NextHopKey nh); sai_object_id_t getNextHopId(const NextHopKey nh) { return nbr_handler_->getNextHopId(nh); @@ -148,6 +175,7 @@ const request_description_t mux_cfg_request_description = { { "soc_ipv4", REQ_T_IP_PREFIX }, { "soc_ipv6", REQ_T_IP_PREFIX }, { "cable_type", REQ_T_STRING }, + { "prober_type", REQ_T_STRING }, }, { } }; @@ -194,11 +222,6 @@ class MuxOrch : public Orch2, public Observer, public Subject return (skip_neighbors_.find(nbr) != skip_neighbors_.end()); } - bool isMultiNexthopRoute(const IpPrefix& pfx) - { - return (mux_multi_active_nh_table.find(pfx) != mux_multi_active_nh_table.end()); - } - MuxCable* findMuxCableInSubnet(IpAddress); bool isNeighborActive(const IpAddress&, const MacAddress&, string&); void update(SubjectType, void *); @@ -213,10 +236,22 @@ class MuxOrch : public Orch2, public Observer, public Subject sai_object_id_t createNextHopTunnel(std::string tunnelKey, IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, IpAddress& ipAddr); sai_object_id_t getNextHopTunnelId(std::string tunnelKey, IpAddress& ipAddr); + sai_object_id_t getTunnelNextHopId(); - void updateRoute(const IpPrefix &pfx, bool add); + void updateRoute(const IpPrefix &pfx); bool isStandaloneTunnelRouteInstalled(const IpAddress& neighborIp); + void enableCachingNeighborUpdate() + { + enable_cache_neigh_updates_ = true; + } + void disableCachingNeighborUpdate() + { + enable_cache_neigh_updates_ = false; + } + void updateCachedNeighbors(); + bool getMuxPort(const MacAddress&, const string&, string&); + private: virtual bool addOperation(const Request& request); virtual bool delOperation(const Request& request); @@ -227,8 +262,6 @@ class MuxOrch : public Orch2, public Observer, public Subject void updateNeighbor(const NeighborUpdate&); void updateFdb(const FdbUpdate&); - bool getMuxPort(const MacAddress&, const string&, string&); - /*** * Methods for managing tunnel routes for neighbor IPs not associated * with a specific mux cable @@ -256,9 +289,6 @@ class MuxOrch : public Orch2, public Observer, public Subject MuxTunnelNHs mux_tunnel_nh_; NextHopTb mux_nexthop_tb_; - /* contains reference of programmed routes by updateRoute */ - MuxRouteTb mux_multi_active_nh_table; - handler_map handler_map_; TunnelDecapOrch *decap_orch_; @@ -268,6 +298,9 @@ class MuxOrch : public Orch2, public Observer, public Subject MuxCfgRequest request_; std::set standalone_tunnel_neighbors_; std::set skip_neighbors_; + + bool enable_cache_neigh_updates_ = false; + std::vector cached_neigh_updates_; }; const request_description_t mux_cable_request_description = { diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index 47bcca3c324..cf01d3e4173 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -22,10 +22,16 @@ extern Directory gDirectory; extern string gMySwitchType; extern int32_t gVoqMySwitchId; extern BfdOrch *gBfdOrch; +extern size_t gMaxBulkSize; +extern string gMyHostName; + +extern bool isChassisDbInUse(); const int neighorch_pri = 30; NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, FdbOrch *fdbOrch, PortsOrch *portsOrch, DBConnector *chassisAppDb) : + gNeighBulker(sai_neighbor_api, gMaxBulkSize), + gNextHopBulker(sai_next_hop_api, gSwitchId, gMaxBulkSize), Orch(appDb, tableName, neighorch_pri), m_intfsOrch(intfsOrch), m_fdbOrch(fdbOrch), @@ -37,12 +43,12 @@ NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, m_fdbOrch->attach(this); // Some UTs instantiate NeighOrch but gBfdOrch is null, it is not null in orchagent - if (gBfdOrch) - { + if (gBfdOrch) + { gBfdOrch->attach(this); } - if(gMySwitchType == "voq") + if(isChassisDbInUse()) { //Add subscriber to process VOQ system neigh tableName = CHASSIS_APP_SYSTEM_NEIGH_TABLE_NAME; @@ -194,9 +200,10 @@ bool NeighOrch::isNeighborResolved(const NextHopKey &nexthop) return hasNextHop(base_nexthop); } -bool NeighOrch::addNextHop(const NextHopKey &nh) +bool NeighOrch::addNextHop(NeighborContext& ctx) { SWSS_LOG_ENTER(); + const NextHopKey nh = ctx.neighborEntry; Port p; if (!gPortsOrch->getPort(nh.alias, p)) @@ -265,6 +272,13 @@ bool NeighOrch::addNextHop(const NextHopKey &nh) next_hop_attrs.push_back(next_hop_attr); sai_object_id_t next_hop_id; + + if (ctx.bulk_op) + { + gNextHopBulker.create_entry(&ctx.next_hop_id , (uint32_t)next_hop_attrs.size(), next_hop_attrs.data()); + return true; + } + sai_status_t status = sai_next_hop_api->create_next_hop(&next_hop_id, gSwitchId, (uint32_t)next_hop_attrs.size(), next_hop_attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -298,7 +312,99 @@ bool NeighOrch::addNextHop(const NextHopKey &nh) next_hop_entry.nh_flags = 0; m_syncdNextHops[nexthop] = next_hop_entry; - m_intfsOrch->increaseRouterIntfsRefCount(nexthop.alias); + m_intfsOrch->increaseRouterIntfsRefCount(nh.alias); + + if (nexthop.isMplsNextHop()) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_MPLS_NEXTHOP); + } + else + { + if (nexthop.ip_address.isV4()) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEXTHOP); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEXTHOP); + } + } + + gFgNhgOrch->validNextHopInNextHopGroup(nexthop); + + // For nexthop with incoming port which has down oper status, NHFLAGS_IFDOWN + // flag should be set on it. + // This scenario may happen under race condition where buffered neighbor event + // is processed after incoming port is down. + if (p.m_oper_status == SAI_PORT_OPER_STATUS_DOWN) + { + if (setNextHopFlag(nexthop, NHFLAGS_IFDOWN) == false) + { + SWSS_LOG_WARN("Failed to set NHFLAGS_IFDOWN on nexthop %s for interface %s", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + } + } + return true; +} + +bool NeighOrch::processBulkAddNextHop(NeighborContext& ctx) +{ + SWSS_LOG_ENTER(); + + const NextHopKey nh = ctx.neighborEntry; + + Port p; + if (!gPortsOrch->getPort(nh.alias, p)) + { + SWSS_LOG_ERROR("Neighbor %s seen on port %s which doesn't exist", + nh.ip_address.to_string().c_str(), nh.alias.c_str()); + return false; + } + if (p.m_type == Port::SUBPORT) + { + if (!gPortsOrch->getPort(p.m_parent_port_id, p)) + { + SWSS_LOG_ERROR("Neighbor %s seen on sub interface %s whose parent port doesn't exist", + nh.ip_address.to_string().c_str(), nh.alias.c_str()); + return false; + } + } + + NextHopKey nexthop(nh); + if (ctx.next_hop_id == SAI_NULL_OBJECT_ID) + { + sai_status_t bulker_status = gNextHopBulker.create_status(ctx.next_hop_id); + if (bulker_status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_NOTICE("Next hop %s on %s already exists", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + return true; + } + SWSS_LOG_ERROR("Failed to create next hop %s on %s, rv:%d", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str(), bulker_status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP, bulker_status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + SWSS_LOG_NOTICE("Created next hop %s on %s", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + if (m_neighborToResolve.find(nexthop) != m_neighborToResolve.end()) + { + clearResolvedNeighborEntry(nexthop); + m_neighborToResolve.erase(nexthop); + SWSS_LOG_INFO("Resolved neighbor for %s", nexthop.to_string().c_str()); + } + + NextHopEntry next_hop_entry; + next_hop_entry.next_hop_id = ctx.next_hop_id; + next_hop_entry.ref_count = 0; + next_hop_entry.nh_flags = 0; + m_syncdNextHops[nexthop] = next_hop_entry; + + m_intfsOrch->increaseRouterIntfsRefCount(nh.alias); if (nexthop.isMplsNextHop()) { @@ -340,6 +446,8 @@ bool NeighOrch::setNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_flag auto nhop = m_syncdNextHops.find(nexthop); bool rc = false; + SWSS_LOG_INFO("setNextHopFlag on %s seen on port %s ", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); assert(nhop != m_syncdNextHops.end()); if (nhop->second.nh_flags & nh_flag) @@ -379,6 +487,8 @@ bool NeighOrch::clearNextHopFlag(const NextHopKey &nexthop, const uint32_t nh_fl nhop->second.nh_flags &= ~nh_flag; uint32_t count; + SWSS_LOG_INFO("clearnexthop on %s seen on port %s ", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); switch (nh_flag) { case NHFLAGS_IFDOWN: @@ -789,6 +899,8 @@ void NeighOrch::doTask(Consumer &consumer) NeighborEntry neighbor_entry = { ip_address, alias }; + NeighborContext ctx = NeighborContext(neighbor_entry); + if (op == SET_COMMAND) { Port p; @@ -814,6 +926,8 @@ void NeighOrch::doTask(Consumer &consumer) mac_address = MacAddress(fvValue(*i)); } + ctx.mac = mac_address; + bool nbr_not_found = (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()); if (nbr_not_found || m_syncdNeighbors[neighbor_entry].mac != mac_address) { @@ -842,7 +956,7 @@ void NeighOrch::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); } } - else if (addNeighbor(neighbor_entry, mac_address)) + else if (addNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -873,7 +987,7 @@ void NeighOrch::doTask(Consumer &consumer) { if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { - if (removeNeighbor(neighbor_entry)) + if (removeNeighbor(ctx)) { it = consumer.m_toSync.erase(it); } @@ -894,13 +1008,42 @@ void NeighOrch::doTask(Consumer &consumer) } } -bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress &macAddress) +/* Gets all neighbor entries tied to a given mux port */ +void NeighOrch::getMuxNeighborsForPort(string port_name, NeighborTable& m_neighbors) +{ + SWSS_LOG_INFO("Getting mux neighbors on %s", port_name.c_str()); + + MuxOrch* mux_orch = gDirectory.get(); + string mux_port_name; + for (const auto &entry : m_syncdNeighbors) + { + // Check if mux port exists for given neighbor entry + mux_port_name = ""; + if (!mux_orch->getMuxPort(entry.second.mac, entry.first.alias, mux_port_name) || mux_port_name.empty()) + { + continue; + } + + // Add to m_neighbors if entry found + if (mux_port_name == port_name) + { + m_neighbors.insert(entry); + } + } +} + +bool NeighOrch::addNeighbor(NeighborContext& ctx) { SWSS_LOG_ENTER(); sai_status_t status; + auto& object_statuses = ctx.object_statuses; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; IpAddress ip_address = neighborEntry.ip_address; string alias = neighborEntry.alias; + bool bulk_op = ctx.bulk_op; sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); if (rif_id == SAI_NULL_OBJECT_ID) @@ -933,6 +1076,52 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress } } + PortsOrch* ports_orch = gDirectory.get(); + auto vlan_ports = ports_orch->getAllVlans(); + + for (auto vlan_port: vlan_ports) + { + if (vlan_port == alias) + { + continue; + } + NeighborEntry temp_entry = { ip_address, vlan_port }; + if (m_syncdNeighbors.find(temp_entry) != m_syncdNeighbors.end()) + { + // Neighbor already exists on another VLAN. If they belong to the same VRF, delete the old neighbor + Port existing_vlan, new_vlan; + if (!gPortsOrch->getPort(vlan_port, new_vlan)) + { + SWSS_LOG_ERROR("Failed to get port for %s", vlan_port.c_str()); + return false; + } + if (!gPortsOrch->getPort(alias, existing_vlan)) + { + SWSS_LOG_ERROR("Failed to get port for %s", alias.c_str()); + return false; + } + if (existing_vlan.m_vr_id == new_vlan.m_vr_id) + { + std::string vrf_name = gDirectory.get()->getVRFname(existing_vlan.m_vr_id); + if (vrf_name.empty()) + { + SWSS_LOG_NOTICE("Neighbor %s already learned on %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str()); + } + else + { + SWSS_LOG_NOTICE("Neighbor %s already learned on %s in VRF %s, removing before adding new neighbor", ip_address.to_string().c_str(), vlan_port.c_str(), vrf_name.c_str()); + } + + NeighborContext removeContext = NeighborContext(temp_entry); + if (!removeNeighbor(removeContext)) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s", ip_address.to_string().c_str(), vlan_port.c_str()); + return false; + } + } + } + } + MuxOrch* mux_orch = gDirectory.get(); bool hw_config = isHwConfigured(neighborEntry); @@ -946,6 +1135,16 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress if (!hw_config && mux_orch->isNeighborActive(ip_address, macAddress, alias)) { + // Using bulker, return and post-process later + if (bulk_op) + { + SWSS_LOG_INFO("Adding neighbor entry %s on %s to bulker.", ip_address.to_string().c_str(), alias.c_str()); + object_statuses.emplace_back(); + gNeighBulker.create_entry(&object_statuses.back(), &neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); + addNextHop(ctx); + return true; + } + status = sai_neighbor_api->create_neighbor_entry(&neighbor_entry, (uint32_t)neighbor_attrs.size(), neighbor_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -981,7 +1180,7 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); } - if (!addNextHop(NextHopKey(ip_address, alias))) + if (!addNextHop(ctx)) { status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); if (status != SAI_STATUS_SUCCESS) @@ -1033,7 +1232,7 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress NeighborUpdate update = { neighborEntry, macAddress, true }; notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); - if(gMySwitchType == "voq") + if(isChassisDbInUse()) { //Sync the neighbor to add to the CHASSIS_APP_DB voqSyncAddNeigh(alias, ip_address, macAddress, neighbor_entry); @@ -1042,13 +1241,17 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress return true; } -bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) +bool NeighOrch::removeNeighbor(NeighborContext& ctx, bool disable) { SWSS_LOG_ENTER(); sai_status_t status; - IpAddress ip_address = neighborEntry.ip_address; + auto& object_statuses = ctx.object_statuses; + + const NeighborEntry neighborEntry = ctx.neighborEntry; string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + bool bulk_op = ctx.bulk_op; NextHopKey nexthop = { ip_address, alias }; if(m_intfsOrch->isRemoteSystemPortIntf(alias)) @@ -1083,6 +1286,15 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) copy(neighbor_entry.ip_address, ip_address); sai_object_id_t next_hop_id = m_syncdNextHops[nexthop].next_hop_id; + + if (bulk_op) + { + object_statuses.emplace_back(); + gNextHopBulker.remove_entry(&ctx.nexthop_status, next_hop_id); + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry); + return true; + } + status = sai_next_hop_api->remove_next_hop(next_hop_id); if (status != SAI_STATUS_SUCCESS) { @@ -1169,7 +1381,7 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) NeighborUpdate update = { neighborEntry, MacAddress(), false }; notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); - if(gMySwitchType == "voq") + if(isChassisDbInUse()) { //Sync the neighbor to delete from the CHASSIS_APP_DB voqSyncDelNeigh(alias, ip_address); @@ -1178,6 +1390,219 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) return true; } +/* Process bulk ctx entry and enable the neigbor */ +bool NeighOrch::processBulkEnableNeighbor(NeighborContext& ctx) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const MacAddress &macAddress = ctx.mac; + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + if (!ctx.bulk_op) + { + SWSS_LOG_INFO("Not a bulk entry for %s on %s", ip_address.to_string().c_str(), alias.c_str()); + return true; + } + + SWSS_LOG_INFO("Checking neighbor create entry status %s on %s.", ip_address.to_string().c_str(), alias.c_str()); + + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + if (rif_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get rif_id for %s", alias.c_str()); + return false; + } + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + MuxOrch* mux_orch = gDirectory.get(); + if (mux_orch->isNeighborActive(ip_address, macAddress, alias)) + { + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_INFO("Neighbor exists: neighbor %s on %s, skipping: status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + return true; + } + else + { + SWSS_LOG_ERROR("Failed to create neighbor %s on %s, status:%s", + macAddress.to_string().c_str(), alias.c_str(), sai_serialize_status(status).c_str()); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + SWSS_LOG_NOTICE("Created neighbor ip %s, %s on %s", ip_address.to_string().c_str(), + macAddress.to_string().c_str(), alias.c_str()); + + m_intfsOrch->increaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + if (!processBulkAddNextHop(ctx)) + { + status = sai_neighbor_api->remove_neighbor_entry(&neighbor_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + return false; + } + } + + m_syncdNeighbors[neighborEntry] = { macAddress, true }; + + NeighborUpdate update = { neighborEntry, macAddress, true }; + notify(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + + return true; +} + +/* Process bulk ctx entry and disable the neigbor */ +bool NeighOrch::processBulkDisableNeighbor(NeighborContext& ctx) +{ + SWSS_LOG_ENTER(); + + const auto& object_statuses = ctx.object_statuses; + auto it_status = object_statuses.begin(); + sai_status_t status; + + const NeighborEntry neighborEntry = ctx.neighborEntry; + string alias = neighborEntry.alias; + IpAddress ip_address = neighborEntry.ip_address; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + return true; + } + + SWSS_LOG_INFO("Checking neighbor remove entry status %s on %s.", ip_address.to_string().c_str(), m_syncdNeighbors[neighborEntry].mac.to_string().c_str()); + + if (isHwConfigured(neighborEntry)) + { + sai_object_id_t rif_id = m_intfsOrch->getRouterIntfsId(alias); + + sai_neighbor_entry_t neighbor_entry; + neighbor_entry.rif_id = rif_id; + neighbor_entry.switch_id = gSwitchId; + copy(neighbor_entry.ip_address, ip_address); + + if (ctx.nexthop_status != SAI_STATUS_SUCCESS) + { + /* When next hop is not found, we continue to remove neighbor entry. */ + if (ctx.nexthop_status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("Next hop %s on %s doesn't exist, rv:%d", + ip_address.to_string().c_str(), alias.c_str(), ctx.nexthop_status); + } + else + { + SWSS_LOG_ERROR("Failed to remove next hop %s on %s, rv:%d", + ip_address.to_string().c_str(), alias.c_str(), ctx.nexthop_status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEXT_HOP, ctx.nexthop_status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + if (ctx.nexthop_status != SAI_STATUS_ITEM_NOT_FOUND) + { + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEXTHOP); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEXTHOP); + } + } + + SWSS_LOG_NOTICE("Bulk removed next hop %s on %s", ip_address.to_string().c_str(), alias.c_str()); + + status = *it_status++; + if (status != SAI_STATUS_SUCCESS) + { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("Bulk remove entry skipped, neighbor %s on %s already removed, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + } + else + { + SWSS_LOG_ERROR("Failed to remove neighbor %s on %s, rv:%d", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + else + { + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } + + removeNextHop(ip_address, alias); + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + SWSS_LOG_NOTICE("Removed neighbor %s on %s", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); + } + } + + /* Do not delete entry from cache for disable request */ + m_syncdNeighbors[neighborEntry].hw_configured = false; + return true; +} + bool NeighOrch::isHwConfigured(const NeighborEntry& neighborEntry) { if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) @@ -1204,7 +1629,11 @@ bool NeighOrch::enableNeighbor(const NeighborEntry& neighborEntry) return true; } - return addNeighbor(neighborEntry, m_syncdNeighbors[neighborEntry].mac); + NeighborEntry neigh = neighborEntry; + NeighborContext ctx = NeighborContext(neigh); + ctx.mac = m_syncdNeighbors[neighborEntry].mac; + + return addNeighbor(ctx); } bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) @@ -1223,7 +1652,110 @@ bool NeighOrch::disableNeighbor(const NeighborEntry& neighborEntry) return true; } - return removeNeighbor(neighborEntry, true); + NeighborContext ctx = NeighborContext(neighborEntry); + + return removeNeighbor(ctx, true); +} + +/* enable neighbors using bulker */ +bool NeighOrch::enableNeighbors(std::list& bulk_ctx_list) +{ + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + if (isHwConfigured(neighborEntry)) + { + SWSS_LOG_INFO("Neighbor %s is already programmed to HW", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + SWSS_LOG_NOTICE("Neighbor enable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!addNeighbor(*ctx)) + { + SWSS_LOG_ERROR("Neighbor %s create entry failed.", neighborEntry.ip_address.to_string().c_str()); + continue; + } + } + + gNeighBulker.flush(); + gNextHopBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + if (ctx->object_statuses.empty()) + { + continue; + } + + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!processBulkEnableNeighbor(*ctx)) + { + SWSS_LOG_INFO("Enable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries */ + ret = false; + } + } + + gNeighBulker.clear(); + return ret; +} + +/* disable neighbors using bulker */ +bool NeighOrch::disableNeighbors(std::list& bulk_ctx_list) +{ + bool ret = true; + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + const NeighborEntry& neighborEntry = ctx->neighborEntry; + ctx->mac = m_syncdNeighbors[neighborEntry].mac; + + if (m_syncdNeighbors.find(neighborEntry) == m_syncdNeighbors.end()) + { + SWSS_LOG_INFO("Neighbor %s not found", neighborEntry.ip_address.to_string().c_str()); + continue; + } + + SWSS_LOG_NOTICE("Neighbor disable request for %s ", neighborEntry.ip_address.to_string().c_str()); + + if(!removeNeighbor(*ctx, true)) + { + SWSS_LOG_ERROR("Neighbor %s remove entry failed.", neighborEntry.ip_address.to_string().c_str()); + } + } + + gNextHopBulker.flush(); + gNeighBulker.flush(); + + for (auto ctx = bulk_ctx_list.begin(); ctx != bulk_ctx_list.end(); ctx++) + { + if (ctx->object_statuses.empty()) + { + continue; + } + + const NeighborEntry& neighborEntry = ctx->neighborEntry; + if (!processBulkDisableNeighbor(*ctx)) + { + SWSS_LOG_INFO("Disable neighbor failed for %s", neighborEntry.ip_address.to_string().c_str()); + /* finish processing bulk entries but return false */ + ret = false; + } + } + + gNeighBulker.clear(); + return ret; } sai_object_id_t NeighOrch::addTunnelNextHop(const NextHopKey& nh) @@ -1334,6 +1866,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) string alias = key.substr(0, found); + size_t pos = alias.find('|'); + std::string port_hostname = (pos != std::string::npos) ? alias.substr(0, pos) : alias; if(gIntfsOrch->isLocalSystemPortIntf(alias)) { //Synced local neighbor. Skip @@ -1364,7 +1898,7 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) continue; } - MacAddress mac_address; + MacAddress mac_address, original_mac_address; uint32_t encap_index = 0; for (auto i = kfvFieldsValues(t).begin(); i != kfvFieldsValues(t).end(); i++) @@ -1385,6 +1919,15 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) it++; continue; } + if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()) + { + NextHopKey nexthop = { ip_address, ibif.m_alias}; + if (hasNextHop(nexthop)) + { + it++; + continue; + } + } if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end() || m_syncdNeighbors[neighbor_entry].mac != mac_address || @@ -1405,7 +1948,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) SWSS_LOG_NOTICE("VOQ encap index set failed for neighbor %s. Removing and re-adding", kfvKey(t).c_str()); //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); @@ -1436,7 +1980,9 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) } //Add neigh to SAI - if (addNeighbor(neighbor_entry, mac_address)) + NeighborContext ctx = NeighborContext(neighbor_entry); + ctx.mac = mac_address; + if (addNeighbor(ctx)) { //neigh successfully added to SAI. Set STATE DB to signal kernel programming by neighbor manager @@ -1444,42 +1990,13 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) //kernel programming. if(ibif.m_type != Port::VLAN) { + original_mac_address = mac_address; mac_address = gMacAddress; - - // For VS platforms, the mac of the static neigh should not be same as asic's own mac. - // This is because host originated packets will have same mac for both src and dst which - // will result in host NOT sending packet out. To address this problem which is specific - // to port type inband interfaces, set the mac to the neighbor's owner asic's mac. Since - // the owner asic's mac is not readily avaiable here, the owner asic mac is derived from - // the switch id and lower 5 bytes of asic mac which is assumed to be same for all asics - // in the VS system. - // Therefore to make VOQ chassis systems work in VS platform based setups like the setups - // using KVMs, it is required that all asics have same base mac in the format given below - // :<6th byte = switch_id> - string platform = getenv("ASIC_VENDOR") ? getenv("ASIC_VENDOR") : ""; - + // For VS platform, use the original MAC address if (platform == VS_PLATFORM_SUBSTRING) { - int8_t sw_id = -1; - uint8_t egress_asic_mac[ETHER_ADDR_LEN]; - - gMacAddress.getMac(egress_asic_mac); - - if (p.m_type == Port::LAG) - { - sw_id = (int8_t) p.m_system_lag_info.switch_id; - } - else if (p.m_type == Port::PHY || p.m_type == Port::SYSTEM) - { - sw_id = (int8_t) p.m_system_port_info.switch_id; - } - - if(sw_id != -1) - { - egress_asic_mac[5] = sw_id; - mac_address = MacAddress(egress_asic_mac); - } + mac_address = original_mac_address; } } vector fvVector; @@ -1518,7 +2035,8 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) if (m_syncdNeighbors.find(neighbor_entry) != m_syncdNeighbors.end()) { //Remove neigh from SAI - if (removeNeighbor(neighbor_entry)) + NeighborContext ctx = NeighborContext(neighbor_entry); + if (removeNeighbor(ctx)) { //neigh successfully deleted from SAI. Set STATE DB to signal to remove entries from kernel m_stateSystemNeighTable->del(state_key); @@ -1885,3 +2403,36 @@ bool NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddre return false; } + +bool NeighOrch::ifChangeInformRemoteNextHop(const string &alias, bool if_up) +{ + SWSS_LOG_ENTER(); + bool rc = true; + Port inbp; + gPortsOrch->getInbandPort(inbp); + for (auto nbr = m_syncdNeighbors.begin(); nbr != m_syncdNeighbors.end(); ++nbr) + { + if (nbr->first.alias != alias) + { + continue; + } + SWSS_LOG_INFO("Found remote Neighbor %s on %s", nbr->first.ip_address.to_string().c_str(), alias.c_str()); + NextHopKey nhop = { nbr->first.ip_address, inbp.m_alias }; + + if (if_up) + { + rc = clearNextHopFlag(nhop, NHFLAGS_IFDOWN); + } + else + { + rc = setNextHopFlag(nhop, NHFLAGS_IFDOWN); + } + } + return rc; +} + +void NeighOrch::clearBulkers() +{ + gNeighBulker.clear(); + gNextHopBulker.clear(); +} diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index e72979ad072..bbaa898f716 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -12,6 +12,7 @@ #include "producerstatetable.h" #include "schema.h" #include "bfdorch.h" +#include "bulker.h" #define NHFLAGS_IFDOWN 0x1 // nexthop's outbound i/f is down @@ -43,6 +44,29 @@ struct NeighborUpdate bool add; }; +/* + * Keeps track of neighbor entry information primarily for bulk operations + */ +struct NeighborContext +{ + NeighborEntry neighborEntry; // neighbor entry to process + std::deque object_statuses; // entity bulk statuses for neighbors + MacAddress mac; // neighbor mac + bool bulk_op = false; // use bulker (only for mux use for now) + sai_object_id_t next_hop_id; // next hop id + sai_status_t nexthop_status; // next hop status + + NeighborContext(NeighborEntry neighborEntry) + : neighborEntry(neighborEntry) + { + } + + NeighborContext(NeighborEntry neighborEntry, bool bulk_op) + : neighborEntry(neighborEntry), bulk_op(bulk_op) + { + } +}; + class NeighOrch : public Orch, public Subject, public Observer { public: @@ -51,7 +75,7 @@ class NeighOrch : public Orch, public Subject, public Observer bool hasNextHop(const NextHopKey&); bool isNeighborResolved(const NextHopKey&); - bool addNextHop(const NextHopKey&); + bool addNextHop(NeighborContext& ctx); bool removeMplsNextHop(const NextHopKey&); sai_object_id_t getNextHopId(const NextHopKey&); @@ -66,12 +90,15 @@ class NeighOrch : public Orch, public Subject, public Observer bool enableNeighbor(const NeighborEntry&); bool disableNeighbor(const NeighborEntry&); + bool enableNeighbors(std::list&); + bool disableNeighbors(std::list&); bool isHwConfigured(const NeighborEntry&); sai_object_id_t addTunnelNextHop(const NextHopKey&); bool removeTunnelNextHop(const NextHopKey&); bool ifChangeInformNextHop(const string &, bool); + bool isNextHopFlagSet(const NextHopKey &, const uint32_t); bool removeOverlayNextHop(const NextHopKey &); void update(SubjectType, void *); @@ -81,6 +108,10 @@ class NeighOrch : public Orch, public Subject, public Observer void resolveNeighbor(const NeighborEntry &); void updateSrv6Nexthop(const NextHopKey &, const sai_object_id_t &); + bool ifChangeInformRemoteNextHop(const string &, bool); + void getMuxNeighborsForPort(string port_name, NeighborTable &m_neighbors); + + void clearBulkers(); private: PortsOrch *m_portsOrch; @@ -93,10 +124,16 @@ class NeighOrch : public Orch, public Subject, public Observer std::set m_neighborToResolve; + EntityBulker gNeighBulker; + ObjectBulker gNextHopBulker; + bool removeNextHop(const IpAddress&, const string&); + bool processBulkAddNextHop(NeighborContext&); - bool addNeighbor(const NeighborEntry&, const MacAddress&); - bool removeNeighbor(const NeighborEntry&, bool disable = false); + bool addNeighbor(NeighborContext& ctx); + bool removeNeighbor(NeighborContext& ctx, bool disable = false); + bool processBulkEnableNeighbor(NeighborContext& ctx); + bool processBulkDisableNeighbor(NeighborContext& ctx); bool setNextHopFlag(const NextHopKey &, const uint32_t); bool clearNextHopFlag(const NextHopKey &, const uint32_t); diff --git a/orchagent/nexthopgroupkey.h b/orchagent/nexthopgroupkey.h index d012cbe41a5..06381cffcd7 100644 --- a/orchagent/nexthopgroupkey.h +++ b/orchagent/nexthopgroupkey.h @@ -2,6 +2,7 @@ #define SWSS_NEXTHOPGROUPKEY_H #include "nexthopkey.h" +#include class NextHopGroupKey { @@ -13,6 +14,7 @@ class NextHopGroupKey { m_overlay_nexthops = false; m_srv6_nexthops = false; + m_srv6_vpn = false; auto nhv = tokenize(nexthops, NHG_DELIMITER); for (const auto &nh : nhv) { @@ -27,6 +29,7 @@ class NextHopGroupKey { m_overlay_nexthops = true; m_srv6_nexthops = false; + m_srv6_vpn = false; auto nhv = tokenize(nexthops, NHG_DELIMITER); for (const auto &nh_str : nhv) { @@ -38,11 +41,16 @@ class NextHopGroupKey { m_overlay_nexthops = false; m_srv6_nexthops = true; + m_srv6_vpn = false; auto nhv = tokenize(nexthops, NHG_DELIMITER); for (const auto &nh_str : nhv) { auto nh = NextHopKey(nh_str, overlay_nh, srv6_nh); m_nexthops.insert(nh); + if (nh.isSrv6Vpn()) + { + m_srv6_vpn = true; + } } } } @@ -51,6 +59,7 @@ class NextHopGroupKey { m_overlay_nexthops = false; m_srv6_nexthops = false; + m_srv6_vpn = false; std::vector nhv = tokenize(nexthops, NHG_DELIMITER); std::vector wtv = tokenize(weights, NHG_DELIMITER); bool set_weight = wtv.size() == nhv.size(); @@ -221,6 +230,11 @@ class NextHopGroupKey return m_srv6_nexthops; } + inline bool is_srv6_vpn() const + { + return m_srv6_vpn; + } + void clear() { m_nexthops.clear(); @@ -228,8 +242,22 @@ class NextHopGroupKey private: std::set m_nexthops; - bool m_overlay_nexthops; - bool m_srv6_nexthops; + bool m_overlay_nexthops = false; + bool m_srv6_nexthops = false; + bool m_srv6_vpn = false; + + // Support std::unordered_map + template + friend class std::hash; }; +namespace std { + template <> + struct hash { + size_t operator()(const NextHopGroupKey& obj) const { + return boost::hash_range(obj.m_nexthops.begin(), obj.m_nexthops.end()); + } + }; +} + #endif /* SWSS_NEXTHOPGROUPKEY_H */ diff --git a/orchagent/nexthopkey.cpp b/orchagent/nexthopkey.cpp new file mode 100644 index 00000000000..68707ca37be --- /dev/null +++ b/orchagent/nexthopkey.cpp @@ -0,0 +1,17 @@ +#include "nexthopkey.h" + +std::size_t hash_value(const NextHopKey& obj) { + std::size_t nh_hash = 0; + + boost::hash_combine(nh_hash, obj.ip_address.to_string()); + boost::hash_combine(nh_hash, obj.alias); + boost::hash_combine(nh_hash, obj.vni); + boost::hash_combine(nh_hash, obj.mac_address.to_string()); + boost::hash_combine(nh_hash, obj.label_stack.to_string()); + boost::hash_combine(nh_hash, obj.weight); + boost::hash_combine(nh_hash, obj.srv6_segment); + boost::hash_combine(nh_hash, obj.srv6_source); + boost::hash_combine(nh_hash, obj.srv6_vpn_sid); + + return nh_hash; +} diff --git a/orchagent/nexthopkey.h b/orchagent/nexthopkey.h index 2f03e9fd496..21bc81f741c 100644 --- a/orchagent/nexthopkey.h +++ b/orchagent/nexthopkey.h @@ -1,6 +1,14 @@ #ifndef SWSS_NEXTHOPKEY_H #define SWSS_NEXTHOPKEY_H +extern "C" +{ +#include +} + +#include +#include + #include "ipaddress.h" #include "tokenize.h" #include "label.h" @@ -14,6 +22,8 @@ extern IntfsOrch *gIntfsOrch; struct NextHopKey { + // Note: When adding a new field to NextHopKey, make sure to also update + // the hash_value method to incorporate the new field into the hash calculation. IpAddress ip_address; // neighbor IP address string alias; // incoming interface alias uint32_t vni; // Encap VNI overlay nexthop @@ -22,6 +32,7 @@ struct NextHopKey uint32_t weight; // NH weight for NHGs string srv6_segment; // SRV6 segment string string srv6_source; // SRV6 source address + string srv6_vpn_sid; // SRV6 vpn sid NextHopKey() : weight(0) {} NextHopKey(const std::string &str, const std::string &alias) : @@ -76,7 +87,7 @@ struct NextHopKey vni = 0; weight = 0; auto keys = tokenize(str, NH_DELIMITER); - if (keys.size() != 3) + if (keys.size() != 4) { std::string err = "Error converting " + str + " to Nexthop"; throw std::invalid_argument(err); @@ -84,6 +95,7 @@ struct NextHopKey ip_address = keys[0]; srv6_segment = keys[1]; srv6_source = keys[2]; + srv6_vpn_sid = keys[3]; } else { @@ -103,6 +115,7 @@ struct NextHopKey } NextHopKey(const IpAddress &ip, const MacAddress &mac, const uint32_t &vni, bool overlay_nh) : ip_address(ip), alias(""), vni(vni), mac_address(mac), weight(0){} + NextHopKey(const IpAddress &ip, const std::string &alias, const MacAddress &mac, const uint32_t &vni, bool overlay_nh) : ip_address(ip), alias(alias), vni(vni), mac_address(mac), weight(0){} const std::string to_string() const { @@ -115,7 +128,8 @@ struct NextHopKey { if (srv6_nh) { - return ip_address.to_string() + NH_DELIMITER + srv6_segment + NH_DELIMITER + srv6_source; + return ip_address.to_string() + NH_DELIMITER + srv6_segment + NH_DELIMITER + srv6_source + NH_DELIMITER + + srv6_vpn_sid + NH_DELIMITER; } std::string str = formatMplsNextHop(); str += (ip_address.to_string() + NH_DELIMITER + alias + NH_DELIMITER + @@ -125,8 +139,8 @@ struct NextHopKey bool operator<(const NextHopKey &o) const { - return tie(ip_address, alias, label_stack, vni, mac_address, srv6_segment, srv6_source) < - tie(o.ip_address, o.alias, o.label_stack, o.vni, o.mac_address, o.srv6_segment, o.srv6_source); + return std::tie(ip_address, alias, label_stack, vni, mac_address, srv6_segment, srv6_source, srv6_vpn_sid) < + std::tie(o.ip_address, o.alias, o.label_stack, o.vni, o.mac_address, o.srv6_segment, o.srv6_source, o.srv6_vpn_sid); } bool operator==(const NextHopKey &o) const @@ -134,7 +148,8 @@ struct NextHopKey return (ip_address == o.ip_address) && (alias == o.alias) && (label_stack == o.label_stack) && (vni == o.vni) && (mac_address == o.mac_address) && - (srv6_segment == o.srv6_segment) && (srv6_source == o.srv6_source); + (srv6_segment == o.srv6_segment) && (srv6_source == o.srv6_source) && + (srv6_vpn_sid == o.srv6_vpn_sid); } bool operator!=(const NextHopKey &o) const @@ -154,7 +169,12 @@ struct NextHopKey bool isSrv6NextHop() const { - return (srv6_segment != ""); + return ((srv6_segment != "") || (srv6_vpn_sid != "") || (srv6_source != "")); + } + + bool isSrv6Vpn() const + { + return (srv6_vpn_sid != ""); } std::string parseMplsNextHop(const std::string& str) @@ -199,4 +219,6 @@ struct NextHopKey } }; +std::size_t hash_value(const NextHopKey& obj); + #endif /* SWSS_NEXTHOPKEY_H */ diff --git a/orchagent/nhgorch.cpp b/orchagent/nhgorch.cpp index cefc2efbb12..b457775afbc 100644 --- a/orchagent/nhgorch.cpp +++ b/orchagent/nhgorch.cpp @@ -2,15 +2,18 @@ #include "neighorch.h" #include "crmorch.h" #include "routeorch.h" +#include "srv6orch.h" #include "bulker.h" #include "logger.h" #include "swssnet.h" extern sai_object_id_t gSwitchId; +extern IntfsOrch *gIntfsOrch; extern NeighOrch *gNeighOrch; extern RouteOrch *gRouteOrch; extern NhgOrch *gNhgOrch; +extern Srv6Orch *gSrv6Orch; extern size_t gMaxBulkSize; @@ -58,46 +61,187 @@ void NhgOrch::doTask(Consumer& consumer) string aliases; string weights; string mpls_nhs; + string nhgs; + bool is_recursive = false; + string srv6_source; + bool overlay_nh = false; + bool srv6_nh = false; /* Get group's next hop IPs and aliases */ for (auto i : kfvFieldsValues(t)) { - if (fvField(i) == "nexthop") + if (fvField(i) == "nexthop" && fvValue(i) != "") ips = fvValue(i); - if (fvField(i) == "ifname") + if (fvField(i) == "ifname" && fvValue(i) != "") aliases = fvValue(i); - if (fvField(i) == "weight") + if (fvField(i) == "weight" && fvValue(i) != "") weights = fvValue(i); - if (fvField(i) == "mpls_nh") + if (fvField(i) == "mpls_nh" && fvValue(i) != "") mpls_nhs = fvValue(i); - } - /* Split ips and alaises strings into vectors of tokens. */ + if (fvField(i) == "seg_src" && fvValue(i) != "") + { + srv6_source = fvValue(i); + srv6_nh = true; + } + + if (fvField(i) == "nexthop_group" && fvValue(i) != "") + { + nhgs = fvValue(i); + is_recursive = true; + } + } + /* A NHG should not have both regular(ip/alias) and recursive fields */ + if (is_recursive && (!ips.empty() || !aliases.empty())) + { + SWSS_LOG_ERROR("Nexthop group %s has both regular(ip/alias) and recursive fields", index.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + /* Split ips and aliases strings into vectors of tokens. */ vector ipv = tokenize(ips, ','); vector alsv = tokenize(aliases, ','); vector mpls_nhv = tokenize(mpls_nhs, ','); + vector nhgv = tokenize(nhgs, NHG_DELIMITER); + vector srv6_srcv = tokenize(srv6_source, ','); /* Create the next hop group key. */ string nhg_str; + NextHopGroupKey nhg_key; + + /* Keeps track of any non-existing member of a recursive nexthop group */ + bool non_existent_member = false; - for (uint32_t i = 0; i < ipv.size(); i++) + if (is_recursive) { - if (i) nhg_str += NHG_DELIMITER; - if (!mpls_nhv.empty() && mpls_nhv[i] != "na") + SWSS_LOG_INFO("Adding recursive nexthop group %s with %s", index.c_str(), nhgs.c_str()); + + /* Reset the "nexthop_group" field and update it with only the existing members */ + nhgs = ""; + + /* Check if any of the members are a recursive or temporary nexthop group */ + bool invalid_member = false; + + for (auto& nhgm : nhgv) + { + const auto& nhgm_it = m_syncdNextHopGroups.find(nhgm); + if (nhgm_it == m_syncdNextHopGroups.end()) + { + SWSS_LOG_INFO("Member nexthop group %s in parent nhg %s not ready", + nhgm.c_str(), index.c_str()); + + non_existent_member = true; + continue; + } + if ((nhgm_it->second.nhg) && + (nhgm_it->second.nhg->isRecursive() || nhgm_it->second.nhg->isTemp())) + { + SWSS_LOG_ERROR("Invalid member nexthop group %s in parent nhg %s", + nhgm.c_str(), index.c_str()); + + invalid_member = true; + break; + } + /* Keep only the members which exist in the local cache */ + if (nhgs.empty()) + nhgs = nhgm; + else + nhgs += NHG_DELIMITER + nhgm; + } + if (invalid_member) { - nhg_str += mpls_nhv[i] + LABELSTACK_DELIMITER; + it = consumer.m_toSync.erase(it); + continue; + } + /* If no members are present */ + if (nhgs.empty()) + { + it++; + continue; } - nhg_str += ipv[i] + NH_DELIMITER + alsv[i]; - } - NextHopGroupKey nhg_key = NextHopGroupKey(nhg_str, weights); + /* Form nexthopgroup key with the nexthopgroup keys of available members */ + nhgv = tokenize(nhgs, NHG_DELIMITER); + + bool nhg_mismatch = false; + for (uint32_t i = 0; i < nhgv.size(); i++) + { + auto k = m_syncdNextHopGroups.at(nhgv[i]).nhg->getKey(); + if (i) + { + if (k.is_srv6_nexthop() != srv6_nh || k.is_overlay_nexthop() != overlay_nh) + { + SWSS_LOG_ERROR("Inconsistent nexthop group type between %s and %s", + m_syncdNextHopGroups.at(nhgv[0]).nhg->getKey().to_string().c_str(), + k.to_string().c_str()); + nhg_mismatch = true; + break; + } + nhg_str += NHG_DELIMITER; + } + else + { + srv6_nh = k.is_srv6_nexthop(); + overlay_nh = k.is_overlay_nexthop(); + } + + nhg_str += m_syncdNextHopGroups.at(nhgv[i]).nhg->getKey().to_string(); + } + + if (nhg_mismatch) + { + it = consumer.m_toSync.erase(it); + continue; + } + + if (srv6_nh) + nhg_key = NextHopGroupKey(nhg_str, overlay_nh, srv6_nh); + else + nhg_key = NextHopGroupKey(nhg_str, weights); + } + else + { + if (srv6_nh) + { + if (ipv.size() != srv6_srcv.size()) + { + SWSS_LOG_ERROR("inconsistent number of endpoints and srv6_srcs."); + it = consumer.m_toSync.erase(it); + continue; + } + for (uint32_t i = 0; i < ipv.size(); i++) + { + if (i) nhg_str += NHG_DELIMITER; + nhg_str += ipv[i] + NH_DELIMITER; // ip address + nhg_str += NH_DELIMITER; // srv6 segment + nhg_str += srv6_srcv[i] + NH_DELIMITER; // srv6 source + nhg_str += NH_DELIMITER; // srv6 vpn sid + } + nhg_key = NextHopGroupKey(nhg_str, overlay_nh, srv6_nh); + } + else + { + for (uint32_t i = 0; i < ipv.size(); i++) + { + if (i) nhg_str += NHG_DELIMITER; + if (!mpls_nhv.empty() && mpls_nhv[i] != "na") + { + nhg_str += mpls_nhv[i] + LABELSTACK_DELIMITER; + } + nhg_str += ipv[i] + NH_DELIMITER + alsv[i]; + } + nhg_key = NextHopGroupKey(nhg_str, weights); + } + } /* If the group does not exist, create one. */ if (nhg_it == m_syncdNextHopGroups.end()) { + SWSS_LOG_INFO("Create nexthop group %s with %s", index.c_str(), nhg_str.c_str()); + /* * If we've reached the NHG limit, we're going to create a temporary * group, represented by one of it's NH only until we have @@ -109,6 +253,13 @@ void NhgOrch::doTask(Consumer& consumer) { SWSS_LOG_DEBUG("Next hop group count reached its limit."); + // don't create temp nhg for srv6 + if (nhg_key.is_srv6_nexthop()) + { + ++it; + continue; + } + try { auto nhg = std::make_unique(createTempNhg(nhg_key)); @@ -133,10 +284,21 @@ void NhgOrch::doTask(Consumer& consumer) else { auto nhg = std::make_unique(nhg_key, false); - success = nhg->sync(); + /* + * Mark the nexthop group as recursive so as to create a + * nexthop group object even if it has just one available path + */ + nhg->setRecursive(is_recursive); + + success = nhg->sync(); if (success) { + /* Keep the msg in loop if any member path is not available yet */ + if (is_recursive && non_existent_member) + { + success = false; + } m_syncdNextHopGroups.emplace(index, NhgEntry(std::move(nhg))); } } @@ -144,6 +306,8 @@ void NhgOrch::doTask(Consumer& consumer) /* If the group exists, update it. */ else { + SWSS_LOG_INFO("Update nexthop group %s with %s", index.c_str(), nhg_str.c_str()); + const auto& nhg_ptr = nhg_it->second.nhg; /* @@ -216,6 +380,12 @@ void NhgOrch::doTask(Consumer& consumer) else { success = nhg_ptr->update(nhg_key); + + /* Keep the msg in loop if any member path is not available yet */ + if (is_recursive && non_existent_member) + { + success = false; + } } } } @@ -367,9 +537,21 @@ sai_object_id_t NextHopGroupMember::getNhId() const sai_object_id_t nh_id = SAI_NULL_OBJECT_ID; - if (gNeighOrch->hasNextHop(m_key)) + if (m_key.isIntfNextHop()) + { + nh_id = gIntfsOrch->getRouterIntfsId(m_key.alias); + } + else if (gNeighOrch->hasNextHop(m_key)) { nh_id = gNeighOrch->getNextHopId(m_key); + if (m_key.isSrv6NextHop()) + { + SWSS_LOG_INFO("Single NH: create srv6 nexthop %s", m_key.to_string(false, true).c_str()); + if (!gSrv6Orch->createSrv6NexthopWithoutVpn(m_key, nh_id)) + { + SWSS_LOG_ERROR("Failed to create SRv6 nexthop %s", m_key.to_string(false, true).c_str()); + } + } } /* * If the next hop is labeled and the IP next hop exists, create the @@ -380,14 +562,28 @@ sai_object_id_t NextHopGroupMember::getNhId() const */ else if (isLabeled() && gNeighOrch->isNeighborResolved(m_key)) { - if (gNeighOrch->addNextHop(m_key)) + NeighborContext ctx = NeighborContext(m_key); + if (gNeighOrch->addNextHop(ctx)) { nh_id = gNeighOrch->getNextHopId(m_key); } } else { - gNeighOrch->resolveNeighbor(m_key); + if (m_key.isSrv6NextHop()) + { + SWSS_LOG_INFO("Single NH: create srv6 nexthop %s", m_key.to_string(false, true).c_str()); + if (!gSrv6Orch->createSrv6NexthopWithoutVpn(m_key, nh_id)) + { + SWSS_LOG_ERROR("Failed to create SRv6 nexthop %s", m_key.to_string(false, true).c_str()); + } + } + else + { + SWSS_LOG_INFO("Failed to get next hop %s, resolving neighbor", + m_key.to_string().c_str()); + gNeighOrch->resolveNeighbor(m_key); + } } return nh_id; @@ -463,6 +659,14 @@ NextHopGroupMember::~NextHopGroupMember() { SWSS_LOG_ENTER(); + if (m_key.isSrv6NextHop() && gNeighOrch->hasNextHop(m_key) && + !gNeighOrch->getNextHopRefCount(m_key)) + { + if (!gSrv6Orch->removeSrv6NexthopWithoutVpn(m_key)) + { + SWSS_LOG_ERROR("SRv6 Nexthop %s delete failed", m_key.to_string(false, true).c_str()); + } + } /* * If the labeled next hop is unreferenced, remove it from NeighOrch as * NhgOrch and RouteOrch are the ones controlling it's lifetime. They both @@ -470,7 +674,7 @@ NextHopGroupMember::~NextHopGroupMember() * them as they're both doing the same checks before removing a labeled * next hop. */ - if (isLabeled() && + else if (isLabeled() && gNeighOrch->hasNextHop(m_key) && (gNeighOrch->getNextHopRefCount(m_key) == 0)) { @@ -484,7 +688,7 @@ NextHopGroupMember::~NextHopGroupMember() * Params: IN key - The next hop group's key. * Returns: Nothing. */ -NextHopGroup::NextHopGroup(const NextHopGroupKey& key, bool is_temp) : NhgCommon(key), m_is_temp(is_temp) +NextHopGroup::NextHopGroup(const NextHopGroupKey& key, bool is_temp) : NhgCommon(key), m_is_temp(is_temp), m_is_recursive(false) { SWSS_LOG_ENTER(); @@ -506,6 +710,7 @@ NextHopGroup& NextHopGroup::operator=(NextHopGroup&& nhg) SWSS_LOG_ENTER(); m_is_temp = nhg.m_is_temp; + m_is_recursive = nhg.m_is_recursive; NhgCommon::operator=(std::move(nhg)); @@ -532,11 +737,8 @@ bool NextHopGroup::sync() return true; } - /* - * If the group is temporary, the group ID will be the only member's NH - * ID. - */ - if (m_is_temp) + /* If the group is non-recursive with single member, the group ID will be the only member's NH ID */ + if (!isRecursive() && (m_members.size() == 1)) { const NextHopGroupMember& nhgm = m_members.begin()->second; sai_object_id_t nhid = nhgm.getNhId(); @@ -549,6 +751,12 @@ bool NextHopGroup::sync() else { m_id = nhid; + + auto nh_key = nhgm.getKey(); + if (nh_key.isIntfNextHop()) + gIntfsOrch->increaseRouterIntfsRefCount(nh_key.alias); + else + gNeighOrch->increaseNextHopRefCount(nh_key); } } else @@ -663,9 +871,21 @@ bool NextHopGroup::remove() { SWSS_LOG_ENTER(); - // If the group is temporary, there is nothing to be done - just reset the ID. - if (m_is_temp) + if (!isSynced()) + { + return true; + } + // If the group is temporary or non-recursive, update the neigh or rif ref-count and reset the ID. + if (m_is_temp || + (!isRecursive() && m_members.size() == 1)) { + const NextHopGroupMember& nhgm = m_members.begin()->second; + auto nh_key = nhgm.getKey(); + if (nh_key.isIntfNextHop()) + gIntfsOrch->decreaseRouterIntfsRefCount(nh_key.alias); + else + gNeighOrch->decreaseNextHopRefCount(nh_key); + m_id = SAI_NULL_OBJECT_ID; return true; } @@ -687,6 +907,9 @@ bool NextHopGroup::syncMembers(const std::set& nh_keys) { SWSS_LOG_ENTER(); + /* This method should not be called for single-membered non-recursive nexthop groups */ + assert(isRecursive() || (m_members.size() > 1)); + ObjectBulker nextHopGroupMemberBulker(sai_next_hop_group_api, gSwitchId, gMaxBulkSize); /* @@ -698,6 +921,7 @@ bool NextHopGroup::syncMembers(const std::set& nh_keys) */ std::map syncingMembers; + bool success = true; for (const auto& nh_key : nh_keys) { NextHopGroupMember& nhgm = m_members.at(nh_key); @@ -715,7 +939,8 @@ bool NextHopGroup::syncMembers(const std::set& nh_keys) { SWSS_LOG_WARN("Failed to get next hop %s in group %s", nhgm.to_string().c_str(), to_string().c_str()); - return false; + success = false; + continue; } /* If the neighbor's interface is down, skip from being syncd. */ @@ -742,7 +967,6 @@ bool NextHopGroup::syncMembers(const std::set& nh_keys) * Go through the synced members and increment the Crm ref count for the * successful ones. */ - bool success = true; for (const auto& mbr : syncingMembers) { /* Check that the returned member ID is valid. */ @@ -776,6 +1000,23 @@ bool NextHopGroup::update(const NextHopGroupKey& nhg_key) { SWSS_LOG_ENTER(); + if (!isSynced() || + (!isRecursive() && (m_members.size() == 1 || nhg_key.getSize() == 1))) + { + bool was_synced = isSynced(); + bool was_temp = isTemp(); + *this = NextHopGroup(nhg_key, false); + + /* + * For temporary nexthop group being updated, set the recursive flag + * as it is expected to get promoted to multiple NHG + */ + setRecursive(was_temp); + + /* Sync the group only if it was synced before. */ + return (was_synced ? sync() : true); + } + /* Update the key. */ m_key = nhg_key; @@ -798,7 +1039,7 @@ bool NextHopGroup::update(const NextHopGroupKey& nhg_key) /* If the member is updated, update it's weight. */ else { - if (!mbr_it.second.updateWeight(new_nh_key_it->weight)) + if (new_nh_key_it->weight && mbr_it.second.getWeight() != new_nh_key_it->weight && !mbr_it.second.updateWeight(new_nh_key_it->weight)) { SWSS_LOG_WARN("Failed to update member %s weight", nh_key.to_string().c_str()); return false; @@ -891,7 +1132,12 @@ bool NextHopGroup::validateNextHop(const NextHopKey& nh_key) { SWSS_LOG_ENTER(); - return syncMembers({nh_key}); + if (isRecursive() || (m_members.size() > 1)) + { + return syncMembers({nh_key}); + } + + return true; } /* @@ -905,5 +1151,10 @@ bool NextHopGroup::invalidateNextHop(const NextHopKey& nh_key) { SWSS_LOG_ENTER(); - return removeMembers({nh_key}); + if (isRecursive() || (m_members.size() > 1)) + { + return removeMembers({nh_key}); + } + + return true; } diff --git a/orchagent/nhgorch.h b/orchagent/nhgorch.h index 225d3ffaf24..d8a92e61310 100644 --- a/orchagent/nhgorch.h +++ b/orchagent/nhgorch.h @@ -54,7 +54,7 @@ class NextHopGroup : public NhgCommon& nh_keys) override; diff --git a/orchagent/notifications.cpp b/orchagent/notifications.cpp index 2cd46d5cfd5..978c702e1e4 100644 --- a/orchagent/notifications.cpp +++ b/orchagent/notifications.cpp @@ -4,6 +4,10 @@ extern "C" { #include "logger.h" #include "notifications.h" +#include "switchorch.h" + +extern SwitchOrch *gSwitchOrch; +extern sai_redis_communication_mode_t gRedisCommunicationMode; #ifdef ASAN_ENABLED #include @@ -15,18 +19,67 @@ void on_fdb_event(uint32_t count, sai_fdb_event_notification_data_t *data) // which causes concurrency access to the DB } +/* + * Don't perform DB operations within this event handler, because it runs by + * libsairedis in a separate thread which causes concurrency issues. + * For platforms which use zmq between orchagent and syncd, it is an acceptable + * workaround to forward the notifications from the callback handler to the + * redis notifications channel processed by portsorch. + */ void on_port_state_change(uint32_t count, sai_port_oper_status_notification_t *data) +{ + if (gRedisCommunicationMode == SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC) + { + swss::DBConnector db("ASIC_DB", 0); + swss::NotificationProducer port_state_change(&db, "NOTIFICATIONS"); + std::string sdata = sai_serialize_port_oper_status_ntf(count, data); + std::vector values; + + // Forward port_state_change notification to be handled in portsorch doTask() + port_state_change.send("port_state_change", sdata, values); + } +} + +void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data) { // don't use this event handler, because it runs by libsairedis in a separate thread // which causes concurrency access to the DB } -void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data) +void on_twamp_session_event(uint32_t count, sai_twamp_session_event_notification_data_t *data) { // don't use this event handler, because it runs by libsairedis in a separate thread // which causes concurrency access to the DB } +void on_ha_set_event(uint32_t count, sai_ha_set_event_data_t *data) +{ + if (gRedisCommunicationMode == SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC) + { + swss::DBConnector db("ASIC_DB", 0); + swss::NotificationProducer ha_set_event(&db, "NOTIFICATIONS"); + std::string sdata = sai_serialize_ha_set_event_ntf(count, data); + std::vector values; + + // Forward ha_set_event notification to be handled in dashhaorch doTask() + ha_set_event.send(SAI_SWITCH_NOTIFICATION_NAME_HA_SET_EVENT, sdata, values); + } +} + +void on_ha_scope_event(uint32_t count, sai_ha_scope_event_data_t *data) +{ + if (gRedisCommunicationMode == SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC) + { + swss::DBConnector db("ASIC_DB", 0); + swss::NotificationProducer ha_scope_event(&db, "NOTIFICATIONS"); + std::string sdata = sai_serialize_ha_scope_event_ntf(count, data); + std::vector values; + + // Forward ha_scope_event notification to be handled in dashhaorch doTask() + ha_scope_event.send(SAI_SWITCH_NOTIFICATION_NAME_HA_SCOPE_EVENT, sdata, values); + } +} + void on_switch_shutdown_request(sai_object_id_t switch_id) { SWSS_LOG_ENTER(); @@ -34,6 +87,12 @@ void on_switch_shutdown_request(sai_object_id_t switch_id) /* TODO: Later a better restart story will be told here */ SWSS_LOG_ERROR("Syncd stopped"); + if (gSwitchOrch->isFatalEventReceived()) + { + SWSS_LOG_ERROR("Orchagent aborted due to fatal SAI error received"); + abort(); + } + /* The quick_exit() is used instead of the exit() to avoid a following data race: * the exit() calls the destructors for global static variables (e.g.BufferOrch::m_buffer_type_maps) @@ -47,3 +106,58 @@ void on_switch_shutdown_request(sai_object_id_t switch_id) quick_exit(EXIT_FAILURE); } + +void on_port_host_tx_ready(sai_object_id_t switch_id, sai_object_id_t port_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus) +{ + // don't use this event handler, because it runs by libsairedis in a separate thread + // which causes concurrency access to the DB +} + +void on_switch_asic_sdk_health_event(sai_object_id_t switch_id, + sai_switch_asic_sdk_health_severity_t severity, + sai_timespec_t timestamp, + sai_switch_asic_sdk_health_category_t category, + sai_switch_health_data_t data, + const sai_u8_list_t description) +{ + gSwitchOrch->onSwitchAsicSdkHealthEvent(switch_id, + severity, + timestamp, + category, + data, + description); +} + +void on_tam_tel_type_config_change(sai_object_id_t tam_tel_id) +{ +} + +void on_switch_macsec_post_status_notify(sai_object_id_t switch_id, + sai_switch_macsec_post_status_t switch_macsec_post_status) +{ + if (gRedisCommunicationMode == SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC) + { + swss::DBConnector db("ASIC_DB", 0); + swss::NotificationProducer macsec_post_status_notify(&db, "NOTIFICATIONS"); + std::string sdata = sai_serialize_switch_macsec_post_status_ntf(switch_id, switch_macsec_post_status); + std::vector values; + + // Forward switch_macsec_post_status notification to be handled in macsecorch doTask() + macsec_post_status_notify.send("switch_macsec_post_status", sdata, values); + } +} + +void on_macsec_post_status_notify(sai_object_id_t macsec_id, + sai_macsec_post_status_t macsec_post_status) +{ + if (gRedisCommunicationMode == SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC) + { + swss::DBConnector db("ASIC_DB", 0); + swss::NotificationProducer macsec_post_status_notify(&db, "NOTIFICATIONS"); + std::string sdata = sai_serialize_macsec_post_status_ntf(macsec_id, macsec_post_status); + std::vector values; + + // Forward macsec_post_status notification to be handled in macsecorch doTask() + macsec_post_status_notify.send("macsec_post_status", sdata, values); + } +} diff --git a/orchagent/notifications.h b/orchagent/notifications.h index 61e8422db02..b81efe9c590 100644 --- a/orchagent/notifications.h +++ b/orchagent/notifications.h @@ -2,12 +2,32 @@ extern "C" { #include "sai.h" +#include "saiextensions.h" } void on_fdb_event(uint32_t count, sai_fdb_event_notification_data_t *data); void on_port_state_change(uint32_t count, sai_port_oper_status_notification_t *data); void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data); +void on_twamp_session_event(uint32_t count, sai_twamp_session_event_notification_data_t *data); +void on_ha_set_event(uint32_t count, sai_ha_set_event_data_t *data); +void on_ha_scope_event(uint32_t count, sai_ha_scope_event_data_t *data); // The function prototype information can be found here: // https://github.com/sonic-net/sonic-sairedis/blob/master/meta/NotificationSwitchShutdownRequest.cpp#L49 void on_switch_shutdown_request(sai_object_id_t switch_id); + +void on_port_host_tx_ready(sai_object_id_t switch_id, sai_object_id_t port_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus); + +void on_switch_asic_sdk_health_event(sai_object_id_t switch_id, + sai_switch_asic_sdk_health_severity_t severity, + sai_timespec_t timestamp, + sai_switch_asic_sdk_health_category_t category, + sai_switch_health_data_t data, + const sai_u8_list_t description); + +void on_tam_tel_type_config_change(sai_object_id_t tam_tel_id); + +void on_switch_macsec_post_status_notify(sai_object_id_t switch_id, + sai_switch_macsec_post_status_t switch_macsec_post_status); +void on_macsec_post_status_notify(sai_object_id_t macsec_id, + sai_macsec_post_status_t macsec_post_status); diff --git a/orchagent/notifier.h b/orchagent/notifier.h index 36416ab7f0b..f4fcfab42f8 100644 --- a/orchagent/notifier.h +++ b/orchagent/notifier.h @@ -14,8 +14,18 @@ class Notifier : public Executor { return static_cast(getSelectable()); } - void execute() + void execute() override { - m_orch->doTask(*getNotificationConsumer()); + auto notificationConsumer = getNotificationConsumer(); + /* Check before triggering doTask because pop() can throw an exception if there is no data */ + if (notificationConsumer->hasData()) + { + m_orch->doTask(*notificationConsumer); + } } -}; + + void drain() override + { + this->execute(); + } +}; \ No newline at end of file diff --git a/orchagent/nvda_port_trim_drop.lua b/orchagent/nvda_port_trim_drop.lua new file mode 100644 index 00000000000..32a80a355f1 --- /dev/null +++ b/orchagent/nvda_port_trim_drop.lua @@ -0,0 +1,36 @@ +-- KEYS - port IDs +-- ARGV[1] - counters db index +-- ARGV[2] - counters table name +-- ARGV[3] - poll time interval +-- return log + +local logtable = {} + +local function logit(msg) + logtable[#logtable+1] = tostring(msg) +end + +local counters_db = ARGV[1] +local counters_table_name = ARGV[2] + +-- Get configuration +redis.call('SELECT', counters_db) + +-- For each port ID in KEYS +for _, port in ipairs(KEYS) do + -- Get current values from COUNTERS DB + local trim_packets = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_TRIM_PACKETS') + local trim_sent_packets = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_TX_TRIM_PACKETS') + + if trim_packets and trim_sent_packets then + -- Calculate dropped packets + local dropped_packets = tonumber(trim_packets) - tonumber(trim_sent_packets) + -- Write result back to COUNTERS DB + redis.call('HSET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_DROPPED_TRIM_PACKETS', dropped_packets) + logit("Port " .. port .. " DROPPED_TRIM_PACKETS: " .. dropped_packets) + else + logit("Port " .. port .. " missing required counters") + end +end + +return logtable diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 1e33d7c5ebf..0e571f48f77 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -17,6 +17,79 @@ using namespace swss; int gBatchSize = 0; +std::shared_ptr Orch::gRingBuffer = nullptr; +std::shared_ptr Executor::gRingBuffer = nullptr; + +RingBuffer::RingBuffer(int size): buffer(size) +{ + if (size <= 1) { + throw std::invalid_argument("Buffer size must be greater than 1"); + } +} + +void RingBuffer::pauseThread() +{ + std::unique_lock lock(mtx); + cv.wait(lock, [&](){ return !IsEmpty() || thread_exited; }); +} + +void RingBuffer::notify() +{ + // buffer not empty but rthread idle + bool task_pending = !IsEmpty() && IsIdle(); + + if (thread_exited || task_pending) + cv.notify_all(); +} + +void RingBuffer::setIdle(bool idle) +{ + idle_status = idle; +} + +bool RingBuffer::IsIdle() const +{ + return idle_status; +} + +bool RingBuffer::IsFull() const +{ + return (tail + 1) % static_cast(buffer.size()) == head; +} + +bool RingBuffer::IsEmpty() const +{ + return tail == head; +} + +bool RingBuffer::push(AnyTask ringEntry) +{ + if (IsFull()) + return false; + buffer[tail] = std::move(ringEntry); + tail = (tail + 1) % static_cast(buffer.size()); + return true; +} + +bool RingBuffer::pop(AnyTask& ringEntry) +{ + if (IsEmpty()) + return false; + ringEntry = std::move(buffer[head]); + head = (head + 1) % static_cast(buffer.size()); + return true; +} + +void RingBuffer::addExecutor(Executor* executor) +{ + m_consumerSet.insert(executor->getName()); +} + +bool RingBuffer::serves(const std::string& tableName) +{ + return m_consumerSet.find(tableName) != m_consumerSet.end(); +} + Orch::Orch(DBConnector *db, const string tableName, int pri) { addConsumer(db, tableName, pri); @@ -30,6 +103,20 @@ Orch::Orch(DBConnector *db, const vector &tableNames) } } +Orch::Orch(swss::DBConnector *db1, swss::DBConnector *db2, + const std::vector &tableNames_1, const std::vector &tableNames_2) +{ + for(auto it : tableNames_1) + { + addConsumer(db1, it, default_orch_pri); + } + + for(auto it : tableNames_2) + { + addConsumer(db2, it, default_orch_pri); + } +} + Orch::Orch(DBConnector *db, const vector &tableNames_with_pri) { for (const auto& it : tableNames_with_pri) @@ -155,6 +242,10 @@ size_t ConsumerBase::addToSync(const std::deque &entries return entries.size(); } +size_t ConsumerBase::addToSync(std::shared_ptr> entries) { + return addToSync(*entries); +} + // TODO: Table should be const size_t ConsumerBase::refillToSync(Table* table) { @@ -239,19 +330,55 @@ void ConsumerBase::dumpPendingTasks(vector &ts) void Consumer::execute() { - // ConsumerBase::execute_impl(); SWSS_LOG_ENTER(); - size_t update_size = 0; - auto table = static_cast(getSelectable()); - do + auto entries = std::make_shared>(); + getConsumerTable()->pops(*entries); + + processAnyTask( + // bundle tasks into a lambda function which takes no argument and returns void + // this lambda captures variables by value from the surrounding scope + [=](){ + addToSync(entries); + drain(); + } + ); +} + +void Executor::processAnyTask(AnyTask&& task) +{ + // if either gRingBuffer isn't initialized or the ring thread isn't created + if (!gRingBuffer || !gRingBuffer->thread_created) { - std::deque entries; - table->pops(entries); - update_size = addToSync(entries); - } while (update_size != 0); + // execute the input task immediately + task(); + } - drain(); + // Ring Buffer Logic + + // if this executor isn't served by ring buffer + else if (!gRingBuffer->serves(getName())) + { + // this executor should execute the input task in the main thread + // but to avoid thread issue, it should wait when the ring buffer is actively working + while (!gRingBuffer->IsEmpty() || !gRingBuffer->IsIdle()) { + gRingBuffer->notify(); + std::this_thread::sleep_for(std::chrono::milliseconds(SLEEP_MSECONDS)); + } + // execute task() + task(); + } + else + { + // if this executor is served by ring buffer, + // push the task to gRingBuffer + // this task would be executed in the ring thread, not here + while (!gRingBuffer->push(task)) { + gRingBuffer->notify(); + SWSS_LOG_WARN("ring is full...push again"); + } + gRingBuffer->notify(); + } } void Consumer::drain() @@ -697,7 +824,7 @@ set Orch::generateIdListFromMap(unsigned long idsMap, sai_uint32_t maxId { unsigned long currentIdMask = 1; bool started = false, needGenerateMap = false; - sai_uint32_t lower, upper; + sai_uint32_t lower = 0, upper = 0; set idStringList; for (sai_uint32_t id = 0; id <= maxId; id ++) { @@ -804,6 +931,10 @@ void Orch::addExecutor(Executor* executor) { SWSS_LOG_THROW("Duplicated executorName in m_consumerMap: %s", executor->getName().c_str()); } + + if (gRingBuffer && executor->getName() == APP_ROUTE_TABLE_NAME) { + gRingBuffer->addExecutor(executor); + } } Executor *Orch::getExecutor(string executorName) @@ -847,19 +978,19 @@ void Orch2::doTask(Consumer &consumer) } catch (const std::invalid_argument& e) { - SWSS_LOG_ERROR("Parse error: %s", e.what()); + SWSS_LOG_ERROR("Parse error in %s: %s", typeid(*this).name(), e.what()); } catch (const std::logic_error& e) { - SWSS_LOG_ERROR("Logic error: %s", e.what()); + SWSS_LOG_ERROR("Logic error in %s: %s", typeid(*this).name(), e.what()); } catch (const std::exception& e) { - SWSS_LOG_ERROR("Exception was catched in the request parser: %s", e.what()); + SWSS_LOG_ERROR("Exception was caught in the request parser in %s: %s", typeid(*this).name(), e.what()); } catch (...) { - SWSS_LOG_ERROR("Unknown exception was catched in the request parser"); + SWSS_LOG_ERROR("Unknown exception was caught in the request parser"); } request_.clear(); diff --git a/orchagent/orch.h b/orchagent/orch.h index 6e4702ce3de..683c7527fd3 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -7,6 +7,7 @@ #include #include #include +#include extern "C" { #include @@ -24,6 +25,7 @@ extern "C" { #include "macaddress.h" #include "response_publisher.h" #include "recorder.h" +#include "schema.h" const char delimiter = ':'; const char list_item_delimiter = ','; @@ -34,21 +36,25 @@ const char range_specifier = '-'; const char config_db_key_delimiter = '|'; const char state_db_key_delimiter = '|'; -#define INVM_PLATFORM_SUBSTRING "innovium" +#define MRVL_TL_PLATFORM_SUBSTRING "marvell-teralynx" +#define MRVL_PRST_PLATFORM_SUBSTRING "marvell-prestera" #define MLNX_PLATFORM_SUBSTRING "mellanox" #define BRCM_PLATFORM_SUBSTRING "broadcom" #define BRCM_DNX_PLATFORM_SUBSTRING "broadcom-dnx" #define BFN_PLATFORM_SUBSTRING "barefoot" #define VS_PLATFORM_SUBSTRING "vs" #define NPS_PLATFORM_SUBSTRING "nephos" -#define MRVL_PLATFORM_SUBSTRING "marvell" #define CISCO_8000_PLATFORM_SUBSTRING "cisco-8000" #define XS_PLATFORM_SUBSTRING "xsight" +#define CLX_PLATFORM_SUBSTRING "clounix" #define CONFIGDB_KEY_SEPARATOR "|" #define DEFAULT_KEY_SEPARATOR ":" #define VLAN_SUB_INTERFACE_SEPARATOR "." +#define RING_SIZE 30 +#define SLEEP_MSECONDS 500 + const int default_orch_pri = 0; typedef enum @@ -88,6 +94,10 @@ typedef std::pair table_name_with_pri_t; class Orch; +using AnyTask = std::function; // represents a function with no argument and returns void + +class RingBuffer; + // Design assumption // 1. one Orch can have one or more Executor // 2. one Executor must belong to one and only one Orch @@ -124,6 +134,10 @@ class Executor : public swss::Selectable return m_name; } + Orch *getOrch() const { return m_orch; } + static std::shared_ptr gRingBuffer; + void processAnyTask(AnyTask&& func); + protected: swss::Selectable *m_selectable; Orch *m_orch; @@ -135,6 +149,8 @@ class Executor : public swss::Selectable swss::Selectable *getSelectable() const { return m_selectable; } }; +typedef std::map> ConsumerMap; + class ConsumerBase : public Executor { public: ConsumerBase(swss::Selectable *selectable, Orch *orch, const std::string &name) @@ -163,11 +179,46 @@ class ConsumerBase : public Executor { // Returns: the number of entries added to m_toSync size_t addToSync(const std::deque &entries); + size_t addToSync(std::shared_ptr> entries); size_t refillToSync(); size_t refillToSync(swss::Table* table); }; +class RingBuffer +{ +private: + std::vector buffer; + int head = 0; + int tail = 0; + std::set m_consumerSet; + + std::condition_variable cv; + std::mutex mtx; + bool idle_status = true; + +public: + RingBuffer(int size=RING_SIZE); + bool thread_created = false; + std::atomic thread_exited{false}; + + // pause the ring thread if the buffer is empty + void pauseThread(); + // wake up the ring thread in case it's locked but not empty + void notify(); + + bool IsFull() const; + bool IsEmpty() const; + bool IsIdle() const; + + bool push(AnyTask entry); + bool pop(AnyTask& entry); + + void addExecutor(Executor* executor); + bool serves(const std::string& tableName); + void setIdle(bool idle); +}; + class Consumer : public ConsumerBase { public: Consumer(swss::ConsumerTableBase *select, Orch *orch, const std::string &name) @@ -175,7 +226,7 @@ class Consumer : public ConsumerBase { { } - swss::TableBase *getConsumerTable() const override + swss::ConsumerTableBase *getConsumerTable() const override { // ConsumerTableBase is a subclass of TableBase return static_cast(getSelectable()); @@ -201,8 +252,6 @@ class Consumer : public ConsumerBase { void drain() override; }; -typedef std::map> ConsumerMap; - typedef enum { success, @@ -221,10 +270,14 @@ class Orch public: Orch(swss::DBConnector *db, const std::string tableName, int pri = default_orch_pri); Orch(swss::DBConnector *db, const std::vector &tableNames); + Orch(swss::DBConnector *db1, swss::DBConnector *db2, + const std::vector &tableNames_1, const std::vector &tableNames_2); Orch(swss::DBConnector *db, const std::vector &tableNameWithPri); Orch(const std::vector& tables); virtual ~Orch() = default; + static std::shared_ptr gRingBuffer; + std::vector getSelectables(); // add the existing table data (left by warm reboot) to the consumer todo task list. @@ -243,6 +296,16 @@ class Orch virtual void doTask(swss::NotificationConsumer &consumer) { } virtual void doTask(swss::SelectableTimer &timer) { } + /* + * Called once after APPLY_VIEW in warm/fast boot scenario. + * Orch can override this method to perform orch specific operations after boot is finished. + * These operations are not meant to produce additional ASIC configuration, + * instead a capability fetch and STATE_DB update here is encouraged. + * Orch is not expected to call the base method implementation as it must remain + * empty for compatibility reasons. + */ + virtual void onWarmBootEnd() { } + void dumpPendingTasks(std::vector &ts); /** @@ -271,7 +334,7 @@ class Orch void addExecutor(Executor* executor); Executor *getExecutor(std::string executorName); - ResponsePublisher m_publisher; + ResponsePublisher m_publisher{"APPL_STATE_DB"}; private: void addConsumer(swss::DBConnector *db, std::string tableName, int pri = default_orch_pri); }; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 8861d49ca00..dbb0ee4a0d1 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -7,10 +7,12 @@ #include #include "warm_restart.h" #include +#include "orch_zmq_config.h" #define SAI_SWITCH_ATTR_CUSTOM_RANGE_BASE SAI_SWITCH_ATTR_CUSTOM_RANGE_START #include "sairedis.h" #include "chassisorch.h" +#include "stporch.h" using namespace std; using namespace swss; @@ -19,12 +21,13 @@ using namespace swss; #define SELECT_TIMEOUT 1000 #define PFC_WD_POLL_MSECS 100 -/* orchagent heart beat message interval */ -#define HEART_BEAT_INTERVAL_MSECS 10 * 1000 +#define APP_FABRIC_MONITOR_PORT_TABLE_NAME "FABRIC_PORT_TABLE" +#define APP_FABRIC_MONITOR_DATA_TABLE_NAME "FABRIC_MONITOR_TABLE" extern sai_switch_api_t* sai_switch_api; extern sai_object_id_t gSwitchId; extern string gMySwitchType; +extern string gMySwitchSubType; extern void syncd_apply_view(); /* @@ -60,6 +63,11 @@ Srv6Orch *gSrv6Orch; FlowCounterRouteOrch *gFlowCounterRouteOrch; DebugCounterOrch *gDebugCounterOrch; MonitorOrch *gMonitorOrch; +TunnelDecapOrch *gTunneldecapOrch; +StpOrch *gStpOrch; +MuxOrch *gMuxOrch; +IcmpOrch *gIcmpOrch; +HFTelOrch *gHFTOrch; bool gIsNatSupported = false; event_handle_t g_events_handle; @@ -83,6 +91,16 @@ OrchDaemon::~OrchDaemon() { SWSS_LOG_ENTER(); + // Stop the ring thread before delete orch pointers + if (ring_thread.joinable()) { + // notify the ring_thread to exit + gRingBuffer->thread_exited = true; + gRingBuffer->notify(); + // wait for the ring_thread to exit + ring_thread.join(); + disableRingBuffer(); + } + /* * Some orchagents call other agents in their destructor. * To avoid accessing deleted agent, do deletion in reverse order. @@ -101,6 +119,48 @@ OrchDaemon::~OrchDaemon() events_deinit_publisher(g_events_handle); } +void OrchDaemon::popRingBuffer() +{ + SWSS_LOG_ENTER(); + + // make sure there is only one thread created to run popRingBuffer() + if (!gRingBuffer || gRingBuffer->thread_created) + return; + + gRingBuffer->thread_created = true; + SWSS_LOG_NOTICE("OrchDaemon starts the popRingBuffer thread!"); + + while (!gRingBuffer->thread_exited) + { + gRingBuffer->pauseThread(); + + gRingBuffer->setIdle(false); + + AnyTask func; + while (gRingBuffer->pop(func)) { + func(); + } + + gRingBuffer->setIdle(true); + } +} + +/** + * This function initializes gRingBuffer, otherwise it's nullptr. + */ +void OrchDaemon::enableRingBuffer() { + gRingBuffer = std::make_shared(); + Executor::gRingBuffer = gRingBuffer; + Orch::gRingBuffer = gRingBuffer; + SWSS_LOG_NOTICE("RingBuffer created at %p!", (void *)gRingBuffer.get()); +} + +void OrchDaemon::disableRingBuffer() { + gRingBuffer = nullptr; + Executor::gRingBuffer = nullptr; + Orch::gRingBuffer = nullptr; +} + bool OrchDaemon::init() { SWSS_LOG_ENTER(); @@ -115,10 +175,14 @@ bool OrchDaemon::init() TableConnector app_switch_table(m_applDb, APP_SWITCH_TABLE_NAME); TableConnector conf_asic_sensors(m_configDb, CFG_ASIC_SENSORS_TABLE_NAME); TableConnector conf_switch_hash(m_configDb, CFG_SWITCH_HASH_TABLE_NAME); + TableConnector conf_switch_trim(m_configDb, CFG_SWITCH_TRIMMING_TABLE_NAME); + TableConnector conf_suppress_asic_sdk_health_categories(m_configDb, CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME); vector switch_tables = { conf_switch_hash, + conf_switch_trim, conf_asic_sensors, + conf_suppress_asic_sdk_health_categories, app_switch_table }; @@ -145,8 +209,19 @@ bool OrchDaemon::init() TableConnector stateDbFdb(m_stateDb, STATE_FDB_TABLE_NAME); TableConnector stateMclagDbFdb(m_stateDb, STATE_MCLAG_REMOTE_FDB_TABLE_NAME); gFdbOrch = new FdbOrch(m_applDb, app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + TableConnector stateDbBfdSessionTable(m_stateDb, STATE_BFD_SESSION_TABLE_NAME); + + BgpGlobalStateOrch* bgp_global_state_orch; + bgp_global_state_orch = new BgpGlobalStateOrch(m_configDb, CFG_BGP_DEVICE_GLOBAL_TABLE_NAME); + gDirectory.set(bgp_global_state_orch); + gBfdOrch = new BfdOrch(m_applDb, APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); + gDirectory.set(gBfdOrch); + + TableConnector stateDbIcmpSessionTable(m_stateDb, STATE_ICMP_ECHO_SESSION_TABLE_NAME); + gIcmpOrch = new IcmpOrch(m_applDb, APP_ICMP_ECHO_SESSION_TABLE_NAME, stateDbIcmpSessionTable); + gDirectory.set(gIcmpOrch); static const vector route_pattern_tables = { CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, @@ -154,6 +229,15 @@ bool OrchDaemon::init() gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_configDb, route_pattern_tables); gDirectory.set(gFlowCounterRouteOrch); + vector stp_tables = { + APP_STP_VLAN_INSTANCE_TABLE_NAME, + APP_STP_PORT_STATE_TABLE_NAME, + APP_STP_FASTAGEING_FLUSH_TABLE_NAME, + APP_STP_INST_PORT_FLUSH_TABLE_NAME + }; + gStpOrch = new StpOrch(m_applDb, m_stateDb, stp_tables); + gDirectory.set(gStpOrch); + vector vnet_tables = { APP_VNET_RT_TABLE_NAME, APP_VNET_RT_TUNNEL_TABLE_NAME @@ -184,7 +268,9 @@ bool OrchDaemon::init() gDirectory.set(chassis_frontend_orch); gIntfsOrch = new IntfsOrch(m_applDb, APP_INTF_TABLE_NAME, vrf_orch, m_chassisAppDb); + gDirectory.set(gIntfsOrch); gNeighOrch = new NeighOrch(m_applDb, APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassisAppDb); + gDirectory.set(gNeighOrch); const int fgnhgorch_pri = 15; @@ -197,11 +283,19 @@ bool OrchDaemon::init() gFgNhgOrch = new FgNhgOrch(m_configDb, m_applDb, m_stateDb, fgnhg_tables, gNeighOrch, gIntfsOrch, vrf_orch); gDirectory.set(gFgNhgOrch); - vector srv6_tables = { - APP_SRV6_SID_LIST_TABLE_NAME, - APP_SRV6_MY_SID_TABLE_NAME + TableConnector srv6_sid_list_table(m_applDb, APP_SRV6_SID_LIST_TABLE_NAME); + TableConnector srv6_my_sid_table(m_applDb, APP_SRV6_MY_SID_TABLE_NAME); + TableConnector pic_context_table(m_applDb, APP_PIC_CONTEXT_TABLE_NAME); + TableConnector srv6_my_sid_cfg_table(m_configDb, CFG_SRV6_MY_SID_TABLE_NAME); + + vector srv6_tables = { + srv6_sid_list_table, + srv6_my_sid_table, + pic_context_table, + srv6_my_sid_cfg_table }; - gSrv6Orch = new Srv6Orch(m_applDb, srv6_tables, gSwitchOrch, vrf_orch, gNeighOrch); + + gSrv6Orch = new Srv6Orch(m_configDb, m_applDb, srv6_tables, gSwitchOrch, vrf_orch, gNeighOrch); gDirectory.set(gSrv6Orch); const int routeorch_pri = 5; @@ -209,12 +303,23 @@ bool OrchDaemon::init() { APP_ROUTE_TABLE_NAME, routeorch_pri }, { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } }; - gRouteOrch = new RouteOrch(m_applDb, route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, vrf_orch, gFgNhgOrch, gSrv6Orch); + + // Enable the fpmsyncd service to send Route events to orchagent via the ZMQ channel. + auto enable_route_zmq = get_feature_status(ORCH_NORTHBOND_ROUTE_ZMQ_ENABLED, false); + auto route_zmq_sever = enable_route_zmq ? m_zmqServer : nullptr; + + gRouteOrch = new RouteOrch(m_applDb, route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, vrf_orch, gFgNhgOrch, gSrv6Orch, route_zmq_sever); gNhgOrch = new NhgOrch(m_applDb, APP_NEXTHOP_GROUP_TABLE_NAME); gCbfNhgOrch = new CbfNhgOrch(m_applDb, APP_CLASS_BASED_NEXT_HOP_GROUP_TABLE_NAME); gCoppOrch = new CoppOrch(m_applDb, APP_COPP_TABLE_NAME); - TunnelDecapOrch *tunnel_decap_orch = new TunnelDecapOrch(m_applDb, APP_TUNNEL_DECAP_TABLE_NAME); + + vector tunnel_tables = { + APP_TUNNEL_DECAP_TABLE_NAME, + APP_TUNNEL_DECAP_TERM_TABLE_NAME + }; + gTunneldecapOrch = new TunnelDecapOrch(m_applDb, m_stateDb, m_configDb, tunnel_tables); + gDirectory.set(gTunneldecapOrch); VxlanTunnelOrch *vxlan_tunnel_orch = new VxlanTunnelOrch(m_stateDb, m_applDb, APP_VXLAN_TUNNEL_TABLE_NAME); gDirectory.set(vxlan_tunnel_orch); @@ -232,40 +337,6 @@ bool OrchDaemon::init() NvgreTunnelMapOrch *nvgre_tunnel_map_orch = new NvgreTunnelMapOrch(m_configDb, CFG_NVGRE_TUNNEL_MAP_TABLE_NAME); gDirectory.set(nvgre_tunnel_map_orch); - vector dash_vnet_tables = { - APP_DASH_VNET_TABLE_NAME, - APP_DASH_VNET_MAPPING_TABLE_NAME - }; - DashVnetOrch *dash_vnet_orch = new DashVnetOrch(m_applDb, dash_vnet_tables, m_zmqServer); - gDirectory.set(dash_vnet_orch); - - vector dash_tables = { - APP_DASH_APPLIANCE_TABLE_NAME, - APP_DASH_ROUTING_TYPE_TABLE_NAME, - APP_DASH_ENI_TABLE_NAME, - APP_DASH_QOS_TABLE_NAME - }; - - DashOrch *dash_orch = new DashOrch(m_applDb, dash_tables, m_zmqServer); - gDirectory.set(dash_orch); - - vector dash_route_tables = { - APP_DASH_ROUTE_TABLE_NAME, - APP_DASH_ROUTE_RULE_TABLE_NAME - }; - - DashRouteOrch *dash_route_orch = new DashRouteOrch(m_applDb, dash_route_tables, dash_orch, m_zmqServer); - gDirectory.set(dash_route_orch); - - vector dash_acl_tables = { - APP_DASH_PREFIX_TAG_TABLE_NAME, - APP_DASH_ACL_IN_TABLE_NAME, - APP_DASH_ACL_OUT_TABLE_NAME, - APP_DASH_ACL_GROUP_TABLE_NAME, - APP_DASH_ACL_RULE_TABLE_NAME - }; - DashAclOrch *dash_acl_orch = new DashAclOrch(m_applDb, dash_acl_tables, dash_orch, m_zmqServer); - gDirectory.set(dash_acl_orch); vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, @@ -306,7 +377,7 @@ bool OrchDaemon::init() TableConnector stateDbMirrorSession(m_stateDb, STATE_MIRROR_SESSION_TABLE_NAME); TableConnector confDbMirrorSession(m_configDb, CFG_MIRROR_SESSION_TABLE_NAME); - gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch); + gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch, gSwitchOrch); TableConnector confDbAclTable(m_configDb, CFG_ACL_TABLE_TABLE_NAME); TableConnector confDbAclTableType(m_configDb, CFG_ACL_TABLE_TYPE_TABLE_NAME); @@ -348,7 +419,8 @@ bool OrchDaemon::init() vector debug_counter_tables = { CFG_DEBUG_COUNTER_TABLE_NAME, - CFG_DEBUG_COUNTER_DROP_REASON_TABLE_NAME + CFG_DEBUG_COUNTER_DROP_REASON_TABLE_NAME, + CFG_DEBUG_DROP_MONITOR_TABLE_NAME }; gDebugCounterOrch = new DebugCounterOrch(m_configDb, debug_counter_tables, 1000); @@ -370,8 +442,8 @@ bool OrchDaemon::init() CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME }; - MuxOrch *mux_orch = new MuxOrch(m_configDb, mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); - gDirectory.set(mux_orch); + gMuxOrch = new MuxOrch(m_configDb, mux_tables, gTunneldecapOrch, gNeighOrch, gFdbOrch); + gDirectory.set(gMuxOrch); MuxCableOrch *mux_cb_orch = new MuxCableOrch(m_applDb, m_stateDb, APP_MUX_CABLE_TABLE_NAME); gDirectory.set(mux_cb_orch); @@ -399,7 +471,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. * For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch, mux_orch, mux_cb_orch, gMonitorOrch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gFgNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, gTunneldecapOrch, sflow_orch, gDebugCounterOrch, gMacsecOrch, bgp_global_state_orch, gBfdOrch, gIcmpOrch, gSrv6Orch, gMuxOrch, mux_cb_orch, gMonitorOrch, gStpOrch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) @@ -496,26 +568,32 @@ bool OrchDaemon::init() m_orchList.push_back(gNatOrch); m_orchList.push_back(gMlagOrch); m_orchList.push_back(gIsoGrpOrch); - m_orchList.push_back(gFgNhgOrch); m_orchList.push_back(mux_st_orch); m_orchList.push_back(nvgre_tunnel_orch); m_orchList.push_back(nvgre_tunnel_map_orch); - m_orchList.push_back(dash_acl_orch); - m_orchList.push_back(dash_vnet_orch); - m_orchList.push_back(dash_route_orch); - m_orchList.push_back(dash_orch); if (m_fabricEnabled) { + // register APP_FABRIC_MONITOR_PORT_TABLE_NAME table + const int fabric_portsorch_base_pri = 30; vector fabric_port_tables = { - // empty for now + { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri }, + { APP_FABRIC_MONITOR_DATA_TABLE_NAME, fabric_portsorch_base_pri } }; gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables, m_fabricPortStatEnabled, m_fabricQueueStatEnabled); m_orchList.push_back(gFabricPortsOrch); } + if (gMySwitchSubType == "SmartSwitch") + { + DashEniFwdOrch *dash_eni_fwd_orch = new DashEniFwdOrch(m_configDb, m_applDb, APP_DASH_ENI_FORWARD_TABLE, gNeighOrch); + gDirectory.set(dash_eni_fwd_orch); + m_orchList.push_back(dash_eni_fwd_orch); + } + vector flex_counter_tables = { - CFG_FLEX_COUNTER_TABLE_NAME + CFG_FLEX_COUNTER_TABLE_NAME, + CFG_DEVICE_METADATA_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_configDb, flex_counter_tables); @@ -567,7 +645,9 @@ bool OrchDaemon::init() queueAttrIds, PFC_WD_POLL_MSECS)); } - else if ((platform == INVM_PLATFORM_SUBSTRING) + else if ((platform == MRVL_TL_PLATFORM_SUBSTRING) + || (platform == MRVL_PRST_PLATFORM_SUBSTRING) + || (platform == CLX_PLATFORM_SUBSTRING) || (platform == BFN_PLATFORM_SUBSTRING) || (platform == NPS_PLATFORM_SUBSTRING)) { @@ -600,7 +680,10 @@ bool OrchDaemon::init() static const vector queueAttrIds; - if ((platform == INVM_PLATFORM_SUBSTRING) || (platform == NPS_PLATFORM_SUBSTRING)) + if ((platform == MRVL_PRST_PLATFORM_SUBSTRING) || + (platform == MRVL_TL_PLATFORM_SUBSTRING) || + (platform == CLX_PLATFORM_SUBSTRING) || + (platform == NPS_PLATFORM_SUBSTRING)) { m_orchList.push_back(new PfcWdSwOrch( m_configDb, @@ -654,7 +737,25 @@ bool OrchDaemon::init() SAI_QUEUE_ATTR_PAUSE_STATUS, }; - if(gSwitchOrch->checkPfcDlrInitEnable()) + bool pfcDlrInit = gSwitchOrch->checkPfcDlrInitEnable(); + + // Override pfcDlrInit if needed, and this change is only for PFC tests. + if(getenv("PFC_DLR_INIT_ENABLE")) + { + string envPfcDlrInit = getenv("PFC_DLR_INIT_ENABLE"); + if(envPfcDlrInit == "1") + { + pfcDlrInit = true; + SWSS_LOG_NOTICE("Override PfcDlrInitEnable to true"); + } + else if(envPfcDlrInit == "0") + { + pfcDlrInit = false; + SWSS_LOG_NOTICE("Override PfcDlrInitEnable to false"); + } + } + + if(pfcDlrInit) { m_orchList.push_back(new PfcWdSwOrch( m_configDb, @@ -721,6 +822,26 @@ bool OrchDaemon::init() gP4Orch = new P4Orch(m_applDb, p4rt_tables, vrf_orch, gCoppOrch); m_orchList.push_back(gP4Orch); + TableConnector confDbTwampTable(m_configDb, CFG_TWAMP_SESSION_TABLE_NAME); + TableConnector stateDbTwampTable(m_stateDb, STATE_TWAMP_SESSION_TABLE_NAME); + TwampOrch *twamp_orch = new TwampOrch(confDbTwampTable, stateDbTwampTable, gSwitchOrch, gPortsOrch, vrf_orch); + m_orchList.push_back(twamp_orch); + + if (HFTelOrch::isSupportedHFTel(gSwitchId)) + { + const vector stel_tables = { + CFG_HIGH_FREQUENCY_TELEMETRY_PROFILE_TABLE_NAME, + CFG_HIGH_FREQUENCY_TELEMETRY_GROUP_TABLE_NAME + }; + gHFTOrch = new HFTelOrch(m_configDb, m_stateDb, stel_tables); + m_orchList.push_back(gHFTOrch); + SWSS_LOG_NOTICE("High Frequency Telemetry is supported on this platform"); + } + else + { + SWSS_LOG_NOTICE("High Frequency Telemetry is not supported on this platform"); + } + if (WarmStart::isWarmStart()) { bool suc = warmRestoreAndSyncUp(); @@ -744,12 +865,27 @@ void OrchDaemon::flush() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to flush redis pipeline %d", status); - handleSaiFailure(true); + handleSaiFailure(SAI_API_SWITCH, "set", status); } - for (auto* orch: m_orchList) + /* + * Don't flush if ringbuffer is enable and it is not empty or Idle. Ring buffer thread + * could trigger notification update. + * + * Flush would be triggered later after SELECT_TIMEOUT in main thread again + * for avoiding race condition. + */ + if (gRingBuffer &&(!gRingBuffer->IsEmpty() || !gRingBuffer->IsIdle())) + { + gRingBuffer->notify(); + SWSS_LOG_WARN("Skip Flush waiting for RingBuffer empty"); + } + else { - orch->flushResponses(); + for (auto* orch: m_orchList) + { + orch->flushResponses(); + } } } @@ -767,12 +903,14 @@ void OrchDaemon::logRotate() { } -void OrchDaemon::start() +void OrchDaemon::start(long heartBeatInterval) { SWSS_LOG_ENTER(); Recorder::Instance().sairedis.setRotate(false); + ring_thread = std::thread(&OrchDaemon::popRingBuffer, this); + for (Orch *o : m_orchList) { m_select->addSelectables(o->getSelectables()); @@ -788,7 +926,7 @@ void OrchDaemon::start() ret = m_select->select(&s, SELECT_TIMEOUT); auto tend = std::chrono::high_resolution_clock::now(); - heartBeat(tend); + heartBeat(tend, heartBeatInterval); auto diff = std::chrono::duration_cast(tend - tstart); @@ -813,6 +951,20 @@ void OrchDaemon::start() * requests live in it. When the daemon has nothing to do, it * is a good chance to flush the pipeline */ flush(); + + if (gRingBuffer) + { + if (!gRingBuffer->IsEmpty() || !gRingBuffer->IsIdle()) + { + gRingBuffer->notify(); + } + else + { + for (Orch *o : m_orchList) + o->doTask(); + } + } + continue; } @@ -830,10 +982,11 @@ void OrchDaemon::start() /* After each iteration, periodically check all m_toSync map to * execute all the remaining tasks that need to be retried. */ - /* TODO: Abstract Orch class to have a specific todo list */ - for (Orch *o : m_orchList) - o->doTask(); - + if (!gRingBuffer || (gRingBuffer->IsEmpty() && gRingBuffer->IsIdle())) + { + for (Orch *o : m_orchList) + o->doTask(); + } /* * Asked to check warm restart readiness. * Not doing this under Select::TIMEOUT condition because of @@ -845,6 +998,16 @@ void OrchDaemon::start() if (ret) { // Orchagent is ready to perform warm restart, stop processing any new db data. + // but should finish data that already in the ring + if (gRingBuffer) + { + while (!gRingBuffer->IsEmpty() || !gRingBuffer->IsIdle()) + { + gRingBuffer->notify(); + std::this_thread::sleep_for(std::chrono::milliseconds(SLEEP_MSECONDS)); + } + } + // Should sleep here or continue handling timers and etc.?? if (!gSwitchOrch->checkRestartNoFreeze()) { @@ -865,7 +1028,7 @@ void OrchDaemon::start() flush(); SWSS_LOG_WARN("Orchagent is frozen for warm restart!"); - freezeAndHeartBeat(UINT_MAX); + freezeAndHeartBeat(UINT_MAX, heartBeatInterval); } } } @@ -878,6 +1041,8 @@ void OrchDaemon::start() */ bool OrchDaemon::warmRestoreAndSyncUp() { + SWSS_LOG_ENTER(); + WarmStart::setWarmStartState("orchagent", WarmStart::INITIALIZED); for (Orch *o : m_orchList) @@ -885,6 +1050,10 @@ bool OrchDaemon::warmRestoreAndSyncUp() o->bake(); } + // let's cache the neighbor updates in mux orch and + // process them after everything being settled. + gMuxOrch->enableCachingNeighborUpdate(); + /* * Three iterations are needed. * @@ -911,6 +1080,9 @@ bool OrchDaemon::warmRestoreAndSyncUp() } } + gMuxOrch->updateCachedNeighbors(); + gMuxOrch->disableCachingNeighborUpdate(); + // MirrorOrch depends on everything else being settled before it can run, // and mirror ACL rules depend on MirrorOrch, so run these two at the end // after the rest of the data has been processed. @@ -933,8 +1105,10 @@ bool OrchDaemon::warmRestoreAndSyncUp() syncd_apply_view(); - /* Start dynamic state sync up */ - gPortsOrch->refreshPortStatus(); + for (Orch *o : m_orchList) + { + o->onWarmBootEnd(); + } /* * Note. Arp sync up is handled in neighsyncd. @@ -1022,11 +1196,17 @@ void OrchDaemon::addOrchList(Orch *o) m_orchList.push_back(o); } -void OrchDaemon::heartBeat(std::chrono::time_point tcurrent) +void OrchDaemon::heartBeat(std::chrono::time_point tcurrent, long interval) { + if (interval == 0) + { + // disable heart beat feature when interval is 0 + return; + } + // output heart beat message to SYSLOG auto diff = std::chrono::duration_cast(tcurrent - m_lastHeartBeat); - if (diff.count() >= HEART_BEAT_INTERVAL_MSECS) + if (diff.count() >= interval) { m_lastHeartBeat = tcurrent; // output heart beat message to supervisord with 'PROCESS_COMMUNICATION_STDOUT' event: http://supervisord.org/events.html @@ -1034,13 +1214,13 @@ void OrchDaemon::heartBeat(std::chrono::time_point 0) { // Send heartbeat message to prevent Orchagent stuck alert. auto tend = std::chrono::high_resolution_clock::now(); - heartBeat(tend); + heartBeat(tend, interval); duration--; sleep(1); @@ -1061,8 +1241,10 @@ bool FabricOrchDaemon::init() SWSS_LOG_ENTER(); SWSS_LOG_NOTICE("FabricOrchDaemon init"); + const int fabric_portsorch_base_pri = 30; vector fabric_port_tables = { - // empty for now, I don't consume anything yet + { APP_FABRIC_MONITOR_PORT_TABLE_NAME, fabric_portsorch_base_pri }, + { APP_FABRIC_MONITOR_DATA_TABLE_NAME, fabric_portsorch_base_pri } }; gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables); addOrchList(gFabricPortsOrch); @@ -1074,3 +1256,103 @@ bool FabricOrchDaemon::init() return true; } + +DpuOrchDaemon::DpuOrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector *stateDb, DBConnector *chassisAppDb, DBConnector *dpuAppDb, DBConnector *dpuAppstateDb, ZmqServer *zmqServer) : + OrchDaemon(applDb, configDb, stateDb, chassisAppDb, zmqServer), + m_dpu_appDb(dpuAppDb), + m_dpu_appstateDb(dpuAppstateDb) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_NOTICE("DpuOrchDaemon starting..."); +} + +bool DpuOrchDaemon::init() +{ + SWSS_LOG_NOTICE("DpuOrchDaemon init..."); + OrchDaemon::init(); + + // Enable the gNMI service to send DASH events to orchagent via the ZMQ channel. + ZmqServer *dash_zmq_server = nullptr; + if (get_feature_status(ORCH_NORTHBOND_DASH_ZMQ_ENABLED, true)) + { + SWSS_LOG_NOTICE("Enable the gNMI service to send DASH events to orchagent via the ZMQ channel."); + dash_zmq_server = m_zmqServer; + } + + vector dash_vnet_tables = { + APP_DASH_VNET_TABLE_NAME, + APP_DASH_VNET_MAPPING_TABLE_NAME + }; + DashVnetOrch *dash_vnet_orch = new DashVnetOrch(m_dpu_appDb, dash_vnet_tables, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_vnet_orch); + + vector dash_tables = { + APP_DASH_APPLIANCE_TABLE_NAME, + APP_DASH_ROUTING_TYPE_TABLE_NAME, + APP_DASH_ENI_TABLE_NAME, + APP_DASH_ENI_ROUTE_TABLE_NAME, + APP_DASH_QOS_TABLE_NAME + }; + + DashOrch *dash_orch = new DashOrch(m_dpu_appDb, dash_tables, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_orch); + + vector dash_ha_tables = { + APP_DASH_HA_SET_TABLE_NAME, + APP_DASH_HA_SCOPE_TABLE_NAME, + APP_BFD_SESSION_TABLE_NAME + }; + + DashHaOrch *dash_ha_orch = new DashHaOrch(m_dpu_appDb, dash_ha_tables, dash_orch, gBfdOrch, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_ha_orch); + + vector dash_route_tables = { + APP_DASH_ROUTE_TABLE_NAME, + APP_DASH_ROUTE_RULE_TABLE_NAME, + APP_DASH_ROUTE_GROUP_TABLE_NAME + }; + + DashRouteOrch *dash_route_orch = new DashRouteOrch(m_dpu_appDb, dash_route_tables, dash_orch, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_route_orch); + + vector dash_acl_tables = { + APP_DASH_PREFIX_TAG_TABLE_NAME, + APP_DASH_ACL_IN_TABLE_NAME, + APP_DASH_ACL_OUT_TABLE_NAME, + APP_DASH_ACL_GROUP_TABLE_NAME, + APP_DASH_ACL_RULE_TABLE_NAME + }; + DashAclOrch *dash_acl_orch = new DashAclOrch(m_dpu_appDb, dash_acl_tables, dash_orch, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_acl_orch); + + vector dash_tunnel_tables = { + APP_DASH_TUNNEL_TABLE_NAME + }; + DashTunnelOrch *dash_tunnel_orch = new DashTunnelOrch(m_dpu_appDb, dash_tunnel_tables, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_tunnel_orch); + + vector dash_meter_tables = { + APP_DASH_METER_POLICY_TABLE_NAME, + APP_DASH_METER_RULE_TABLE_NAME + }; + DashMeterOrch *dash_meter_orch = new DashMeterOrch(m_dpu_appDb, dash_meter_tables, dash_orch, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_meter_orch); + + vector dash_port_map_tables = { + APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, + APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME + }; + DashPortMapOrch *dash_port_map_orch = new DashPortMapOrch(m_dpu_appDb, dash_port_map_tables, m_dpu_appstateDb, dash_zmq_server); + gDirectory.set(dash_port_map_orch); + + addOrchList(dash_acl_orch); + addOrchList(dash_vnet_orch); + addOrchList(dash_route_orch); + addOrchList(dash_orch); + addOrchList(dash_tunnel_orch); + addOrchList(dash_meter_orch); + addOrchList(dash_ha_orch); + addOrchList(dash_port_map_orch); + + return true; +} diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index 803a720c3cb..2041c6bfff3 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -44,12 +44,21 @@ #include "macsecorch.h" #include "p4orch/p4orch.h" #include "bfdorch.h" +#include "icmporch.h" #include "srv6orch.h" #include "nvgreorch.h" +#include "twamporch.h" +#include "stporch.h" +#include "dash/dashenifwdorch.h" #include "dash/dashaclorch.h" #include "dash/dashorch.h" #include "dash/dashrouteorch.h" +#include "dash/dashtunnelorch.h" #include "dash/dashvnetorch.h" +#include "dash/dashhaorch.h" +#include "dash/dashmeterorch.h" +#include "dash/dashportmaporch.h" +#include "high_frequency_telemetry/hftelorch.h" #include using namespace swss; @@ -58,10 +67,10 @@ class OrchDaemon { public: OrchDaemon(DBConnector *, DBConnector *, DBConnector *, DBConnector *, ZmqServer *); - ~OrchDaemon(); + virtual ~OrchDaemon(); virtual bool init(); - void start(); + void start(long heartBeatInterval); bool warmRestoreAndSyncUp(); void getTaskToSync(vector &ts); bool warmRestoreValidation(); @@ -82,7 +91,25 @@ class OrchDaemon m_fabricQueueStatEnabled = enabled; } void logRotate(); -private: + + // Two required API to support ring buffer feature + /** + * This method is used by a ring buffer consumer [Orchdaemon] to initialzie its ring, + * and populate this ring's pointer to the producers [Orch, Consumer], to make sure that + * they are connected to the same ring. + */ + void enableRingBuffer(); + void disableRingBuffer(); + /** + * This method describes how the ring consumer consumes this ring. + */ + void popRingBuffer(); + + std::shared_ptr gRingBuffer = nullptr; + + std::thread ring_thread; + +protected: DBConnector *m_applDb; DBConnector *m_configDb; DBConnector *m_stateDb; @@ -95,14 +122,13 @@ class OrchDaemon std::vector m_orchList; Select *m_select; - std::chrono::time_point m_lastHeartBeat; void flush(); - void heartBeat(std::chrono::time_point tcurrent); + void heartBeat(std::chrono::time_point tcurrent, long interval); - void freezeAndHeartBeat(unsigned int duration); + void freezeAndHeartBeat(unsigned int duration, long interval); }; class FabricOrchDaemon : public OrchDaemon @@ -115,4 +141,14 @@ class FabricOrchDaemon : public OrchDaemon DBConnector *m_configDb; }; + +class DpuOrchDaemon : public OrchDaemon +{ +public: + DpuOrchDaemon(DBConnector *, DBConnector *, DBConnector *, DBConnector *, DBConnector *, DBConnector *, ZmqServer *); + bool init() override; +private: + DBConnector *m_dpu_appDb; + DBConnector *m_dpu_appstateDb; +}; #endif /* SWSS_ORCHDAEMON_H */ diff --git a/orchagent/p4orch/acl_rule_manager.cpp b/orchagent/p4orch/acl_rule_manager.cpp index 40f20ba051e..d04cb14fcf8 100644 --- a/orchagent/p4orch/acl_rule_manager.cpp +++ b/orchagent/p4orch/acl_rule_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/acl_rule_manager.h" +#include #include #include #include @@ -9,7 +10,6 @@ #include "crmorch.h" #include "dbconnector.h" #include "intfsorch.h" -#include #include "logger.h" #include "orch.h" #include "p4orch.h" @@ -165,7 +165,8 @@ std::vector getMeterSaiAttrs(const P4AclMeter &p4_acl_meter) } // namespace -ReturnCode AclRuleManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode AclRuleManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { return StatusCode::SWSS_RC_UNIMPLEMENTED; } @@ -175,75 +176,81 @@ void AclRuleManager::enqueue(const std::string &table_name, const swss::KeyOpFie m_entries.push_back(entry); } -void AclRuleManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string db_key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); - const auto &op = kfvOp(key_op_fvs_tuple); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - SWSS_LOG_NOTICE("OP: %s, RULE_KEY: %s", op.c_str(), QuotedVar(db_key).c_str()); - - ReturnCode status; - auto app_db_entry_or = deserializeAclRuleAppDbEntry(table_name, db_key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; - - status = validateAclRuleAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for ACL rule APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } +void AclRuleManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const auto &acl_table_name = app_db_entry.acl_table_name; - const auto &acl_rule_key = - KeyGenerator::generateAclRuleKey(app_db_entry.match_fvs, std::to_string(app_db_entry.priority)); +ReturnCode AclRuleManager::drain() { + SWSS_LOG_ENTER(); - const auto &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto *acl_rule = getAclRule(acl_table_name, acl_rule_key); - if (acl_rule == nullptr) - { - status = processAddRuleRequest(acl_rule_key, app_db_entry); - } - else - { - status = processUpdateRuleRequest(app_db_entry, *acl_rule); - } - } - else if (operation == DEL_COMMAND) - { - status = processDeleteRuleRequest(acl_table_name, acl_rule_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << operation; - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, - /*replace=*/true); - } - m_entries.clear(); + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string db_key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); + const auto& op = kfvOp(key_op_fvs_tuple); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + SWSS_LOG_NOTICE("OP: %s, RULE_KEY: %s", op.c_str(), + QuotedVar(db_key).c_str()); + + auto app_db_entry_or = + deserializeAclRuleAppDbEntry(table_name, db_key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + status = validateAclRuleAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for ACL rule APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + + const auto& acl_table_name = app_db_entry.acl_table_name; + const auto& acl_rule_key = KeyGenerator::generateAclRuleKey( + app_db_entry.match_fvs, std::to_string(app_db_entry.priority)); + + const auto& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto* acl_rule = getAclRule(acl_table_name, acl_rule_key); + if (acl_rule == nullptr) { + status = processAddRuleRequest(acl_rule_key, app_db_entry); + } else { + status = processUpdateRuleRequest(app_db_entry, *acl_rule); + } + } else if (operation == DEL_COMMAND) { + status = processDeleteRuleRequest(acl_table_name, acl_rule_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << operation; + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } ReturnCode AclRuleManager::setUpUserDefinedTraps() @@ -757,6 +764,7 @@ ReturnCode AclRuleManager::setMatchValue(const acl_entry_attr_union_t attr_name, sai_attribute_value_t *value, P4AclRule *acl_rule, const std::string &ip_type_bit_type) { + SWSS_LOG_ENTER(); try { switch (attr_name) @@ -852,6 +860,7 @@ ReturnCode AclRuleManager::setMatchValue(const acl_entry_attr_union_t attr_name, case SAI_ACL_ENTRY_ATTR_FIELD_IP_IDENTIFICATION: case SAI_ACL_ENTRY_ATTR_FIELD_OUTER_VLAN_ID: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_VLAN_ID: + case SAI_ACL_ENTRY_ATTR_FIELD_VRF_ID: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT: { @@ -987,6 +996,7 @@ ReturnCode AclRuleManager::setMatchValue(const acl_entry_attr_union_t attr_name, } case SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI: case SAI_ACL_ENTRY_ATTR_FIELD_ROUTE_DST_USER_META: + case SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META: case SAI_ACL_ENTRY_ATTR_FIELD_IPV6_FLOW_LABEL: { const std::vector &value_and_mask = tokenize(attr_value, kDataMaskDelimiter); value->aclfield.data.u32 = to_uint(trim(value_and_mask[0])); @@ -1020,6 +1030,18 @@ ReturnCode AclRuleManager::setMatchValue(const acl_entry_attr_union_t attr_name, value->aclfield.mask.u32 = 0xFFFFFFFF; break; } + case SAI_ACL_ENTRY_ATTR_FIELD_IPMC_NPU_META_DST_HIT: + { + const std::vector& value_and_mask = + tokenize(attr_value, kDataMaskDelimiter); + uint8_t hit_value = to_uint(trim(value_and_mask[0])); + if (value_and_mask.size() > 1) + { + SWSS_LOG_INFO("Mask ignored for IPMC table hit field."); + } + value->aclfield.data.booldata = hit_value != 0; + break; + } default: { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "ACL match field " << attr_name << " is not supported."; diff --git a/orchagent/p4orch/acl_rule_manager.h b/orchagent/p4orch/acl_rule_manager.h index 230f226f983..fd588154ecc 100644 --- a/orchagent/p4orch/acl_rule_manager.h +++ b/orchagent/p4orch/acl_rule_manager.h @@ -42,9 +42,11 @@ class AclRuleManager : public ObjectManagerInterface virtual ~AclRuleManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; // Update counters stats for every rule in each ACL table in COUNTERS_DB, if // counters are enabled in rules. diff --git a/orchagent/p4orch/acl_table_manager.cpp b/orchagent/p4orch/acl_table_manager.cpp index 416120fa5d7..a55538aa64c 100644 --- a/orchagent/p4orch/acl_table_manager.cpp +++ b/orchagent/p4orch/acl_table_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/acl_table_manager.h" +#include #include #include #include @@ -7,7 +8,6 @@ #include "SaiAttributeList.h" #include "crmorch.h" #include "dbconnector.h" -#include #include "logger.h" #include "orch.h" #include "p4orch.h" @@ -205,7 +205,8 @@ ReturnCodeOr> AclTableManager::getUdfSaiAttrs(const return udf_attrs; } -ReturnCode AclTableManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode AclTableManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { return StatusCode::SWSS_RC_UNIMPLEMENTED; } @@ -215,86 +216,94 @@ void AclTableManager::enqueue(const std::string &table_name, const swss::KeyOpFi m_entries.push_back(entry); } -void AclTableManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string db_key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); - SWSS_LOG_NOTICE("P4AclTableManager drain tuple for table %s", QuotedVar(table_name).c_str()); - if (table_name != APP_P4RT_ACL_TABLE_DEFINITION_NAME) - { - ReturnCode status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Invalid table " << QuotedVar(table_name); - SWSS_LOG_ERROR("%s", status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); +void AclTableManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - ReturnCode status; - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto app_db_entry_or = deserializeAclTableDefinitionAppDbEntry(db_key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; +ReturnCode AclTableManager::drain() { + SWSS_LOG_ENTER(); - status = validateAclTableDefinitionAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for ACL definition APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto *acl_table_definition = getAclTable(app_db_entry.acl_table_name); - if (acl_table_definition == nullptr) - { - SWSS_LOG_NOTICE("ACL table SET %s", app_db_entry.acl_table_name.c_str()); - status = processAddTableRequest(app_db_entry); - } - else - { - // All attributes in sai_acl_table_attr_t are CREATE_ONLY. - status = ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) - << "Unable to update ACL table definition in APP DB entry with key " - << QuotedVar(table_name + ":" + db_key) - << " : All attributes in sai_acl_table_attr_t are CREATE_ONLY."; - } - } - else if (operation == DEL_COMMAND) - { - status = processDeleteTableRequest(db_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - } - if (!status.ok()) - { - SWSS_LOG_ERROR("Processed DEFINITION entry status: %s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string db_key; + + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); + SWSS_LOG_NOTICE("P4AclTableManager drain tuple for table %s", + QuotedVar(table_name).c_str()); + if (table_name != APP_P4RT_ACL_TABLE_DEFINITION_NAME) { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Invalid table " << QuotedVar(table_name); + SWSS_LOG_ERROR("%s", status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto app_db_entry_or = + deserializeAclTableDefinitionAppDbEntry(db_key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, /*replace=*/true); - } - m_entries.clear(); + break; + } + auto& app_db_entry = *app_db_entry_or; + + status = validateAclTableDefinitionAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for ACL definition APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto* acl_table_definition = getAclTable(app_db_entry.acl_table_name); + if (acl_table_definition == nullptr) { + SWSS_LOG_NOTICE("ACL table SET %s", + app_db_entry.acl_table_name.c_str()); + status = processAddTableRequest(app_db_entry); + } else { + // All attributes in sai_acl_table_attr_t are CREATE_ONLY. + status = + ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) + << "Unable to update ACL table definition in APP DB entry with key " + << QuotedVar(table_name + ":" + db_key) + << " : All attributes in sai_acl_table_attr_t are CREATE_ONLY."; + } + } else if (operation == DEL_COMMAND) { + status = processDeleteTableRequest(db_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + } + if (!status.ok()) { + SWSS_LOG_ERROR("Processed DEFINITION entry status: %s", + status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } ReturnCodeOr AclTableManager::deserializeAclTableDefinitionAppDbEntry( diff --git a/orchagent/p4orch/acl_table_manager.h b/orchagent/p4orch/acl_table_manager.h index 5ebaf459e91..04983006b9e 100644 --- a/orchagent/p4orch/acl_table_manager.h +++ b/orchagent/p4orch/acl_table_manager.h @@ -32,9 +32,11 @@ class AclTableManager : public ObjectManagerInterface virtual ~AclTableManager(); void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; // Get ACL table definition by table name in cache. Return nullptr if not // found. diff --git a/orchagent/p4orch/acl_util.cpp b/orchagent/p4orch/acl_util.cpp index 0ef81a07e37..052e31934a7 100644 --- a/orchagent/p4orch/acl_util.cpp +++ b/orchagent/p4orch/acl_util.cpp @@ -1,7 +1,8 @@ #include "p4orch/acl_util.h" -#include "converter.h" #include + +#include "converter.h" #include "logger.h" #include "sai_serialize.h" #include "table.h" @@ -894,6 +895,7 @@ bool isDiffMatchFieldValue(const acl_entry_attr_union_t attr_name, const sai_att case SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE: case SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI: case SAI_ACL_ENTRY_ATTR_FIELD_ROUTE_DST_USER_META: + case SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META: case SAI_ACL_ENTRY_ATTR_FIELD_IPV6_FLOW_LABEL: case SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_FRAG: case SAI_ACL_ENTRY_ATTR_FIELD_PACKET_VLAN: { @@ -927,6 +929,7 @@ bool isDiffMatchFieldValue(const acl_entry_attr_union_t attr_name, const sai_att case SAI_ACL_ENTRY_ATTR_FIELD_IP_IDENTIFICATION: case SAI_ACL_ENTRY_ATTR_FIELD_OUTER_VLAN_ID: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_VLAN_ID: + case SAI_ACL_ENTRY_ATTR_FIELD_VRF_ID: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT: case SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT: { @@ -952,6 +955,10 @@ bool isDiffMatchFieldValue(const acl_entry_attr_union_t attr_name, const sai_att return memcmp(value.aclfield.data.mac, old_value.aclfield.data.mac, sizeof(sai_mac_t)) || memcmp(value.aclfield.mask.mac, old_value.aclfield.mask.mac, sizeof(sai_mac_t)); } + case SAI_ACL_ENTRY_ATTR_FIELD_IPMC_NPU_META_DST_HIT: + { + return value.aclfield.data.booldata != old_value.aclfield.data.booldata; + } default: { return false; } diff --git a/orchagent/p4orch/acl_util.h b/orchagent/p4orch/acl_util.h index 8810843fd6b..652838c130a 100644 --- a/orchagent/p4orch/acl_util.h +++ b/orchagent/p4orch/acl_util.h @@ -1,11 +1,11 @@ #pragma once #include +#include #include #include #include -#include #include "p4orch/p4orch_util.h" #include "return_code.h" extern "C" @@ -243,7 +243,7 @@ struct P4AclTableDefinition P4AclTableDefinition(const std::string &acl_table_name, const sai_acl_stage_t stage, const uint32_t priority, const uint32_t size, const std::string &meter_unit, const std::string &counter_unit) : acl_table_name(acl_table_name), stage(stage), priority(priority), size(size), meter_unit(meter_unit), - counter_unit(counter_unit){}; + counter_unit(counter_unit) {}; }; struct P4UserDefinedTrapHostifTableEntry @@ -251,7 +251,7 @@ struct P4UserDefinedTrapHostifTableEntry sai_object_id_t user_defined_trap; sai_object_id_t hostif_table_entry; P4UserDefinedTrapHostifTableEntry() - : user_defined_trap(SAI_NULL_OBJECT_ID), hostif_table_entry(SAI_NULL_OBJECT_ID){}; + : user_defined_trap(SAI_NULL_OBJECT_ID), hostif_table_entry(SAI_NULL_OBJECT_ID) {}; }; using acl_rule_attr_lookup_t = std::map; @@ -324,6 +324,9 @@ using P4AclRuleTables = std::map>; #define P4_MATCH_SRC_IPV6_WORD3 "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD3" #define P4_MATCH_SRC_IPV6_WORD2 "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD2" #define P4_MATCH_ROUTE_DST_USER_META "SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META" +#define P4_MATCH_ACL_USER_META "SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META" +#define P4_MATCH_VRF_ID "SAI_ACL_TABLE_ATTR_FIELD_VRF_ID" +#define P4_MATCH_IPMC_TABLE_HIT "SAI_ACL_TABLE_ATTR_FIELD_IPMC_NPU_META_DST_HIT" #define P4_ACTION_PACKET_ACTION "SAI_ACL_ENTRY_ATTR_ACTION_PACKET_ACTION" #define P4_ACTION_REDIRECT "SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT" @@ -484,6 +487,9 @@ static const acl_table_attr_lookup_t aclMatchTableAttrLookup = { {P4_MATCH_TUNNEL_VNI, SAI_ACL_TABLE_ATTR_FIELD_TUNNEL_VNI}, {P4_MATCH_IPV6_NEXT_HEADER, SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER}, {P4_MATCH_ROUTE_DST_USER_META, SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META}, + {P4_MATCH_ACL_USER_META, SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META}, + {P4_MATCH_VRF_ID, SAI_ACL_TABLE_ATTR_FIELD_VRF_ID}, + {P4_MATCH_IPMC_TABLE_HIT, SAI_ACL_TABLE_ATTR_FIELD_IPMC_NPU_META_DST_HIT}, }; static const acl_table_attr_format_lookup_t aclMatchTableAttrFormatLookup = { @@ -533,6 +539,9 @@ static const acl_table_attr_format_lookup_t aclMatchTableAttrFormatLookup = { {SAI_ACL_TABLE_ATTR_FIELD_TUNNEL_VNI, Format::HEX_STRING}, {SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER, Format::HEX_STRING}, {SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META, Format::HEX_STRING}, + {SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META, Format::HEX_STRING}, + {SAI_ACL_TABLE_ATTR_FIELD_VRF_ID, Format::HEX_STRING}, + {SAI_ACL_TABLE_ATTR_FIELD_IPMC_NPU_META_DST_HIT, Format::HEX_STRING}, }; static const acl_table_attr_lookup_t aclCompositeMatchTableAttrLookup = { @@ -589,6 +598,9 @@ static const acl_rule_attr_lookup_t aclMatchEntryAttrLookup = { {P4_MATCH_TUNNEL_VNI, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI}, {P4_MATCH_IPV6_NEXT_HEADER, SAI_ACL_ENTRY_ATTR_FIELD_IPV6_NEXT_HEADER}, {P4_MATCH_ROUTE_DST_USER_META, SAI_ACL_ENTRY_ATTR_FIELD_ROUTE_DST_USER_META}, + {P4_MATCH_ACL_USER_META, SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META}, + {P4_MATCH_VRF_ID, SAI_ACL_ENTRY_ATTR_FIELD_VRF_ID}, + {P4_MATCH_IPMC_TABLE_HIT, SAI_ACL_ENTRY_ATTR_FIELD_IPMC_NPU_META_DST_HIT}, }; static const acl_rule_attr_lookup_t aclCompositeMatchEntryAttrLookup = { diff --git a/orchagent/p4orch/ext_tables_manager.cpp b/orchagent/p4orch/ext_tables_manager.cpp index 8b5ded6d7f0..537f67f692e 100644 --- a/orchagent/p4orch/ext_tables_manager.cpp +++ b/orchagent/p4orch/ext_tables_manager.cpp @@ -1,22 +1,22 @@ #include "p4orch/ext_tables_manager.h" +#include #include +#include #include #include #include #include -#include +#include "crmorch.h" #include "directory.h" -#include #include "logger.h" -#include "tokenize.h" #include "orch.h" -#include "crmorch.h" #include "p4orch/p4orch.h" #include "p4orch/p4orch_util.h" +#include "tokenize.h" -extern sai_counter_api_t* sai_counter_api; +extern sai_counter_api_t *sai_counter_api; extern sai_generic_programmable_api_t *sai_generic_programmable_api; extern Directory gDirectory; @@ -43,10 +43,10 @@ std::string getCrossRefTableName(const std::string table_name) auto it = FixedTablesMap.find(table_name); if (it != FixedTablesMap.end()) { - return(it->second); + return (it->second); } - return(table_name); + return (table_name); } ReturnCode ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry &app_db_entry, ActionInfo *action) @@ -55,8 +55,7 @@ ReturnCode ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry & std::unordered_map cross_ref_key_j; ReturnCode status; - for (auto param_defn_it = action->params.begin(); - param_defn_it != action->params.end(); param_defn_it++) + for (auto param_defn_it = action->params.begin(); param_defn_it != action->params.end(); param_defn_it++) { ActionParamInfo action_param_defn = param_defn_it->second; if (action_param_defn.table_reference_map.empty()) @@ -71,24 +70,24 @@ ReturnCode ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry & { SWSS_LOG_ERROR("Required param not specified for action %s\n", action_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Required param not specified for action %s " << action_name.c_str(); + << "Required param not specified for action %s " << action_name.c_str(); } for (auto cross_ref_it = action_param_defn.table_reference_map.begin(); - cross_ref_it != action_param_defn.table_reference_map.end(); cross_ref_it++) + cross_ref_it != action_param_defn.table_reference_map.end(); cross_ref_it++) { - cross_ref_key_j[cross_ref_it->first].push_back(nlohmann::json::object_t::value_type(prependMatchField(cross_ref_it->second), app_db_param_it->second)); + cross_ref_key_j[cross_ref_it->first].push_back( + nlohmann::json::object_t::value_type(prependMatchField(cross_ref_it->second), app_db_param_it->second)); } } - for (auto it = cross_ref_key_j.begin(); it != cross_ref_key_j.end(); it++) { const std::string table_name = getCrossRefTableName(it->first); const std::string table_key = it->second.dump(); std::string key; sai_object_type_t object_type; - sai_object_id_t oid; + sai_object_id_t oid; DepObject dep_object = {}; if (gP4Orch->m_p4TableToManagerMap.find(table_name) != gP4Orch->m_p4TableToManagerMap.end()) @@ -98,10 +97,10 @@ ReturnCode ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry & { SWSS_LOG_ERROR("Cross-table reference validation failed from fixed-table %s", table_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Cross-table reference valdiation failed from fixed-table"; + << "Cross-table reference valdiation failed from fixed-table"; } } - else + else { if (getTableInfo(table_name)) { @@ -109,16 +108,21 @@ ReturnCode ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry & status = getSaiObject(ext_table_key, object_type, key); if (!status.ok()) { - SWSS_LOG_ERROR("Cross-table reference validation failed from extension-table %s", table_name.c_str()); + SWSS_LOG_ERROR("Cross-table reference validation failed from extension-table %s", + table_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Cross-table reference valdiation failed from extension table"; + << "Cross-table reference valdiation failed from extension " + "table"; } } else { - SWSS_LOG_ERROR("Cross-table reference validation failed due to non-existent table %s", table_name.c_str()); + SWSS_LOG_ERROR("Cross-table reference validation failed due to non-existent table " + "%s", + table_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Cross-table reference valdiation failed due to non-existent table"; + << "Cross-table reference valdiation failed due to non-existent " + "table"; } } @@ -126,19 +130,20 @@ ReturnCode ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry & { SWSS_LOG_ERROR("Cross-table reference validation failed, no OID found from table %s", table_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Cross-table reference valdiation failed, no OID found"; + << "Cross-table reference valdiation failed, no OID found"; } if (oid == SAI_NULL_OBJECT_ID) { - SWSS_LOG_ERROR("Cross-table reference validation failed, null OID expected from table %s", table_name.c_str()); - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Cross-table reference valdiation failed, null OID"; + SWSS_LOG_ERROR("Cross-table reference validation failed, null OID expected from " + "table %s", + table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Cross-table reference valdiation failed, null OID"; } - dep_object.sai_object = object_type; - dep_object.key = key; - dep_object.oid = oid; + dep_object.sai_object = object_type; + dep_object.key = key; + dep_object.oid = oid; app_db_entry.action_dep_objects[action_name] = dep_object; } @@ -157,7 +162,7 @@ ReturnCode ExtTablesManager::validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry & { SWSS_LOG_ERROR("Not a valid extension table %s", app_db_entry.table_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Not a valid extension table " << app_db_entry.table_name.c_str(); + << "Not a valid extension table " << app_db_entry.table_name.c_str(); } if (table->action_ref_tables.empty()) @@ -167,15 +172,15 @@ ReturnCode ExtTablesManager::validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry & ActionInfo *action; for (auto app_db_action_it = app_db_entry.action_params.begin(); - app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) + app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) { auto action_name = app_db_action_it->first; action = getTableActionInfo(table, action_name); if (action == nullptr) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Not a valid action " << action_name.c_str() - << " in extension table " << app_db_entry.table_name.c_str(); + << "Not a valid action " << action_name.c_str() << " in extension table " + << app_db_entry.table_name.c_str(); } if (!action->refers_to) @@ -186,25 +191,23 @@ ReturnCode ExtTablesManager::validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry & status = validateActionParamsCrossRef(app_db_entry, action); if (!status.ok()) { - return status; + return status; } } return ReturnCode(); } - ReturnCodeOr ExtTablesManager::deserializeP4ExtTableEntry( - const std::string &table_name, - const std::string &key, const std::vector &attributes) + const std::string &table_name, const std::string &key, const std::vector &attributes) { - std::string action_name; + std::string action_name; SWSS_LOG_ENTER(); P4ExtTableAppDbEntry app_db_entry_or = {}; app_db_entry_or.table_name = table_name; - app_db_entry_or.table_key = key; + app_db_entry_or.table_key = key; action_name = ""; for (const auto &it : attributes) @@ -223,7 +226,7 @@ ReturnCodeOr ExtTablesManager::deserializeP4ExtTableEntry( { SWSS_LOG_ERROR("Unknown extension entry field"); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Unknown extension entry field " << QuotedVar(field); + << "Unknown extension entry field " << QuotedVar(field); } const auto &prefix = tokenized_fields[0]; @@ -244,11 +247,10 @@ ReturnCodeOr ExtTablesManager::deserializeP4ExtTableEntry( return app_db_entry_or; } - ReturnCode ExtTablesManager::prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry &app_db_entry, std::string &ext_table_entry_attr) { - nlohmann::json sai_j, sai_metadata_j, sai_array_j = {}, sai_entry_j; + nlohmann::json sai_j, sai_metadata_j, sai_array_j = {}, sai_entry_j; SWSS_LOG_ENTER(); @@ -260,37 +262,37 @@ ReturnCode ExtTablesManager::prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry { SWSS_LOG_ERROR("extension entry for invalid table %s", app_db_entry.table_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "extension entry for invalid table " << app_db_entry.table_name.c_str(); + << "extension entry for invalid table " << app_db_entry.table_name.c_str(); } nlohmann::json j = nlohmann::json::parse(app_db_entry.table_key); for (auto it = j.begin(); it != j.end(); ++it) { - std::string match, value, prefix; - std::size_t pos; + std::string match, value, prefix; + std::size_t pos; match = it.key(); value = it.value(); - prefix = p4orch::kMatchPrefix; + prefix = p4orch::kMatchPrefix; pos = match.rfind(prefix); if (pos != std::string::npos) { match.erase(0, prefix.length()); } - else + else { SWSS_LOG_ERROR("Failed to encode match fields for sai call"); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode match fields for sai call"; } - prefix = p4orch::kFieldDelimiter; + prefix = p4orch::kFieldDelimiter; pos = match.rfind(prefix); if (pos != std::string::npos) { match.erase(0, prefix.length()); } - else + else { SWSS_LOG_ERROR("Failed to encode match fields for sai call"); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode match fields for sai call"; @@ -301,7 +303,7 @@ ReturnCode ExtTablesManager::prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry { SWSS_LOG_ERROR("extension entry for invalid match field %s", match.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "extension entry for invalid match field " << match.c_str(); + << "extension entry for invalid match field " << match.c_str(); } sai_metadata_j = nlohmann::json::object({}); @@ -315,7 +317,7 @@ ReturnCode ExtTablesManager::prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry } for (auto app_db_action_it = app_db_entry.action_params.begin(); - app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) + app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) { sai_j = nlohmann::json::object({}); auto action_dep_object_it = app_db_entry.action_dep_objects.find(app_db_action_it->first); @@ -323,9 +325,9 @@ ReturnCode ExtTablesManager::prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry { auto action_defn_it = table->action_fields.find(app_db_action_it->first); for (auto app_db_param_it = app_db_action_it->second.begin(); - app_db_param_it != app_db_action_it->second.end(); app_db_param_it++) + app_db_param_it != app_db_action_it->second.end(); app_db_param_it++) { - nlohmann::json params_j = nlohmann::json::object({}); + nlohmann::json params_j = nlohmann::json::object({}); if (action_defn_it != table->action_fields.end()) { auto param_defn_it = action_defn_it->second.params.find(app_db_param_it->first); @@ -396,9 +398,8 @@ bool createGenericCounter(sai_object_id_t &counter_id) return true; } - ReturnCode ExtTablesManager::createP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, - P4ExtTableEntry &ext_table_entry) + P4ExtTableEntry &ext_table_entry) { ReturnCode status; sai_object_type_t object_type; @@ -428,22 +429,20 @@ ReturnCode ExtTablesManager::createP4ExtTableEntry(const P4ExtTableAppDbEntry &a generic_programmable_attr.value.json.json.list = (int8_t *)const_cast(ext_table_entry_attr.c_str()); generic_programmable_attrs.push_back(generic_programmable_attr); - auto *table = getTableInfo(app_db_entry.table_name); if (!table) { SWSS_LOG_ERROR("extension entry for invalid table %s", app_db_entry.table_name.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "extension entry for invalid table " << app_db_entry.table_name.c_str(); + << "extension entry for invalid table " << app_db_entry.table_name.c_str(); } if (table->counter_bytes_enabled || table->counter_packets_enabled) { if (!createGenericCounter(counter_id)) { - SWSS_LOG_WARN("Failed to create counter for table %s, key %s\n", - app_db_entry.table_name.c_str(), - app_db_entry.table_key.c_str()); + SWSS_LOG_WARN("Failed to create counter for table %s, key %s\n", app_db_entry.table_name.c_str(), + app_db_entry.table_key.c_str()); } else { @@ -457,33 +456,29 @@ ReturnCode ExtTablesManager::createP4ExtTableEntry(const P4ExtTableAppDbEntry &a sai_object_id_t sai_generic_programmable_oid = SAI_NULL_OBJECT_ID; sai_status_t sai_status = sai_generic_programmable_api->create_generic_programmable( - &sai_generic_programmable_oid, gSwitchId, - (uint32_t)generic_programmable_attrs.size(), - generic_programmable_attrs.data()); + &sai_generic_programmable_oid, gSwitchId, (uint32_t)generic_programmable_attrs.size(), + generic_programmable_attrs.data()); if (sai_status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("create sai api call failed for extension entry table %s, entry %s", - app_db_entry.table_name.c_str(), app_db_entry.table_key.c_str()); + app_db_entry.table_name.c_str(), app_db_entry.table_key.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "create sai api call failed for extension entry table " - << app_db_entry.table_name.c_str() - << " , entry " << app_db_entry.table_key.c_str(); + << "create sai api call failed for extension entry table " << app_db_entry.table_name.c_str() + << " , entry " << app_db_entry.table_key.c_str(); } std::string crm_table_name = "EXT_" + app_db_entry.table_name; boost::algorithm::to_upper(crm_table_name); gCrmOrch->incCrmExtTableUsedCounter(CrmResourceType::CRM_EXT_TABLE, crm_table_name); - ext_table_entry.sai_entry_oid = sai_generic_programmable_oid; for (auto action_dep_object_it = app_db_entry.action_dep_objects.begin(); - action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) + action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) { auto action_dep_object = action_dep_object_it->second; m_p4OidMapper->increaseRefCount(action_dep_object.sai_object, action_dep_object.key); ext_table_entry.action_dep_objects[action_dep_object_it->first] = action_dep_object; } - auto ext_table_key = KeyGenerator::generateExtTableKey(app_db_entry.table_name, app_db_entry.table_key); status = getSaiObject(ext_table_key, object_type, key); if (!status.ok()) @@ -497,9 +492,8 @@ ReturnCode ExtTablesManager::createP4ExtTableEntry(const P4ExtTableAppDbEntry &a return ReturnCode(); } - ReturnCode ExtTablesManager::updateP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, - P4ExtTableEntry *ext_table_entry) + P4ExtTableEntry *ext_table_entry) { ReturnCode status; std::string ext_table_entry_attr; @@ -510,11 +504,10 @@ ReturnCode ExtTablesManager::updateP4ExtTableEntry(const P4ExtTableAppDbEntry &a if (ext_table_entry->sai_entry_oid == SAI_NULL_OBJECT_ID) { SWSS_LOG_ERROR("update sai api call for NULL extension entry table %s, entry %s", - app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); + app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "update sai api call for NULL extension entry table " - << app_db_entry.table_name.c_str() - << " , entry " << ext_table_entry->table_key.c_str(); + << "update sai api call for NULL extension entry table " << app_db_entry.table_name.c_str() + << " , entry " << ext_table_entry->table_key.c_str(); } status = prepareP4SaiExtAPIParams(app_db_entry, ext_table_entry_attr); @@ -531,24 +524,21 @@ ReturnCode ExtTablesManager::updateP4ExtTableEntry(const P4ExtTableAppDbEntry &a generic_programmable_attr.value.json.json.list = (int8_t *)const_cast(ext_table_entry_attr.c_str()); sai_status_t sai_status = sai_generic_programmable_api->set_generic_programmable_attribute( - ext_table_entry->sai_entry_oid, - &generic_programmable_attr); + ext_table_entry->sai_entry_oid, &generic_programmable_attr); if (sai_status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("update sai api call failed for extension entry table %s, entry %s", - app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); + app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "update sai api call failed for extension entry table " - << app_db_entry.table_name.c_str() - << " , entry " << ext_table_entry->table_key.c_str(); + << "update sai api call failed for extension entry table " << app_db_entry.table_name.c_str() + << " , entry " << ext_table_entry->table_key.c_str(); } - old_action_dep_objects = ext_table_entry->action_dep_objects; ext_table_entry->action_dep_objects.clear(); for (auto action_dep_object_it = app_db_entry.action_dep_objects.begin(); - action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) + action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) { auto action_dep_object = action_dep_object_it->second; m_p4OidMapper->increaseRefCount(action_dep_object.sai_object, action_dep_object.key); @@ -556,7 +546,7 @@ ReturnCode ExtTablesManager::updateP4ExtTableEntry(const P4ExtTableAppDbEntry &a } for (auto old_action_dep_object_it = old_action_dep_objects.begin(); - old_action_dep_object_it != old_action_dep_objects.end(); old_action_dep_object_it++) + old_action_dep_object_it != old_action_dep_objects.end(); old_action_dep_object_it++) { auto old_action_dep_object = old_action_dep_object_it->second; m_p4OidMapper->decreaseRefCount(old_action_dep_object.sai_object, old_action_dep_object.key); @@ -565,8 +555,7 @@ ReturnCode ExtTablesManager::updateP4ExtTableEntry(const P4ExtTableAppDbEntry &a return ReturnCode(); } -ReturnCode ExtTablesManager::removeP4ExtTableEntry(const std::string &table_name, - const std::string &table_key) +ReturnCode ExtTablesManager::removeP4ExtTableEntry(const std::string &table_name, const std::string &table_key) { ReturnCode status; sai_object_type_t object_type; @@ -578,36 +567,31 @@ ReturnCode ExtTablesManager::removeP4ExtTableEntry(const std::string &table_name if (!ext_table_entry) { LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "extension entry with key " << QuotedVar(table_key) - << " does not exist for table " << QuotedVar(table_name)); + << "extension entry with key " << QuotedVar(table_key) << " does not exist for table " + << QuotedVar(table_name)); } if (ext_table_entry->sai_entry_oid == SAI_NULL_OBJECT_ID) { - SWSS_LOG_ERROR("remove sai api call for NULL extension entry table %s, entry %s", - table_name.c_str(), table_key.c_str()); - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "remove sai api call for NULL extension entry table " - << table_name.c_str() << " , entry " << table_key.c_str(); + SWSS_LOG_ERROR("remove sai api call for NULL extension entry table %s, entry %s", table_name.c_str(), + table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "remove sai api call for NULL extension entry table " + << table_name.c_str() << " , entry " << table_key.c_str(); } - SWSS_LOG_ERROR("table: %s, key: %s", ext_table_entry->table_name.c_str(), - ext_table_entry->table_key.c_str()); - sai_status_t sai_status = sai_generic_programmable_api->remove_generic_programmable( - ext_table_entry->sai_entry_oid); + SWSS_LOG_ERROR("table: %s, key: %s", ext_table_entry->table_name.c_str(), ext_table_entry->table_key.c_str()); + sai_status_t sai_status = sai_generic_programmable_api->remove_generic_programmable(ext_table_entry->sai_entry_oid); if (sai_status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("remove sai api call failed for extension entry table %s, entry %s", - table_name.c_str(), table_key.c_str()); - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "remove sai api call failed for extension entry table " - << table_name.c_str() << " , entry " << table_key.c_str(); + SWSS_LOG_ERROR("remove sai api call failed for extension entry table %s, entry %s", table_name.c_str(), + table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "remove sai api call failed for extension entry table " + << table_name.c_str() << " , entry " << table_key.c_str(); } std::string crm_table_name = "EXT_" + table_name; boost::algorithm::to_upper(crm_table_name); gCrmOrch->decCrmExtTableUsedCounter(CrmResourceType::CRM_EXT_TABLE, crm_table_name); - auto ext_table_key = KeyGenerator::generateExtTableKey(table_name, table_key); status = getSaiObject(ext_table_key, object_type, key); if (!status.ok()) @@ -630,7 +614,7 @@ ReturnCode ExtTablesManager::removeP4ExtTableEntry(const std::string &table_name m_p4OidMapper->eraseOID(object_type, key); for (auto action_dep_object_it = ext_table_entry->action_dep_objects.begin(); - action_dep_object_it != ext_table_entry->action_dep_objects.end(); action_dep_object_it++) + action_dep_object_it != ext_table_entry->action_dep_objects.end(); action_dep_object_it++) { auto action_dep_object = action_dep_object_it->second; m_p4OidMapper->decreaseRefCount(action_dep_object.sai_object, action_dep_object.key); @@ -647,7 +631,6 @@ ReturnCode ExtTablesManager::removeP4ExtTableEntry(const std::string &table_name return ReturnCode(); } - ReturnCode ExtTablesManager::processAddRequest(const P4ExtTableAppDbEntry &app_db_entry) { SWSS_LOG_ENTER(); @@ -662,15 +645,14 @@ ReturnCode ExtTablesManager::processAddRequest(const P4ExtTableAppDbEntry &app_d } ReturnCode ExtTablesManager::processUpdateRequest(const P4ExtTableAppDbEntry &app_db_entry, - P4ExtTableEntry *ext_table_entry) + P4ExtTableEntry *ext_table_entry) { SWSS_LOG_ENTER(); auto status = updateP4ExtTableEntry(app_db_entry, ext_table_entry); if (!status.ok()) { - SWSS_LOG_ERROR("Failed to update extension entry with key %s", - app_db_entry.table_key.c_str()); + SWSS_LOG_ERROR("Failed to update extension entry with key %s", app_db_entry.table_key.c_str()); } return ReturnCode(); } @@ -682,14 +664,13 @@ ReturnCode ExtTablesManager::processDeleteRequest(const P4ExtTableAppDbEntry &ap auto status = removeP4ExtTableEntry(app_db_entry.table_name, app_db_entry.table_key); if (!status.ok()) { - SWSS_LOG_ERROR("Failed to remove extension entry with key %s", - app_db_entry.table_key.c_str()); + SWSS_LOG_ERROR("Failed to remove extension entry with key %s", app_db_entry.table_key.c_str()); } return ReturnCode(); } - -ReturnCode ExtTablesManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode ExtTablesManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { object_type = SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE; object_key = json_key; @@ -702,118 +683,120 @@ void ExtTablesManager::enqueue(const std::string &table_name, const swss::KeyOpF m_entriesTables[table_name].push_back(entry); } -void ExtTablesManager::drain() -{ - SWSS_LOG_ENTER(); - std::string table_prefix = "EXT_"; - - if (gP4Orch->tablesinfo) { - for (auto table_it = gP4Orch->tablesinfo->m_tablePrecedenceMap.begin(); - table_it != gP4Orch->tablesinfo->m_tablePrecedenceMap.end(); ++table_it) - { - auto table_name = table_prefix + table_it->second; - boost::algorithm::to_upper(table_name); - auto it_m = m_entriesTables.find(table_name); - if (it_m == m_entriesTables.end()) - { - continue; - } - - for (const auto &key_op_fvs_tuple : it_m->second) - { - std::string table_name; - std::string table_key; - - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &table_key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - if (table_name.rfind(table_prefix, 0) == std::string::npos) - { - SWSS_LOG_ERROR("Table %s is without prefix %s", table_name.c_str(), table_prefix.c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - StatusCode::SWSS_RC_INVALID_PARAM, /*replace=*/true); - continue; - } - table_name = table_name.substr(table_prefix.length()); - boost::algorithm::to_lower(table_name); +void ExtTablesManager::drainWithNotExecuted() { + for (auto& entries_table : m_entriesTables) { + drainMgmtWithNotExecuted(entries_table.second, m_publisher); + } +} - ReturnCode status; - auto app_db_entry_or = deserializeP4ExtTableEntry(table_name, table_key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, /*replace=*/true); - continue; - } +ReturnCode ExtTablesManager::drain() { + SWSS_LOG_ENTER(); + std::string table_prefix = "EXT_"; + ReturnCode ret; + + if (gP4Orch->tablesinfo) { + for (auto table_it = gP4Orch->tablesinfo->m_tablePrecedenceMap.begin(); + table_it != gP4Orch->tablesinfo->m_tablePrecedenceMap.end(); + ++table_it) { + auto table_name = table_prefix + table_it->second; + boost::algorithm::to_upper(table_name); + auto it_m = m_entriesTables.find(table_name); + if (it_m == m_entriesTables.end()) { + continue; + } - auto &app_db_entry = *app_db_entry_or; - status = validateP4ExtTableAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for extension APP DB entry with key %s: %s", - QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, /*replace=*/true); - continue; - } + ReturnCode status; + while (!it_m->second.empty()) { + auto key_op_fvs_tuple = it_m->second.front(); + it_m->second.pop_front(); + std::string table_name; + std::string table_key; + + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &table_key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + if (table_name.rfind(table_prefix, 0) == std::string::npos) { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM); + SWSS_LOG_ERROR("Table %s is without prefix %s", table_name.c_str(), + table_prefix.c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + table_name = table_name.substr(table_prefix.length()); + boost::algorithm::to_lower(table_name); + + auto app_db_entry_or = + deserializeP4ExtTableEntry(table_name, table_key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto *ext_table_entry = getP4ExtTableEntry(app_db_entry.table_name, app_db_entry.table_key); - if (ext_table_entry == nullptr) - { - // Create extension entry - app_db_entry.db_key = kfvKey(key_op_fvs_tuple); - status = processAddRequest(app_db_entry); - } - else - { - // Modify existing extension entry - status = processUpdateRequest(app_db_entry, ext_table_entry); - } - } - else if (operation == DEL_COMMAND) - { - // Delete extension entry - status = processDeleteRequest(app_db_entry); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - if (!status.ok()) - { - SWSS_LOG_ERROR("Processing failed for extension APP_DB entry with key %s: %s", - QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, /*replace=*/true); + auto& app_db_entry = *app_db_entry_or; + status = validateP4ExtTableAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for extension APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; } - it_m->second.clear(); + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto* ext_table_entry = getP4ExtTableEntry(app_db_entry.table_name, + app_db_entry.table_key); + if (ext_table_entry == nullptr) { + // Create extension entry + app_db_entry.db_key = kfvKey(key_op_fvs_tuple); + status = processAddRequest(app_db_entry); + } else { + // Modify existing extension entry + status = processUpdateRequest(app_db_entry, ext_table_entry); + } + } else if (operation == DEL_COMMAND) { + // Delete extension entry + status = processDeleteRequest(app_db_entry); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + if (!status.ok()) { + SWSS_LOG_ERROR( + "Processing failed for extension APP_DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), + status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + if (!status.ok()) { + ret = status; } } + } - // Now report error for all remaining un-processed entries - for (auto it_m = m_entriesTables.begin(); it_m != m_entriesTables.end(); it_m++) - { - for (const auto &key_op_fvs_tuple : it_m->second) - { - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - StatusCode::SWSS_RC_INVALID_PARAM, /*replace=*/true); - } - - it_m->second.clear(); - } + drainWithNotExecuted(); + return ret; } - void ExtTablesManager::doExtCounterStatsTask() { SWSS_LOG_ENTER(); @@ -823,12 +806,12 @@ void ExtTablesManager::doExtCounterStatsTask() return; } - sai_stat_id_t stat_ids[] = { SAI_COUNTER_STAT_PACKETS, SAI_COUNTER_STAT_BYTES }; + sai_stat_id_t stat_ids[] = {SAI_COUNTER_STAT_PACKETS, SAI_COUNTER_STAT_BYTES}; uint64_t stats[2]; std::vector counter_stats_values; for (auto table_it = gP4Orch->tablesinfo->m_tableInfoMap.begin(); - table_it != gP4Orch->tablesinfo->m_tableInfoMap.end(); ++table_it) + table_it != gP4Orch->tablesinfo->m_tableInfoMap.end(); ++table_it) { if (!table_it->second.counter_bytes_enabled && !table_it->second.counter_packets_enabled) { @@ -842,8 +825,8 @@ void ExtTablesManager::doExtCounterStatsTask() continue; } - for (auto ext_table_entry_it = ext_table_it->second.begin(); - ext_table_entry_it != ext_table_it->second.end(); ++ext_table_entry_it) + for (auto ext_table_entry_it = ext_table_it->second.begin(); ext_table_entry_it != ext_table_it->second.end(); + ++ext_table_entry_it) { auto *ext_table_entry = &ext_table_entry_it->second; if (ext_table_entry->sai_counter_oid == SAI_NULL_OBJECT_ID) @@ -852,18 +835,17 @@ void ExtTablesManager::doExtCounterStatsTask() } sai_status_t sai_status = - sai_counter_api->get_counter_stats(ext_table_entry->sai_counter_oid, 2, stat_ids, stats); + sai_counter_api->get_counter_stats(ext_table_entry->sai_counter_oid, 2, stat_ids, stats); if (sai_status != SAI_STATUS_SUCCESS) { - SWSS_LOG_WARN("Failed to set counters stats for extension entry %s:%s in COUNTERS_DB: ", - table_name.c_str(), ext_table_entry->table_key.c_str()); + SWSS_LOG_WARN("Failed to set counters stats for extension entry %s:%s in " + "COUNTERS_DB: ", + table_name.c_str(), ext_table_entry->table_key.c_str()); continue; } - counter_stats_values.push_back( - swss::FieldValueTuple{P4_COUNTER_STATS_PACKETS, std::to_string(stats[0])}); - counter_stats_values.push_back( - swss::FieldValueTuple{P4_COUNTER_STATS_BYTES, std::to_string(stats[1])}); + counter_stats_values.push_back(swss::FieldValueTuple{P4_COUNTER_STATS_PACKETS, std::to_string(stats[0])}); + counter_stats_values.push_back(swss::FieldValueTuple{P4_COUNTER_STATS_BYTES, std::to_string(stats[1])}); // Set field value tuples for counters stats in COUNTERS_DB m_countersTable->set(ext_table_entry->db_key, counter_stats_values); @@ -878,4 +860,3 @@ std::string ExtTablesManager::verifyState(const std::string &key, const std::vec return result; } - diff --git a/orchagent/p4orch/ext_tables_manager.h b/orchagent/p4orch/ext_tables_manager.h index cb61d5f308d..fab0cbc2ce8 100644 --- a/orchagent/p4orch/ext_tables_manager.h +++ b/orchagent/p4orch/ext_tables_manager.h @@ -1,12 +1,12 @@ #pragma once #include +#include #include #include #include #include "macaddress.h" -#include #include "orch.h" #include "p4orch/object_manager_interface.h" #include "p4orch/p4oidmapper.h" @@ -31,7 +31,7 @@ struct P4ExtTableEntry P4ExtTableEntry() {}; P4ExtTableEntry(const std::string &db_key, const std::string &table_name, const std::string &table_key) - : db_key(db_key), table_name(table_name), table_key(table_key) + : db_key(db_key), table_name(table_name), table_key(table_key) { } }; @@ -44,10 +44,9 @@ class ExtTablesManager : public ObjectManagerInterface { public: ExtTablesManager(P4OidMapper *p4oidMapper, VRFOrch *vrfOrch, ResponsePublisherInterface *publisher) - : m_vrfOrch(vrfOrch), - m_countersDb(std::make_unique("COUNTERS_DB", 0)), - m_countersTable(std::make_unique( - m_countersDb.get(), std::string(COUNTERS_TABLE) + DEFAULT_KEY_SEPARATOR + APP_P4RT_TABLE_NAME)) + : m_vrfOrch(vrfOrch), m_countersDb(std::make_unique("COUNTERS_DB", 0)), + m_countersTable(std::make_unique( + m_countersDb.get(), std::string(COUNTERS_TABLE) + DEFAULT_KEY_SEPARATOR + APP_P4RT_TABLE_NAME)) { SWSS_LOG_ENTER(); @@ -59,23 +58,23 @@ class ExtTablesManager : public ObjectManagerInterface virtual ~ExtTablesManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; // For every extension entry, update counters stats in COUNTERS_DB, if // counters are enabled for those entries void doExtCounterStatsTask(); private: - ReturnCodeOr deserializeP4ExtTableEntry( - const std::string &table_name, - const std::string &key, const std::vector &attributes); + ReturnCodeOr deserializeP4ExtTableEntry(const std::string &table_name, const std::string &key, + const std::vector &attributes); ReturnCode validateActionParamsCrossRef(P4ExtTableAppDbEntry &app_db_entry, ActionInfo *action); ReturnCode validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry &app_db_entry); P4ExtTableEntry *getP4ExtTableEntry(const std::string &table_name, const std::string &table_key); - ReturnCode prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry &app_db_entry, - std::string &ext_table_entry_attr); + ReturnCode prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry &app_db_entry, std::string &ext_table_entry_attr); ReturnCode createP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry &ext_table_entry); ReturnCode updateP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry *ext_table_entry); ReturnCode removeP4ExtTableEntry(const std::string &table_name, const std::string &table_key); diff --git a/orchagent/p4orch/gre_tunnel_manager.cpp b/orchagent/p4orch/gre_tunnel_manager.cpp index 9f4cc7f7b65..f5286e74ab8 100644 --- a/orchagent/p4orch/gre_tunnel_manager.cpp +++ b/orchagent/p4orch/gre_tunnel_manager.cpp @@ -1,6 +1,7 @@ #include "p4orch/gre_tunnel_manager.h" #include +#include #include #include #include @@ -9,7 +10,6 @@ #include "crmorch.h" #include "dbconnector.h" #include "ipaddress.h" -#include #include "logger.h" #include "p4orch/p4orch_util.h" #include "sai_serialize.h" @@ -27,6 +27,7 @@ extern sai_tunnel_api_t *sai_tunnel_api; extern sai_router_interface_api_t *sai_router_intfs_api; extern CrmOrch *gCrmOrch; extern sai_object_id_t gVirtualRouterId; +extern sai_object_id_t gUnderlayIfId; namespace { @@ -98,7 +99,8 @@ P4GreTunnelEntry::P4GreTunnelEntry(const std::string &tunnel_id, const std::stri tunnel_key = KeyGenerator::generateTunnelKey(tunnel_id); } -ReturnCode GreTunnelManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode GreTunnelManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { return StatusCode::SWSS_RC_UNIMPLEMENTED; } @@ -108,74 +110,79 @@ void GreTunnelManager::enqueue(const std::string &table_name, const swss::KeyOpF m_entries.push_back(entry); } -void GreTunnelManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - const std::string &operation = kfvOp(key_op_fvs_tuple); - - ReturnCode status; - auto app_db_entry_or = deserializeP4GreTunnelAppDbEntry(key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize GRE Tunnel APP DB entry with key %s: %s", - QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; +void GreTunnelManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const std::string tunnel_key = KeyGenerator::generateTunnelKey(app_db_entry.tunnel_id); +ReturnCode GreTunnelManager::drain() { + SWSS_LOG_ENTER(); - // Fulfill the operation. - if (operation == SET_COMMAND) - { - status = validateGreTunnelAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for GRE Tunnel APP DB entry with key %s: %s", - QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto *gre_tunnel_entry = getGreTunnelEntry(tunnel_key); - if (gre_tunnel_entry == nullptr) - { - // Create new GRE tunnel. - status = processAddRequest(app_db_entry); - } - else - { - // Modify existing GRE tunnel. - status = processUpdateRequest(app_db_entry, gre_tunnel_entry); - } - } - else if (operation == DEL_COMMAND) - { - // Delete GRE tunnel. - status = processDeleteRequest(tunnel_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + const std::string& operation = kfvOp(key_op_fvs_tuple); + + auto app_db_entry_or = deserializeP4GreTunnelAppDbEntry(key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR( + "Unable to deserialize GRE Tunnel APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + const std::string tunnel_key = + KeyGenerator::generateTunnelKey(app_db_entry.tunnel_id); + + // Fulfill the operation. + if (operation == SET_COMMAND) { + status = validateGreTunnelAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for GRE Tunnel APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, /*replace=*/true); - } - m_entries.clear(); + break; + } + auto* gre_tunnel_entry = getGreTunnelEntry(tunnel_key); + if (gre_tunnel_entry == nullptr) { + // Create new GRE tunnel. + status = processAddRequest(app_db_entry); + } else { + // Modify existing GRE tunnel. + status = processUpdateRequest(app_db_entry, gre_tunnel_entry); + } + } else if (operation == DEL_COMMAND) { + // Delete GRE tunnel. + status = processDeleteRequest(tunnel_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } P4GreTunnelEntry *GreTunnelManager::getGreTunnelEntry(const std::string &tunnel_key) @@ -317,48 +324,21 @@ ReturnCode GreTunnelManager::createGreTunnel(P4GreTunnelEntry &gre_tunnel_entry) << "Router intf " << QuotedVar(gre_tunnel_entry.router_interface_id) << " does not exist"); } - std::vector overlay_intf_attrs; - - sai_attribute_t overlay_intf_attr; - overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; - overlay_intf_attr.value.oid = gVirtualRouterId; - overlay_intf_attrs.push_back(overlay_intf_attr); - - overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; - overlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; - overlay_intf_attrs.push_back(overlay_intf_attr); - - // Call SAI API. - CHECK_ERROR_AND_LOG_AND_RETURN( - sai_router_intfs_api->create_router_interface(&gre_tunnel_entry.overlay_if_oid, gSwitchId, - (uint32_t)overlay_intf_attrs.size(), overlay_intf_attrs.data()), - "Failed to create the Loopback router interface for GRE tunnel " - "SAI_TUNNEL_ATTR_OVERLAY_INTERFACE attribute" - << QuotedVar(gre_tunnel_entry.tunnel_key)); - // Prepare attributes for the SAI creation call. + // TODO: Remove when SAI_TUNNEL_ATTR_OVERLAY_INTERFACE is not + // mandatory Use gUnderlayIfId, a shared global loopback rif, for encap + // tunnels + gre_tunnel_entry.overlay_if_oid = gUnderlayIfId; std::vector tunnel_attrs = getSaiAttrs(gre_tunnel_entry); // Call SAI API. - auto sai_status = sai_tunnel_api->create_tunnel(&gre_tunnel_entry.tunnel_oid, gSwitchId, - (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); - if (sai_status != SAI_STATUS_SUCCESS) - { - auto status = ReturnCode(sai_status) << "Failed to create GRE tunnel " << QuotedVar(gre_tunnel_entry.tunnel_key) - << " on rif " << QuotedVar(gre_tunnel_entry.router_interface_id); - SWSS_LOG_ERROR("%s", status.message().c_str()); - auto recovery_status = sai_router_intfs_api->remove_router_interface(gre_tunnel_entry.overlay_if_oid); - if (recovery_status != SAI_STATUS_SUCCESS) - { - auto rc = ReturnCode(recovery_status) << "Failed to recover overlay router interface due to SAI call " - "failure: Failed to remove loopback router interface " - << QuotedVar(sai_serialize_object_id(gre_tunnel_entry.overlay_if_oid)) - << " while clean up dependencies."; - SWSS_LOG_ERROR("%s", rc.message().c_str()); - SWSS_RAISE_CRITICAL_STATE(rc.message()); - } - return status; - } + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_tunnel_api->create_tunnel(&gre_tunnel_entry.tunnel_oid, gSwitchId, + (uint32_t)tunnel_attrs.size(), + tunnel_attrs.data()), + "Failed to create GRE tunnel " + << QuotedVar(gre_tunnel_entry.tunnel_key) << " on rif " + << QuotedVar(gre_tunnel_entry.router_interface_id)); // On successful creation, increment ref count. m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_interface_key); @@ -429,32 +409,6 @@ ReturnCode GreTunnelManager::removeGreTunnel(const std::string &tunnel_key) CHECK_ERROR_AND_LOG_AND_RETURN(sai_tunnel_api->remove_tunnel(gre_tunnel_entry->tunnel_oid), "Failed to remove GRE tunnel " << QuotedVar(gre_tunnel_entry->tunnel_key)); - auto sai_status = sai_router_intfs_api->remove_router_interface(gre_tunnel_entry->overlay_if_oid); - if (sai_status != SAI_STATUS_SUCCESS) - { - auto status = ReturnCode(sai_status) << "Failed to remove loopback router interface " - << QuotedVar(sai_serialize_object_id(gre_tunnel_entry->overlay_if_oid)) - << " when removing GRE tunnel " << QuotedVar(gre_tunnel_entry->tunnel_key); - SWSS_LOG_ERROR("%s", status.message().c_str()); - - // Try to recreate the GRE tunnel - std::vector tunnel_attrs = getSaiAttrs(*gre_tunnel_entry); - - // Call SAI API. - auto recovery_status = sai_tunnel_api->create_tunnel(&gre_tunnel_entry->tunnel_oid, gSwitchId, - (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); - if (recovery_status != SAI_STATUS_SUCCESS) - { - auto rc = ReturnCode(recovery_status) << "Failed to recover the GRE tunnel due to SAI call failure : " - "Failed to create GRE tunnel " - << QuotedVar(gre_tunnel_entry->tunnel_key) << " on rif " - << QuotedVar(gre_tunnel_entry->router_interface_id); - SWSS_LOG_ERROR("%s", rc.message().c_str()); - SWSS_RAISE_CRITICAL_STATE(rc.message()); - } - return status; - } - // On successful deletion, decrement ref count. m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, KeyGenerator::generateRouterInterfaceKey(gre_tunnel_entry->router_interface_id)); @@ -594,36 +548,15 @@ std::string GreTunnelManager::verifyStateAsicDb(const P4GreTunnelEntry *gre_tunn swss::DBConnector db("ASIC_DB", 0); swss::Table table(&db, "ASIC_STATE"); - // Verify Overlay router interface ASIC DB attributes - std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_ROUTER_INTERFACE) + ":" + - sai_serialize_object_id(gre_tunnel_entry->overlay_if_oid); - std::vector values; - if (!table.get(key, values)) - { - return std::string("ASIC DB key not found ") + key; - } - - std::vector overlay_intf_attrs; - sai_attribute_t overlay_intf_attr; - overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; - overlay_intf_attr.value.oid = gVirtualRouterId; - overlay_intf_attrs.push_back(overlay_intf_attr); - overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; - overlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; - overlay_intf_attrs.push_back(overlay_intf_attr); - std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( - SAI_OBJECT_TYPE_ROUTER_INTERFACE, (uint32_t)overlay_intf_attrs.size(), overlay_intf_attrs.data(), - /*countOnly=*/false); - verifyAttrs(values, exp, std::vector{}, - /*allow_unknown=*/false); - // Verify Tunnel ASIC DB attributes std::vector attrs = getSaiAttrs(*gre_tunnel_entry); - exp = saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_TUNNEL, (uint32_t)attrs.size(), attrs.data(), - /*countOnly=*/false); - key = - sai_serialize_object_type(SAI_OBJECT_TYPE_TUNNEL) + ":" + sai_serialize_object_id(gre_tunnel_entry->tunnel_oid); - values.clear(); + std::vector exp = + saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_TUNNEL, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_TUNNEL) + ":" + + sai_serialize_object_id(gre_tunnel_entry->tunnel_oid); + std::vector values; if (!table.get(key, values)) { return std::string("ASIC DB key not found ") + key; diff --git a/orchagent/p4orch/gre_tunnel_manager.h b/orchagent/p4orch/gre_tunnel_manager.h index d5cb32e9bf3..fe014106bd9 100644 --- a/orchagent/p4orch/gre_tunnel_manager.h +++ b/orchagent/p4orch/gre_tunnel_manager.h @@ -70,9 +70,11 @@ class GreTunnelManager : public ObjectManagerInterface virtual ~GreTunnelManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; ReturnCodeOr getConstGreTunnelEntry(const std::string &gre_tunnel_key); diff --git a/orchagent/p4orch/l3_admit_manager.cpp b/orchagent/p4orch/l3_admit_manager.cpp index d319c54a734..598e3e11ed6 100644 --- a/orchagent/p4orch/l3_admit_manager.cpp +++ b/orchagent/p4orch/l3_admit_manager.cpp @@ -1,13 +1,13 @@ #include "p4orch/l3_admit_manager.h" #include +#include #include #include #include #include "SaiAttributeList.h" #include "dbconnector.h" -#include #include "logger.h" #include "p4orch/p4orch_util.h" #include "portsorch.h" @@ -54,6 +54,13 @@ ReturnCodeOr> getSaiAttrs(const P4L3AdmitEntry &l3_ LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) << "Failed to get port info for port " << QuotedVar(l3_admit_entry.port_name)); } + if (port.m_type != Port::Type::PHY) { + LOG_ERROR_AND_RETURN( + ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) + << "Port " << QuotedVar(l3_admit_entry.port_name) << "'s type " + << port.m_type + << " is not physical and is not supported for L3 Admit entry."); + } l3_admit_attr.id = SAI_MY_MAC_ATTR_PORT_ID; l3_admit_attr.value.oid = port.m_port_id; l3_admit_attrs.push_back(l3_admit_attr); @@ -64,7 +71,8 @@ ReturnCodeOr> getSaiAttrs(const P4L3AdmitEntry &l3_ } // namespace -ReturnCode L3AdmitManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode L3AdmitManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { return StatusCode::SWSS_RC_UNIMPLEMENTED; } @@ -74,66 +82,70 @@ void L3AdmitManager::enqueue(const std::string &table_name, const swss::KeyOpFie m_entries.push_back(entry); } -void L3AdmitManager::drain() -{ - SWSS_LOG_ENTER(); +void L3AdmitManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - ReturnCode status; - auto app_db_entry_or = deserializeP4L3AdmitAppDbEntry(key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; +ReturnCode L3AdmitManager::drain() { + SWSS_LOG_ENTER(); - const std::string l3_admit_key = - KeyGenerator::generateL3AdmitKey(app_db_entry.mac_address_data, app_db_entry.mac_address_mask, - app_db_entry.port_name, app_db_entry.priority); + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto app_db_entry_or = deserializeP4L3AdmitAppDbEntry(key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; - // Fulfill the operation. - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto *l3_admit_entry = getL3AdmitEntry(l3_admit_key); - if (l3_admit_entry == nullptr) - { - // Create new l3 admit. - status = processAddRequest(app_db_entry, l3_admit_key); - } - else - { - // Duplicate l3 admit entry, no-op - status = ReturnCode(StatusCode::SWSS_RC_SUCCESS) - << "L3 Admit entry with the same key received: " << QuotedVar(l3_admit_key); - } - } - else if (operation == DEL_COMMAND) - { - // Delete l3 admit. - status = processDeleteRequest(l3_admit_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, - /*replace=*/true); + const std::string l3_admit_key = KeyGenerator::generateL3AdmitKey( + app_db_entry.mac_address_data, app_db_entry.mac_address_mask, + app_db_entry.port_name, app_db_entry.priority); + + // Fulfill the operation. + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto* l3_admit_entry = getL3AdmitEntry(l3_admit_key); + if (l3_admit_entry == nullptr) { + // Create new l3 admit. + status = processAddRequest(app_db_entry, l3_admit_key); + } else { + // Duplicate l3 admit entry, no-op + status = ReturnCode(StatusCode::SWSS_RC_SUCCESS) + << "L3 Admit entry with the same key received: " + << QuotedVar(l3_admit_key); + } + } else if (operation == DEL_COMMAND) { + // Delete l3 admit. + status = processDeleteRequest(l3_admit_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); } - m_entries.clear(); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } P4L3AdmitEntry *L3AdmitManager::getL3AdmitEntry(const std::string &l3_admit_key) @@ -195,9 +207,21 @@ ReturnCodeOr L3AdmitManager::deserializeP4L3AdmitAppDbEntry // "match/in_port":"Ethernet0" if (j.find(prependMatchField(p4orch::kInPort)) != j.end()) - { - app_db_entry.port_name = j[prependMatchField(p4orch::kInPort)]; - } + { + std::string in_port = j[prependMatchField(p4orch::kInPort)]; + swss::Port port; + if (!gPortsOrch->getPort(in_port, port)) { + return ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Failed to get port info for port " << QuotedVar(in_port); + } + if (port.m_type != Port::Type::PHY) { + return ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) + << "Port " << QuotedVar(in_port) << "'s type " << port.m_type + << " is not physical and is not supported for " + "L3 Admit entry."; + } + app_db_entry.port_name = j[prependMatchField(p4orch::kInPort)]; + } } catch (std::exception &ex) { diff --git a/orchagent/p4orch/l3_admit_manager.h b/orchagent/p4orch/l3_admit_manager.h index d378775c4f4..c2d940cf717 100644 --- a/orchagent/p4orch/l3_admit_manager.h +++ b/orchagent/p4orch/l3_admit_manager.h @@ -61,9 +61,11 @@ class L3AdmitManager : public ObjectManagerInterface virtual ~L3AdmitManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: // Gets the internal cached next hop entry by its key. diff --git a/orchagent/p4orch/mirror_session_manager.cpp b/orchagent/p4orch/mirror_session_manager.cpp index 3554344fb38..4f3b2dc3bc1 100644 --- a/orchagent/p4orch/mirror_session_manager.cpp +++ b/orchagent/p4orch/mirror_session_manager.cpp @@ -1,10 +1,10 @@ #include "p4orch/mirror_session_manager.h" #include +#include #include "SaiAttributeList.h" #include "dbconnector.h" -#include #include "p4orch/p4orch_util.h" #include "portsorch.h" #include "sai_serialize.h" @@ -21,13 +21,14 @@ extern sai_object_id_t gSwitchId; namespace p4orch { -ReturnCode MirrorSessionManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode MirrorSessionManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { - std::string value; + std::string value; try { - nlohmann::json j = nlohmann::json::parse(json_key); + nlohmann::json j = nlohmann::json::parse(json_key); if (j.find(prependMatchField(p4orch::kMirrorSessionId)) != j.end()) { value = j.at(prependMatchField(p4orch::kMirrorSessionId)).get(); @@ -54,63 +55,68 @@ void MirrorSessionManager::enqueue(const std::string &table_name, const swss::Ke m_entries.push_back(entry); } -void MirrorSessionManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - ReturnCode status; - auto app_db_entry_or = deserializeP4MirrorSessionAppDbEntry(key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; +void MirrorSessionManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const std::string mirror_session_key = KeyGenerator::generateMirrorSessionKey(app_db_entry.mirror_session_id); +ReturnCode MirrorSessionManager::drain() { + SWSS_LOG_ENTER(); - // Fulfill the operation. - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto *mirror_session_entry = getMirrorSessionEntry(mirror_session_key); - if (mirror_session_entry == nullptr) - { - // Create new mirror session. - status = processAddRequest(app_db_entry); - } - else - { - // Modify existing mirror session. - status = processUpdateRequest(app_db_entry, mirror_session_entry); - } - } - else if (operation == DEL_COMMAND) - { - // Delete mirror session. - status = processDeleteRequest(mirror_session_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, - /*replace=*/true); - } - m_entries.clear(); + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto app_db_entry_or = + deserializeP4MirrorSessionAppDbEntry(key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + const std::string mirror_session_key = + KeyGenerator::generateMirrorSessionKey(app_db_entry.mirror_session_id); + + // Fulfill the operation. + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto* mirror_session_entry = getMirrorSessionEntry(mirror_session_key); + if (mirror_session_entry == nullptr) { + // Create new mirror session. + status = processAddRequest(app_db_entry); + } else { + // Modify existing mirror session. + status = processUpdateRequest(app_db_entry, mirror_session_entry); + } + } else if (operation == DEL_COMMAND) { + // Delete mirror session. + status = processDeleteRequest(mirror_session_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } ReturnCodeOr> getSaiAttrs(const P4MirrorSessionEntry &mirror_session_entry) diff --git a/orchagent/p4orch/mirror_session_manager.h b/orchagent/p4orch/mirror_session_manager.h index 5f1c26e10a3..64b823f8607 100644 --- a/orchagent/p4orch/mirror_session_manager.h +++ b/orchagent/p4orch/mirror_session_manager.h @@ -83,11 +83,13 @@ class MirrorSessionManager : public ObjectManagerInterface void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: ReturnCodeOr deserializeP4MirrorSessionAppDbEntry( diff --git a/orchagent/p4orch/neighbor_manager.cpp b/orchagent/p4orch/neighbor_manager.cpp index 7eab9671831..d453f228fc3 100644 --- a/orchagent/p4orch/neighbor_manager.cpp +++ b/orchagent/p4orch/neighbor_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/neighbor_manager.h" +#include #include #include #include @@ -7,7 +8,6 @@ #include "SaiAttributeList.h" #include "crmorch.h" #include "dbconnector.h" -#include #include "logger.h" #include "orch.h" #include "p4orch/p4orch_util.h" @@ -324,14 +324,15 @@ ReturnCode NeighborManager::processDeleteRequest(const std::string &neighbor_key return status; } -ReturnCode NeighborManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode NeighborManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { - std::string router_intf_id, neighbor_id; + std::string router_intf_id, neighbor_id; swss::IpAddress neighbor; try { - nlohmann::json j = nlohmann::json::parse(json_key); + nlohmann::json j = nlohmann::json::parse(json_key); if (j.find(prependMatchField(p4orch::kRouterInterfaceId)) != j.end()) { router_intf_id = j.at(prependMatchField(p4orch::kRouterInterfaceId)).get(); @@ -350,7 +351,8 @@ ReturnCode NeighborManager::getSaiObject(const std::string &json_key, sai_object } else { - SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kRouterInterfaceId); + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", + p4orch::kRouterInterfaceId); } } catch (std::exception &ex) @@ -366,74 +368,78 @@ void NeighborManager::enqueue(const std::string &table_name, const swss::KeyOpFi m_entries.push_back(entry); } -void NeighborManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string db_key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - ReturnCode status; - auto app_db_entry_or = deserializeNeighborEntry(db_key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; - - status = validateNeighborAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for Neighbor APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } +void NeighborManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const std::string neighbor_key = - KeyGenerator::generateNeighborKey(app_db_entry.router_intf_id, app_db_entry.neighbor_id); +ReturnCode NeighborManager::drain() { + SWSS_LOG_ENTER(); - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto *neighbor_entry = getNeighborEntry(neighbor_key); - if (neighbor_entry == nullptr) - { - // Create neighbor - status = processAddRequest(app_db_entry, neighbor_key); - } - else - { - // Modify existing neighbor - status = processUpdateRequest(app_db_entry, neighbor_entry); - } - } - else if (operation == DEL_COMMAND) - { - // Delete neighbor - status = processDeleteRequest(neighbor_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, - /*replace=*/true); - } - m_entries.clear(); + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string db_key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto app_db_entry_or = deserializeNeighborEntry(db_key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + status = validateNeighborAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for Neighbor APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + + const std::string neighbor_key = KeyGenerator::generateNeighborKey( + app_db_entry.router_intf_id, app_db_entry.neighbor_id); + + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto* neighbor_entry = getNeighborEntry(neighbor_key); + if (neighbor_entry == nullptr) { + // Create neighbor + status = processAddRequest(app_db_entry, neighbor_key); + } else { + // Modify existing neighbor + status = processUpdateRequest(app_db_entry, neighbor_entry); + } + } else if (operation == DEL_COMMAND) { + // Delete neighbor + status = processDeleteRequest(neighbor_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } std::string NeighborManager::verifyState(const std::string &key, const std::vector &tuple) diff --git a/orchagent/p4orch/neighbor_manager.h b/orchagent/p4orch/neighbor_manager.h index 0022d3a8cc5..a2dcae132ad 100644 --- a/orchagent/p4orch/neighbor_manager.h +++ b/orchagent/p4orch/neighbor_manager.h @@ -50,9 +50,11 @@ class NeighborManager : public ObjectManagerInterface virtual ~NeighborManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: ReturnCodeOr deserializeNeighborEntry(const std::string &key, diff --git a/orchagent/p4orch/next_hop_manager.cpp b/orchagent/p4orch/next_hop_manager.cpp index 1614a266f69..a00d94d13ed 100644 --- a/orchagent/p4orch/next_hop_manager.cpp +++ b/orchagent/p4orch/next_hop_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/next_hop_manager.h" +#include #include #include #include @@ -8,7 +9,6 @@ #include "crmorch.h" #include "dbconnector.h" #include "ipaddress.h" -#include #include "logger.h" #include "p4orch/p4orch.h" #include "p4orch/p4orch_util.h" @@ -147,13 +147,14 @@ ReturnCodeOr> NextHopManager::getSaiAttrs(const P4N return next_hop_attrs; } -ReturnCode NextHopManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode NextHopManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { - std::string value; + std::string value; try { - nlohmann::json j = nlohmann::json::parse(json_key); + nlohmann::json j = nlohmann::json::parse(json_key); if (j.find(prependMatchField(p4orch::kNexthopId)) != j.end()) { value = j.at(prependMatchField(p4orch::kNexthopId)).get(); @@ -179,73 +180,78 @@ void NextHopManager::enqueue(const std::string &table_name, const swss::KeyOpFie m_entries.push_back(entry); } -void NextHopManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - ReturnCode status; - auto app_db_entry_or = deserializeP4NextHopAppDbEntry(key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; +void NextHopManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const std::string next_hop_key = KeyGenerator::generateNextHopKey(app_db_entry.next_hop_id); +ReturnCode NextHopManager::drain() { + SWSS_LOG_ENTER(); - // Fulfill the operation. - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - status = validateAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for Nexthop APP DB entry with key %s: %s", - QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto *next_hop_entry = getNextHopEntry(next_hop_key); - if (next_hop_entry == nullptr) - { - // Create new next hop. - status = processAddRequest(app_db_entry); - } - else - { - // Modify existing next hop. - status = processUpdateRequest(app_db_entry, next_hop_entry); - } - } - else if (operation == DEL_COMMAND) - { - // Delete next hop. - status = processDeleteRequest(next_hop_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto app_db_entry_or = deserializeP4NextHopAppDbEntry(key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + const std::string next_hop_key = + KeyGenerator::generateNextHopKey(app_db_entry.next_hop_id); + + // Fulfill the operation. + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + status = validateAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for Nexthop APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, /*replace=*/true); - } - m_entries.clear(); + break; + } + auto* next_hop_entry = getNextHopEntry(next_hop_key); + if (next_hop_entry == nullptr) { + // Create new next hop. + status = processAddRequest(app_db_entry); + } else { + // Modify existing next hop. + status = processUpdateRequest(app_db_entry, next_hop_entry); + } + } else if (operation == DEL_COMMAND) { + // Delete next hop. + status = processDeleteRequest(next_hop_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } P4NextHopEntry *NextHopManager::getNextHopEntry(const std::string &next_hop_key) diff --git a/orchagent/p4orch/next_hop_manager.h b/orchagent/p4orch/next_hop_manager.h index 7bacdad5347..a5153f6aee6 100644 --- a/orchagent/p4orch/next_hop_manager.h +++ b/orchagent/p4orch/next_hop_manager.h @@ -58,9 +58,11 @@ class NextHopManager : public ObjectManagerInterface virtual ~NextHopManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: // Gets the internal cached next hop entry by its key. diff --git a/orchagent/p4orch/object_manager_interface.h b/orchagent/p4orch/object_manager_interface.h index 966288a156d..edbaef2beae 100644 --- a/orchagent/p4orch/object_manager_interface.h +++ b/orchagent/p4orch/object_manager_interface.h @@ -1,6 +1,7 @@ #pragma once #include "orch.h" +#include "return_code.h" class ObjectManagerInterface { @@ -11,12 +12,17 @@ class ObjectManagerInterface virtual void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) = 0; // Processes all entries in the queue - virtual void drain() = 0; + // Stops on first failure, returns first error status. + virtual ReturnCode drain() = 0; + + // Drains all entries in the queue without execution. + virtual void drainWithNotExecuted() = 0; // StateVerification helper function for the manager virtual std::string verifyState(const std::string &key, const std::vector &tuple) = 0; // For sai extension objects depending on a sai object // return sai object id for a given table with a given key - virtual ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) = 0; + virtual ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) = 0; }; diff --git a/orchagent/p4orch/p4oidmapper.cpp b/orchagent/p4orch/p4oidmapper.cpp index 63215846a62..d942f3b381d 100644 --- a/orchagent/p4orch/p4oidmapper.cpp +++ b/orchagent/p4orch/p4oidmapper.cpp @@ -1,6 +1,7 @@ #include "p4oidmapper.h" #include +#include #include #include @@ -12,19 +13,9 @@ extern "C" #include "sai.h" } -namespace -{ - -std::string convertToDBField(_In_ const sai_object_type_t object_type, _In_ const std::string &key) -{ - return sai_serialize_object_type(object_type) + ":" + key; -} - -} // namespace +using ::nlohmann::json; -P4OidMapper::P4OidMapper() : m_db("APPL_STATE_DB", 0), m_table(&m_db, "P4RT_KEY_TO_OID") -{ -} +P4OidMapper::P4OidMapper() : m_db("APPL_STATE_DB", 0) {} bool P4OidMapper::setOID(_In_ sai_object_type_t object_type, _In_ const std::string &key, _In_ sai_object_id_t oid, _In_ uint32_t ref_count) @@ -38,7 +29,6 @@ bool P4OidMapper::setOID(_In_ sai_object_type_t object_type, _In_ const std::str } m_oidTables[object_type][key] = {oid, ref_count}; - m_table.hset("", convertToDBField(object_type, key), sai_serialize_object_id(oid)); return true; } @@ -107,7 +97,6 @@ bool P4OidMapper::eraseOID(_In_ sai_object_type_t object_type, _In_ const std::s } m_oidTables[object_type].erase(key); - m_table.hdel("", convertToDBField(object_type, key)); return true; } @@ -116,7 +105,6 @@ void P4OidMapper::eraseAllOIDs(_In_ sai_object_type_t object_type) SWSS_LOG_ENTER(); m_oidTables[object_type].clear(); - m_table.del(""); } size_t P4OidMapper::getNumEntries(_In_ sai_object_type_t object_type) const @@ -200,20 +188,25 @@ std::string P4OidMapper::verifyOIDMapping(_In_ sai_object_type_t object_type, _I << sai_serialize_object_id(mapper_oid); return msg.str(); } - std::string db_oid; - if (!m_table.hget("", convertToDBField(object_type, key), db_oid)) - { - std::stringstream msg; - msg << "OID not found in mapper DB for key " << key; - return msg.str(); - } - if (db_oid != sai_serialize_object_id(oid)) - { - std::stringstream msg; - msg << "OID mismatched in mapper DB for key " << key << ": " << db_oid << " vs " - << sai_serialize_object_id(oid); - return msg.str(); - } return ""; } + +std::string P4OidMapper::dumpStateCache() { + json cache = json({}); + for (int i = 0; i < SAI_OBJECT_TYPE_MAX; i++) { + if (m_oidTables[i].empty()) { + continue; + } + + json oid_mapper_j = json({}); + for (const auto& kv_pair : m_oidTables[i]) { + MapperEntry m = kv_pair.second; + json mapper_entry_j = {{"sai_oid", sai_serialize_object_id(m.sai_oid)}, {"ref_count", m.ref_count}}; + oid_mapper_j[kv_pair.first] = mapper_entry_j; + } + std::string sai_object_type = sai_serialize_object_type(static_cast(i)); + cache[sai_object_type] = oid_mapper_j; + } + return cache.dump(4); +} diff --git a/orchagent/p4orch/p4oidmapper.h b/orchagent/p4orch/p4oidmapper.h index 325acf9503c..8dc97562e56 100644 --- a/orchagent/p4orch/p4oidmapper.h +++ b/orchagent/p4orch/p4oidmapper.h @@ -74,6 +74,9 @@ class P4OidMapper std::string verifyOIDMapping(_In_ sai_object_type_t object_type, _In_ const std::string &key, _In_ sai_object_id_t oid); + // Returns a json string that contains each non-empty OID mapper. + std::string dumpStateCache(); + private: struct MapperEntry { @@ -85,5 +88,4 @@ class P4OidMapper std::unordered_map m_oidTables[SAI_OBJECT_TYPE_MAX]; swss::DBConnector m_db; - swss::Table m_table; }; diff --git a/orchagent/p4orch/p4orch.cpp b/orchagent/p4orch/p4orch.cpp index eca0918171f..39039fcd81a 100644 --- a/orchagent/p4orch/p4orch.cpp +++ b/orchagent/p4orch/p4orch.cpp @@ -8,21 +8,22 @@ #include "copporch.h" #include "logger.h" #include "orch.h" -#include "p4orch/p4orch_util.h" -#include "p4orch/tables_definition_manager.h" #include "p4orch/acl_rule_manager.h" #include "p4orch/acl_table_manager.h" +#include "p4orch/ext_tables_manager.h" #include "p4orch/gre_tunnel_manager.h" #include "p4orch/l3_admit_manager.h" #include "p4orch/neighbor_manager.h" #include "p4orch/next_hop_manager.h" +#include "p4orch/p4orch_util.h" #include "p4orch/route_manager.h" #include "p4orch/router_interface_manager.h" -#include "p4orch/ext_tables_manager.h" +#include "p4orch/tables_definition_manager.h" #include "portsorch.h" #include "return_code.h" #include "sai_serialize.h" #include "timer.h" +#include "timestamp.h" extern PortsOrch *gPortsOrch; #define P4_ACL_COUNTERS_STATS_POLL_TIMER_NAME "P4_ACL_COUNTERS_STATS_POLL_TIMER" @@ -60,18 +61,21 @@ P4Orch::P4Orch(swss::DBConnector *db, std::vector tableNames, VRFOr m_p4TableToManagerMap[APP_P4RT_L3_ADMIT_TABLE_NAME] = m_l3AdmitManager.get(); m_p4TableToManagerMap[APP_P4RT_EXT_TABLES_MANAGER] = m_extTablesManager.get(); - m_p4ManagerPrecedence.push_back(m_tablesDefnManager.get()); - m_p4ManagerPrecedence.push_back(m_routerIntfManager.get()); - m_p4ManagerPrecedence.push_back(m_neighborManager.get()); - m_p4ManagerPrecedence.push_back(m_greTunnelManager.get()); - m_p4ManagerPrecedence.push_back(m_nextHopManager.get()); - m_p4ManagerPrecedence.push_back(m_wcmpManager.get()); - m_p4ManagerPrecedence.push_back(m_routeManager.get()); - m_p4ManagerPrecedence.push_back(m_mirrorSessionManager.get()); - m_p4ManagerPrecedence.push_back(m_aclTableManager.get()); - m_p4ManagerPrecedence.push_back(m_aclRuleManager.get()); - m_p4ManagerPrecedence.push_back(m_l3AdmitManager.get()); - m_p4ManagerPrecedence.push_back(m_extTablesManager.get()); + m_p4ManagerAddPrecedence.push_back(m_tablesDefnManager.get()); + m_p4ManagerAddPrecedence.push_back(m_routerIntfManager.get()); + m_p4ManagerAddPrecedence.push_back(m_neighborManager.get()); + m_p4ManagerAddPrecedence.push_back(m_greTunnelManager.get()); + m_p4ManagerAddPrecedence.push_back(m_nextHopManager.get()); + m_p4ManagerAddPrecedence.push_back(m_wcmpManager.get()); + m_p4ManagerAddPrecedence.push_back(m_routeManager.get()); + m_p4ManagerAddPrecedence.push_back(m_mirrorSessionManager.get()); + m_p4ManagerAddPrecedence.push_back(m_aclTableManager.get()); + m_p4ManagerAddPrecedence.push_back(m_aclRuleManager.get()); + m_p4ManagerAddPrecedence.push_back(m_l3AdmitManager.get()); + m_p4ManagerAddPrecedence.push_back(m_extTablesManager.get()); + for (auto* manager : m_p4ManagerAddPrecedence) { + m_p4ManagerDelPrecedence.insert(m_p4ManagerDelPrecedence.begin(), manager); + } tablesinfo = nullptr; // Add timer executor to update ACL counters stats in COUNTERS_DB @@ -88,8 +92,16 @@ P4Orch::P4Orch(swss::DBConnector *db, std::vector tableNames, VRFOr Orch::addExecutor(ext_executor); m_extCounterStatsTimer->start(); - // Add port state change notification handling support + // Add p4rt notification handling support swss::DBConnector notificationsDb("ASIC_DB", 0); + + m_p4rtNotificationConsumer = + new swss::NotificationConsumer(¬ificationsDb, APP_P4RT_TABLE_NAME); + auto p4rtNotifier = + new Notifier(m_p4rtNotificationConsumer, this, "P4RT_NOTIFICATIONS"); + Orch::addExecutor(p4rtNotifier); + + // Add port state change notification handling support m_portStatusNotificationConsumer = new swss::NotificationConsumer(¬ificationsDb, "NOTIFICATIONS"); auto portStatusNotifier = new Notifier(m_portStatusNotificationConsumer, this, "PORT_STATUS_NOTIFICATIONS"); Orch::addExecutor(portStatusNotifier); @@ -114,46 +126,11 @@ void P4Orch::doTask(Consumer &consumer) auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { - const swss::KeyOpFieldsValuesTuple key_op_fvs_tuple = it->second; - const std::string key = kfvKey(key_op_fvs_tuple); + enqueue(it->second); it = consumer.m_toSync.erase(it); - std::string table_name; - std::string key_content; - parseP4RTKey(key, &table_name, &key_content); - if (table_name.empty()) - { - auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Table name cannot be empty, but was empty in key: " << key; - SWSS_LOG_ERROR("%s", status.message().c_str()); - m_publisher.publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status); - continue; - } - if (m_p4TableToManagerMap.find(table_name) != m_p4TableToManagerMap.end()) - { - m_p4TableToManagerMap[table_name]->enqueue(table_name, key_op_fvs_tuple); - } - else - { - if (table_name.rfind(p4orch::kTablePrefixEXT, 0) != std::string::npos) - { - m_p4TableToManagerMap[APP_P4RT_EXT_TABLES_MANAGER]->enqueue(table_name, key_op_fvs_tuple); - } - else - { - auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Failed to find P4Orch Manager for " << table_name << " P4RT DB table"; - SWSS_LOG_ERROR("%s", status.message().c_str()); - m_publisher.publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status); - } - } - } - - for (const auto &manager : m_p4ManagerPrecedence) - { - manager->drain(); } + drain(SET_COMMAND); + m_publisher.flush(); } void P4Orch::doTask(swss::SelectableTimer &timer) @@ -180,6 +157,96 @@ void P4Orch::doTask(swss::SelectableTimer &timer) } } +void P4Orch::enqueue(const swss::KeyOpFieldsValuesTuple& entry) { + const std::string& key = kfvKey(entry); + const std::vector& values = kfvFieldsValues(entry); + std::string table_name; + std::string key_content; + parseP4RTKey(key, &table_name, &key_content); + if (table_name.empty()) { + auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Table name cannot be empty, but was empty in key: " + << key; + SWSS_LOG_ERROR("%s", status.message().c_str()); + m_publisher.publish(APP_P4RT_TABLE_NAME, key, values, status, + /*replace=*/true); + return; + } + if (m_p4TableToManagerMap.find(table_name) != m_p4TableToManagerMap.end()) { + m_p4TableToManagerMap[table_name]->enqueue(table_name, entry); + } else { + if (table_name.rfind(p4orch::kTablePrefixEXT, 0) != std::string::npos) { + m_p4TableToManagerMap[APP_P4RT_EXT_TABLES_MANAGER]->enqueue(table_name, + entry); + } else { + auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Failed to find P4Orch Manager for " << table_name + << " P4RT DB table"; + SWSS_LOG_ERROR("%s", status.message().c_str()); + m_publisher.publish(APP_P4RT_TABLE_NAME, kfvKey(entry), + kfvFieldsValues(entry), status, /*replace=*/true); + } + } +} + +ReturnCode P4Orch::drain(const std::string& op) { + ReturnCode status; + if (op == SET_COMMAND) { + for (const auto& manager : m_p4ManagerAddPrecedence) { + if (status.ok()) { + status = manager->drain(); + } else { + manager->drainWithNotExecuted(); + } + } + } else { + for (const auto& manager : m_p4ManagerDelPrecedence) { + if (status.ok()) { + status = manager->drain(); + } else { + manager->drainWithNotExecuted(); + } + } + } + return status; +} + +void P4Orch::handleP4rtNotification( + const std::vector& values) { + std::string prev_op = ""; + ReturnCode status; + for (const auto& value : values) { + std::string op = DEL_COMMAND; + std::vector vals; + if (!fvValue(value).empty()) { + op = SET_COMMAND; + JSon::readJson(fvValue(value), vals); + } + if (prev_op.empty()) { + prev_op = op; + } + swss::KeyOpFieldsValuesTuple key_op_fvs_tuple(fvField(value), op, vals); + + // Call drain after grouping the same type of requests together. + if (op != prev_op && status.ok()) { + status = drain(prev_op); + prev_op = op; + } + + // Stop enqueue if there is any failure. + if (status.ok()) { + enqueue(key_op_fvs_tuple); + } else { + m_publisher.publish(APP_P4RT_TABLE_NAME, fvField(value), vals, + ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED), + /*replace=*/true); + } + } + if (!prev_op.empty() && status.ok()) { + drain(prev_op); + } +} + void P4Orch::handlePortStatusChangeNotification(const std::string &op, const std::string &data) { if (op == "port_state_change") @@ -231,8 +298,10 @@ void P4Orch::doTask(NotificationConsumer &consumer) consumer.pop(op, data, values); - if (&consumer == m_portStatusNotificationConsumer) - { + + if (&consumer == m_p4rtNotificationConsumer) { + handleP4rtNotification(values); + } else if (&consumer == m_portStatusNotificationConsumer) { handlePortStatusChangeNotification(op, data); } } diff --git a/orchagent/p4orch/p4orch.h b/orchagent/p4orch/p4orch.h index 9385346d200..6bcadf8c06f 100644 --- a/orchagent/p4orch/p4orch.h +++ b/orchagent/p4orch/p4orch.h @@ -10,9 +10,9 @@ #include "notificationconsumer.h" #include "notifier.h" #include "orch.h" -#include "p4orch/tables_definition_manager.h" #include "p4orch/acl_rule_manager.h" #include "p4orch/acl_table_manager.h" +#include "p4orch/ext_tables_manager.h" #include "p4orch/gre_tunnel_manager.h" #include "p4orch/l3_admit_manager.h" #include "p4orch/mirror_session_manager.h" @@ -22,22 +22,22 @@ #include "p4orch/p4oidmapper.h" #include "p4orch/route_manager.h" #include "p4orch/router_interface_manager.h" +#include "p4orch/tables_definition_manager.h" #include "p4orch/wcmp_manager.h" -#include "p4orch/ext_tables_manager.h" #include "response_publisher.h" +#include "return_code.h" #include "vrforch.h" static const std::map FixedTablesMap = { - {"router_interface_table", APP_P4RT_ROUTER_INTERFACE_TABLE_NAME }, - {"neighbor_table", APP_P4RT_NEIGHBOR_TABLE_NAME}, - {"nexthop_table", APP_P4RT_NEXTHOP_TABLE_NAME}, - {"wcmp_group_table", APP_P4RT_WCMP_GROUP_TABLE_NAME}, - {"ipv4_table", APP_P4RT_IPV4_TABLE_NAME}, - {"ipv6_table", APP_P4RT_IPV6_TABLE_NAME}, - {"mirror_session_table", APP_P4RT_MIRROR_SESSION_TABLE_NAME}, - {"l3_admit_table", APP_P4RT_L3_ADMIT_TABLE_NAME}, - {"tunnel_table", APP_P4RT_TUNNEL_TABLE_NAME} -}; + {"router_interface_table", APP_P4RT_ROUTER_INTERFACE_TABLE_NAME}, + {"neighbor_table", APP_P4RT_NEIGHBOR_TABLE_NAME}, + {"nexthop_table", APP_P4RT_NEXTHOP_TABLE_NAME}, + {"wcmp_group_table", APP_P4RT_WCMP_GROUP_TABLE_NAME}, + {"ipv4_table", APP_P4RT_IPV4_TABLE_NAME}, + {"ipv6_table", APP_P4RT_IPV6_TABLE_NAME}, + {"mirror_session_table", APP_P4RT_MIRROR_SESSION_TABLE_NAME}, + {"l3_admit_table", APP_P4RT_L3_ADMIT_TABLE_NAME}, + {"tunnel_table", APP_P4RT_TUNNEL_TABLE_NAME}}; class P4Orch : public Orch { @@ -55,16 +55,19 @@ class P4Orch : public Orch // m_p4TableToManagerMap: P4 APP DB table name, P4 Object Manager std::unordered_map m_p4TableToManagerMap; - private: void doTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); void doTask(swss::NotificationConsumer &consumer); + void enqueue(const swss::KeyOpFieldsValuesTuple& entry); + ReturnCode drain(const std::string& op); + void handleP4rtNotification(const std::vector& values); void handlePortStatusChangeNotification(const std::string &op, const std::string &data); // P4 object manager request processing order. - std::vector m_p4ManagerPrecedence; + std::vector m_p4ManagerAddPrecedence; + std::vector m_p4ManagerDelPrecedence; swss::SelectableTimer *m_aclCounterStatsTimer; swss::SelectableTimer *m_extCounterStatsTimer; @@ -83,7 +86,12 @@ class P4Orch : public Orch std::unique_ptr m_extTablesManager; // Notification consumer for port state change + swss::NotificationConsumer* m_p4rtNotificationConsumer; swss::NotificationConsumer *m_portStatusNotificationConsumer; + // Sepcial publisher that writes to APPL DB instead of APPL STATE DB. + ResponsePublisher m_publisher{"APPL_DB", /*bool buffered=*/true, /*db_write_thread=*/true}; + + friend class P4OrchTest; friend class p4orch::test::WcmpManagerTest; }; diff --git a/orchagent/p4orch/p4orch_util.cpp b/orchagent/p4orch/p4orch_util.cpp index b2ea0a762b6..00566b70fda 100644 --- a/orchagent/p4orch/p4orch_util.cpp +++ b/orchagent/p4orch/p4orch_util.cpp @@ -1,6 +1,6 @@ -#include "p4orch/p4orch.h" #include "p4orch/p4orch_util.h" +#include "p4orch/p4orch.h" #include "schema.h" using ::p4orch::kTableKeyDelimiter; @@ -116,12 +116,22 @@ ActionInfo *getTableActionInfo(TableInfo *table, const std::string &action_name) std::string KeyGenerator::generateTablesInfoKey(const std::string &context) { - std::map fv_map = { - {"context", context} - }; + std::map fv_map = {{"context", context}}; return generateKey(fv_map); } +void drainMgmtWithNotExecuted(std::deque& entries, + ResponsePublisherInterface* publisher) { + for (const auto& key_op_fvs_tuple : entries) { + publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), + ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED), + /*replace=*/true); + } + entries.clear(); + return; +} + std::string KeyGenerator::generateRouteKey(const std::string &vrf_id, const swss::IpPrefix &ip_prefix) { std::map fv_map = { diff --git a/orchagent/p4orch/p4orch_util.h b/orchagent/p4orch/p4orch_util.h index f95a9fd8eb7..be60f860f54 100644 --- a/orchagent/p4orch/p4orch_util.h +++ b/orchagent/p4orch/p4orch_util.h @@ -1,22 +1,24 @@ #pragma once +#include #include #include #include #include #include -#include #include +#include #include "ipaddress.h" #include "ipprefix.h" #include "macaddress.h" +#include "response_publisher_interface.h" +#include "return_code.h" #include "table.h" extern "C" { #include "saitypes.h" } - namespace p4orch { @@ -110,48 +112,48 @@ std::string prependParamField(const std::string &str); struct ActionParamInfo { - std::string name; - std::string fieldtype; - std::string datatype; - std::unordered_map table_reference_map; + std::string name; + std::string fieldtype; + std::string datatype; + std::unordered_map table_reference_map; }; struct ActionInfo { - std::string name; + std::string name; std::unordered_map params; - bool refers_to; + bool refers_to; }; struct TableMatchInfo { - std::string name; - std::string fieldtype; - std::string datatype; - std::unordered_map table_reference_map; + std::string name; + std::string fieldtype; + std::string datatype; + std::unordered_map table_reference_map; }; /** - * Dervied table definition + * Dervied table definition * This is a derived state out of table definition provided by P4RT-APP */ struct TableInfo { - std::string name; - int id; - int precedence; - std::unordered_map match_fields; - std::unordered_map action_fields; - bool counter_bytes_enabled; - bool counter_packets_enabled; - std::vector action_ref_tables; - // list of tables across all actions, of current table, refer to + std::string name; + int id; + int precedence; + std::unordered_map match_fields; + std::unordered_map action_fields; + bool counter_bytes_enabled; + bool counter_packets_enabled; + std::vector action_ref_tables; + // list of tables across all actions, of current table, refer to }; /** * table-name to table-definition map */ -typedef std::unordered_map TableInfoMap; +typedef std::unordered_map TableInfoMap; struct TablesInfoAppDbEntry { @@ -159,7 +161,6 @@ struct TablesInfoAppDbEntry std::string info; }; - struct P4RouterInterfaceAppDbEntry { std::string router_interface_id; @@ -296,8 +297,8 @@ struct P4AclRuleAppDbEntry struct DepObject { sai_object_type_t sai_object; - std::string key; - sai_object_id_t oid; + std::string key; + sai_object_id_t oid; }; struct P4ExtTableAppDbEntry @@ -309,7 +310,6 @@ struct P4ExtTableAppDbEntry std::unordered_map action_dep_objects; }; - TableInfo *getTableInfo(const std::string &table_name); ActionInfo *getTableActionInfo(TableInfo *table, const std::string &action_name); @@ -333,6 +333,10 @@ std::string verifyAttrs(const std::vector &targets, const std::vector &exp, const std::vector &opt, bool allow_unknown); +// Helper function to drain all entries in the manager without execution. +void drainMgmtWithNotExecuted(std::deque& entries, + ResponsePublisherInterface* publisher); + // class KeyGenerator includes member functions to generate keys for entries // stored in P4 Orch managers. class KeyGenerator diff --git a/orchagent/p4orch/route_manager.cpp b/orchagent/p4orch/route_manager.cpp index bc8f3bbcd89..e30c21c9184 100644 --- a/orchagent/p4orch/route_manager.cpp +++ b/orchagent/p4orch/route_manager.cpp @@ -1,6 +1,7 @@ #include "p4orch/route_manager.h" #include +#include #include #include #include @@ -11,7 +12,6 @@ #include "converter.h" #include "crmorch.h" #include "dbconnector.h" -#include #include "logger.h" #include "p4orch/p4orch_util.h" #include "sai_serialize.h" @@ -103,17 +103,14 @@ sai_object_id_t getNexthopOid(const P4RouteEntry &route_entry, const P4OidMapper } // Returns the SAI action of the given entry. -sai_packet_action_t getSaiAction(const P4RouteEntry &route_entry) -{ - if (route_entry.action == p4orch::kDrop || route_entry.action == p4orch::kSetMetadataAndDrop) - { - return SAI_PACKET_ACTION_DROP; - } - else if (route_entry.action == p4orch::kTrap) - { - return SAI_PACKET_ACTION_TRAP; - } - return SAI_PACKET_ACTION_FORWARD; +sai_packet_action_t prepareSaiAction(const P4RouteEntry& route_entry) { + if (route_entry.action == p4orch::kDrop || + route_entry.action == p4orch::kSetMetadataAndDrop) { + return SAI_PACKET_ACTION_DROP; + } else if (route_entry.action == p4orch::kTrap) { + return SAI_PACKET_ACTION_TRAP; + } + return SAI_PACKET_ACTION_FORWARD; } // Returns the metadata of the given entry. @@ -127,46 +124,55 @@ uint32_t getMetadata(const P4RouteEntry &route_entry) } // Returns a list of SAI actions for route update. -std::vector getSaiActions(const std::string action) -{ - static const auto *const kRouteActionToSaiActions = - new std::unordered_map>({ - {p4orch::kSetNexthopId, - std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, - SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, - {p4orch::kSetWcmpGroupId, - std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, - SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, - {p4orch::kSetNexthopIdAndMetadata, - std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, - SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, - {p4orch::kSetWcmpGroupIdAndMetadata, - std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, - SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, - {p4orch::kDrop, - std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, - SAI_ROUTE_ENTRY_ATTR_META_DATA}}, - {p4orch::kTrap, - std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, - SAI_ROUTE_ENTRY_ATTR_META_DATA}}, - {p4orch::kSetMetadataAndDrop, - std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, - SAI_ROUTE_ENTRY_ATTR_META_DATA}}, - }); - - if (kRouteActionToSaiActions->count(action) == 0) - { - return std::vector{}; - } - return kRouteActionToSaiActions->at(action); +std::vector prepareSaiActions( + const std::string action) { + static const auto* const kRouteActionToSaiActions = new std::unordered_map< + std::string, std::vector>({ + {p4orch::kSetNexthopId, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, + SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kSetWcmpGroupId, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, + SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kSetNexthopIdAndMetadata, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, + SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kSetWcmpGroupIdAndMetadata, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, + SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kDrop, + std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, + SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_META_DATA}}, + {p4orch::kTrap, + std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, + SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_META_DATA}}, + {p4orch::kSetMetadataAndDrop, + std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, + SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_META_DATA}}, + }); + + if (kRouteActionToSaiActions->count(action) == 0) { + return std::vector{}; + } + return kRouteActionToSaiActions->at(action); } } // namespace -RouteUpdater::RouteUpdater(const P4RouteEntry &old_route, const P4RouteEntry &new_route, P4OidMapper *mapper) - : m_oldRoute(old_route), m_newRoute(new_route), m_p4OidMapper(mapper), m_actions(getSaiActions(new_route.action)) -{ - updateIdx(); +RouteUpdater::RouteUpdater(const P4RouteEntry& old_route, + const P4RouteEntry& new_route, P4OidMapper* mapper) + : m_oldRoute(old_route), + m_newRoute(new_route), + m_p4OidMapper(mapper), + m_actions(prepareSaiActions(new_route.action)) { + updateIdx(); } P4RouteEntry RouteUpdater::getOldEntry() const @@ -179,31 +185,32 @@ P4RouteEntry RouteUpdater::getNewEntry() const return m_newRoute; } -sai_route_entry_t RouteUpdater::getSaiEntry() const -{ - return m_newRoute.sai_route_entry; +sai_route_entry_t RouteUpdater::prepareSaiEntry() const { + return m_newRoute.sai_route_entry; } -sai_attribute_t RouteUpdater::getSaiAttr() const -{ - sai_attribute_t route_attr = {}; - if (m_idx < 0 || m_idx >= static_cast(m_actions.size())) - { - return route_attr; - } - route_attr.id = m_actions[m_idx]; - switch (m_actions[m_idx]) - { +sai_attribute_t RouteUpdater::prepareSaiAttr() const { + return prepareSaiAttr(m_idx); +} + +sai_attribute_t RouteUpdater::prepareSaiAttr(int idx) const { + sai_attribute_t route_attr = {}; + if (idx < 0 || idx >= static_cast(m_actions.size())) { + return route_attr; + } + route_attr.id = m_actions[idx]; + switch (m_actions[idx]) { case SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID: route_attr.value.oid = (m_revert) ? getNexthopOid(m_oldRoute, *m_p4OidMapper) : getNexthopOid(m_newRoute, *m_p4OidMapper); break; case SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION: - route_attr.value.s32 = (m_revert) ? getSaiAction(m_oldRoute) : getSaiAction(m_newRoute); - break; + route_attr.value.s32 = (m_revert) ? prepareSaiAction(m_oldRoute) + : prepareSaiAction(m_newRoute); + break; default: route_attr.value.u32 = (m_revert) ? getMetadata(m_oldRoute) : getMetadata(m_newRoute); - } + } return route_attr; } @@ -233,37 +240,39 @@ ReturnCode RouteUpdater::getStatus() const return m_status; } -bool RouteUpdater::updateIdx() -{ - if (m_revert) - { - for (--m_idx; m_idx >= 0; --m_idx) - { - if (checkAction()) - { - return false; - } - } - return true; - } - for (++m_idx; m_idx < static_cast(m_actions.size()); ++m_idx) - { - if (checkAction()) - { - return false; - } - } - return true; +std::vector RouteUpdater::GetSaiAttrList() const { + std::vector attrs; + for (int idx = m_idx; idx >= 0 && idx < static_cast(m_actions.size());) { + attrs.push_back(prepareSaiAttr(idx)); + updateIdx(idx); + } + return attrs; } -bool RouteUpdater::checkAction() const -{ - if (m_idx < 0 || m_idx >= static_cast(m_actions.size())) - { +bool RouteUpdater::updateIdx() { return updateIdx(m_idx); } + +bool RouteUpdater::updateIdx(int& idx) const { + if (m_revert) { + for (--idx; idx >= 0; --idx) { + if (checkAction(idx)) { return false; + } } - switch (m_actions[m_idx]) - { + return true; + } + for (++idx; idx < static_cast(m_actions.size()); ++idx) { + if (checkAction(idx)) { + return false; + } + } + return true; +} + +bool RouteUpdater::checkAction(int idx) const { + if (idx < 0 || idx >= static_cast(m_actions.size())) { + return false; + } + switch (m_actions[idx]) { case SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID: if (getNexthopOid(m_oldRoute, *m_p4OidMapper) == getNexthopOid(m_newRoute, *m_p4OidMapper)) { @@ -271,10 +280,9 @@ bool RouteUpdater::checkAction() const } return true; case SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION: - if (getSaiAction(m_oldRoute) == getSaiAction(m_newRoute)) - { - return false; - } + if (prepareSaiAction(m_oldRoute) == prepareSaiAction(m_newRoute)) { + return false; + } return true; default: if (getMetadata(m_oldRoute) == getMetadata(m_newRoute)) @@ -282,7 +290,7 @@ bool RouteUpdater::checkAction() const return false; } return true; - } + } return false; } @@ -297,13 +305,13 @@ RouteManager::RouteManager(P4OidMapper *p4oidMapper, VRFOrch *vrfOrch, ResponseP m_publisher = publisher; } -sai_route_entry_t RouteManager::getSaiEntry(const P4RouteEntry &route_entry) -{ - sai_route_entry_t sai_entry; - sai_entry.vr_id = m_vrfOrch->getVRFid(route_entry.vrf_id); - sai_entry.switch_id = gSwitchId; - copy(sai_entry.destination, route_entry.route_prefix); - return sai_entry; +sai_route_entry_t RouteManager::prepareSaiEntry( + const P4RouteEntry& route_entry) { + sai_route_entry_t sai_entry; + sai_entry.vr_id = m_vrfOrch->getVRFid(route_entry.vrf_id); + sai_entry.switch_id = gSwitchId; + copy(sai_entry.destination, route_entry.route_prefix); + return sai_entry; } bool RouteManager::mergeRouteEntry(const P4RouteEntry &dest, const P4RouteEntry &src, P4RouteEntry *ret) @@ -565,6 +573,52 @@ ReturnCode RouteManager::validateDelRouteEntry(const P4RouteEntry &route_entry) return ReturnCode(); } +ReturnCode RouteManager::processRouteEntries( + const std::vector& route_entries, + const std::vector& tuple_list, + const std::string& op, bool update) { + SWSS_LOG_ENTER(); + + ReturnCode status; + // In syncd, bulk SAI calls use mode SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR. + if (op == SET_COMMAND) { + if (!update) { + auto statuses = createRouteEntries(route_entries); + for (size_t i = 0; i < route_entries.size(); ++i) { + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(tuple_list[i]), + kfvFieldsValues(tuple_list[i]), statuses[i], + /*replace=*/true); + if (status.ok() && !statuses[i].ok()) { + status = statuses[i]; + } + } + } else { + // TODO: Stop on first failure for batch update. + auto statuses = updateRouteEntries(route_entries); + for (size_t i = 0; i < route_entries.size(); ++i) { + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(tuple_list[i]), + kfvFieldsValues(tuple_list[i]), statuses[i], + /*replace=*/true); + if (status.ok() && !statuses[i].ok()) { + status = statuses[i]; + } + } + } + } else { + auto statuses = deleteRouteEntries(route_entries); + for (size_t i = 0; i < route_entries.size(); ++i) { + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(tuple_list[i]), + kfvFieldsValues(tuple_list[i]), statuses[i], + /*replace=*/true); + if (status.ok() && !statuses[i].ok()) { + status = statuses[i]; + } + } + } + + return status; +} + std::vector RouteManager::createRouteEntries(const std::vector &route_entries) { SWSS_LOG_ENTER(); @@ -584,7 +638,7 @@ std::vector RouteManager::createRouteEntries(const std::vector> &updaters, std::vector &indice, std::vector &statuses) { - std::vector sai_route_entries(size); - std::vector sai_attrs(size); - std::vector object_statuses(size); - // We will perform route update in multiple SAI calls. - // If error is encountered, the previous SAI calls will be reverted. - // Raise critical state if the revert fails. - // We avoid changing multiple attributes of the same entry in a single bulk - // call. - constexpr int kMaxAttrUpdate = 20; - int i; - for (i = 0; i < kMaxAttrUpdate; ++i) - { - for (int j = 0; j < size; ++j) - { - sai_route_entries[j] = updaters[indice[j]]->getSaiEntry(); - sai_attrs[j] = updaters[indice[j]]->getSaiAttr(); - m_routerBulker.set_entry_attribute(&object_statuses[j], &sai_route_entries[j], &sai_attrs[j]); + std::vector sai_route_entries; + std::vector sai_attrs; + std::vector updator_idx; + // All SAI attribute update will be performed in a single bulk SAI call. + // Syncd will stop on the first error. + // If error occures, successful update will be reverted for that particular + // request. + for (int i = 0; i < size; ++i) { + for (const auto& att : updaters[indice[i]]->GetSaiAttrList()) { + sai_route_entries.push_back(updaters[indice[i]]->prepareSaiEntry()); + sai_attrs.push_back(att); + updator_idx.push_back(indice[i]); + } + statuses[indice[i]] = ReturnCode(); + } + std::vector object_statuses(updator_idx.size()); + for (size_t i = 0; i < updator_idx.size(); ++i) { + m_routerBulker.set_entry_attribute(&object_statuses[i], + &sai_route_entries[i], &sai_attrs[i]); + } + m_routerBulker.flush(); + + for (size_t i = 0; i < updator_idx.size(); ++i) { + updaters[updator_idx[i]]->updateResult(object_statuses[i]); + if (object_statuses[i] != SAI_STATUS_SUCCESS) { + auto revert_attrs = updaters[updator_idx[i]]->GetSaiAttrList(); + if (!revert_attrs.empty()) { + std::vector revert_entries(revert_attrs.size()); + std::vector revert_statuses(revert_attrs.size()); + for (size_t j = 0; j < revert_attrs.size(); ++j) { + revert_entries[j] = updaters[updator_idx[i]]->prepareSaiEntry(); + m_routerBulker.set_entry_attribute( + &revert_statuses[j], &revert_entries[j], &revert_attrs[j]); } m_routerBulker.flush(); - int new_size = 0; - for (int j = 0; j < size; j++) - { - if (updaters[indice[j]]->updateResult(object_statuses[j])) - { - statuses[indice[j]] = updaters[indice[j]]->getStatus(); - if (statuses[indice[j]].ok()) - { - updateRouteEntriesMeta(updaters[indice[j]]->getOldEntry(), updaters[indice[j]]->getNewEntry()); - } - } - else - { - indice[new_size++] = indice[j]; - } - } - if (new_size == 0) - { - break; + for (size_t j = 0; j < revert_attrs.size(); ++j) { + updaters[updator_idx[i]]->updateResult(revert_statuses[j]); } - size = new_size; + } + statuses[updator_idx[i]] = updaters[updator_idx[i]]->getStatus(); + for (size_t j = updator_idx[i] + 1; j < statuses.size(); ++j) { + statuses[j] = ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + break; } - // Just a safety check to prevent infinite loop. Should not happen. - if (i == kMaxAttrUpdate) - { - SWSS_RAISE_CRITICAL_STATE("Route update operation did not terminate."); + } + + for (int i = 0; i < size; ++i) { + if (statuses[indice[i]].ok()) { + updateRouteEntriesMeta(updaters[indice[i]]->getOldEntry(), + updaters[indice[i]]->getNewEntry()); + } else { + break; } + } + return; } @@ -837,7 +901,8 @@ std::vector RouteManager::deleteRouteEntries(const std::vector create_route_list; - std::vector update_route_list; - std::vector delete_route_list; - std::vector create_tuple_list; - std::vector update_tuple_list; - std::vector delete_tuple_list; - std::unordered_set route_entry_list; - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - ReturnCode status; - auto route_entry_or = deserializeRouteEntry(key, attributes, table_name); - if (!route_entry_or.ok()) - { - status = route_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &route_entry = *route_entry_or; - - // A single batch should not modify the same route more than once. - if (route_entry_list.count(route_entry.route_entry_key) != 0) - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Route entry has been included in the same batch"; - SWSS_LOG_ERROR("%s: %s", status.message().c_str(), QuotedVar(route_entry.route_entry_key).c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } +void RouteManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const std::string &operation = kfvOp(key_op_fvs_tuple); - status = validateRouteEntry(route_entry, operation); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for Route APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - route_entry_list.insert(route_entry.route_entry_key); +ReturnCode RouteManager::drain() { + SWSS_LOG_ENTER(); - if (operation == SET_COMMAND) - { - if (getRouteEntry(route_entry.route_entry_key) == nullptr) - { - create_route_list.push_back(route_entry); - create_tuple_list.push_back(key_op_fvs_tuple); - } - else - { - update_route_list.push_back(route_entry); - update_tuple_list.push_back(key_op_fvs_tuple); - } - } - else - { - delete_route_list.push_back(route_entry); - delete_tuple_list.push_back(key_op_fvs_tuple); - } - } + std::vector route_list; + std::vector tuple_list; + std::unordered_set route_entry_list; - if (!create_route_list.empty()) - { - auto statuses = createRouteEntries(create_route_list); - for (size_t i = 0; i < create_route_list.size(); ++i) - { - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(create_tuple_list[i]), - kfvFieldsValues(create_tuple_list[i]), statuses[i], - /*replace=*/true); - } - } - if (!update_route_list.empty()) - { - auto statuses = updateRouteEntries(update_route_list); - for (size_t i = 0; i < update_route_list.size(); ++i) - { - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(update_tuple_list[i]), - kfvFieldsValues(update_tuple_list[i]), statuses[i], - /*replace=*/true); - } - } - if (!delete_route_list.empty()) - { - auto statuses = deleteRouteEntries(delete_route_list); - for (size_t i = 0; i < delete_route_list.size(); ++i) - { - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(delete_tuple_list[i]), - kfvFieldsValues(delete_tuple_list[i]), statuses[i], - /*replace=*/true); - } - } - m_entries.clear(); + ReturnCode status; + std::string prev_op; + bool prev_update = false; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto route_entry_or = deserializeRouteEntry(key, attributes, table_name); + if (!route_entry_or.ok()) { + status = route_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& route_entry = *route_entry_or; + + // A single batch should not modify the same route more than once. + if (route_entry_list.count(route_entry.route_entry_key) != 0) { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Route entry has been included in the same batch"; + SWSS_LOG_ERROR("%s: %s", status.message().c_str(), + QuotedVar(route_entry.route_entry_key).c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + + const std::string& operation = kfvOp(key_op_fvs_tuple); + status = validateRouteEntry(route_entry, operation); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for Route APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + route_entry_list.insert(route_entry.route_entry_key); + + bool update = (getRouteEntry(route_entry.route_entry_key) != nullptr); + if (prev_op == "") { + prev_op = operation; + prev_update = update; + } + // Process the entries if the operation type changes. + if (operation != prev_op || update != prev_update) { + status = + processRouteEntries(route_list, tuple_list, prev_op, prev_update); + route_list.clear(); + tuple_list.clear(); + prev_op = operation; + prev_update = update; + } + + if (!status.ok()) { + // Return SWSS_RC_NOT_EXECUTED if failure has occured. + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), + ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED), + /*replace=*/true); + break; + } else { + route_list.push_back(route_entry); + tuple_list.push_back(key_op_fvs_tuple); + } + } + + if (!route_list.empty()) { + auto rc = processRouteEntries(route_list, tuple_list, prev_op, prev_update); + if (!rc.ok()) { + status = rc; + } + } + drainWithNotExecuted(); + return status; } std::string RouteManager::verifyState(const std::string &key, const std::vector &tuple) @@ -1159,8 +1213,9 @@ std::string RouteManager::verifyStateAsicDb(const P4RouteEntry *route_entry) swss::DBConnector db("ASIC_DB", 0); swss::Table table(&db, "ASIC_STATE"); - std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_ROUTE_ENTRY) + ":" + - sai_serialize_route_entry(getSaiEntry(*route_entry)); + std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_ROUTE_ENTRY) + + ":" + + sai_serialize_route_entry(prepareSaiEntry(*route_entry)); std::vector values; if (!table.get(key, values)) { diff --git a/orchagent/p4orch/route_manager.h b/orchagent/p4orch/route_manager.h index 6572fd61372..66c64f9c19a 100644 --- a/orchagent/p4orch/route_manager.h +++ b/orchagent/p4orch/route_manager.h @@ -49,9 +49,9 @@ class RouteUpdater P4RouteEntry getOldEntry() const; P4RouteEntry getNewEntry() const; - sai_route_entry_t getSaiEntry() const; + sai_route_entry_t prepareSaiEntry() const; // Returns the next SAI attribute that should be performed. - sai_attribute_t getSaiAttr() const; + sai_attribute_t prepareSaiAttr() const; // Updates the state by the given SAI result. // Returns true if all operations are completed. // This method will raise critical state if a recovery action fails. @@ -59,14 +59,21 @@ class RouteUpdater // Returns the overall status of the route update. // This method should only be called after UpdateResult returns true. ReturnCode getStatus() const; + // Returns a list of SAI attributes that the update needs to preform from + // the current state to the final state. If the updater is in revert mode, + // the final state is the old route entry. + std::vector GetSaiAttrList() const; - private: + private: // Updates the action index. // Returns true if there are no more actions. bool updateIdx(); + bool updateIdx(int& idx) const; // Checks if the current action should be performed or not. // Returns true if the action should be performed. - bool checkAction() const; + bool checkAction(int idx) const; + // Returns the SAI attribute that should be performed by the given index. + sai_attribute_t prepareSaiAttr(int idx) const; P4OidMapper *m_p4OidMapper; P4RouteEntry m_oldRoute; @@ -84,9 +91,11 @@ class RouteManager : public ObjectManagerInterface virtual ~RouteManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: // Applies route entry updates from src to dest. The merged result will be @@ -122,6 +131,12 @@ class RouteManager : public ObjectManagerInterface // Deletes a list of route entries. std::vector deleteRouteEntries(const std::vector &route_entries); + // Process a list of route entries by the given operation. + ReturnCode processRouteEntries( + const std::vector& route_entries, + const std::vector& tuple_list, + const std::string& op, bool update); + // On a successful route entry update, updates the reference counters and // internal data. void updateRouteEntriesMeta(const P4RouteEntry &old_entry, const P4RouteEntry &new_entry); @@ -137,7 +152,7 @@ class RouteManager : public ObjectManagerInterface std::string verifyStateAsicDb(const P4RouteEntry *route_entry); // Returns the SAI entry. - sai_route_entry_t getSaiEntry(const P4RouteEntry &route_entry); + sai_route_entry_t prepareSaiEntry(const P4RouteEntry& route_entry); P4RouteTable m_routeTable; P4OidMapper *m_p4OidMapper; diff --git a/orchagent/p4orch/router_interface_manager.cpp b/orchagent/p4orch/router_interface_manager.cpp index e174b5ec7e2..e2336edbb9c 100644 --- a/orchagent/p4orch/router_interface_manager.cpp +++ b/orchagent/p4orch/router_interface_manager.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -10,7 +11,6 @@ #include "SaiAttributeList.h" #include "dbconnector.h" #include "directory.h" -#include #include "logger.h" #include "orch.h" #include "p4orch/p4orch_util.h" @@ -104,7 +104,16 @@ ReturnCodeOr> getSaiAttrs(const P4RouterInterfaceEn attr.id = SAI_ROUTER_INTERFACE_ATTR_VLAN_ID; attr.value.oid = port.m_vlan_info.vlan_oid; break; - // TODO: add support for PORT::SUBPORT + case Port::SUBPORT: + attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_SUB_PORT; + attrs.push_back(attr); + attr.id = SAI_ROUTER_INTERFACE_ATTR_PORT_ID; + attr.value.oid = port.m_port_id; + attrs.push_back(attr); + attr.id = SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID; + attr.value.oid = port.m_vlan_info.vlan_oid; + break; + default: LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unsupported port type: " << port.m_type); } @@ -337,13 +346,14 @@ ReturnCode RouterInterfaceManager::processDeleteRequest(const std::string &route return status; } -ReturnCode RouterInterfaceManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode RouterInterfaceManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { - std::string value; + std::string value; try { - nlohmann::json j = nlohmann::json::parse(json_key); + nlohmann::json j = nlohmann::json::parse(json_key); if (j.find(prependMatchField(p4orch::kRouterInterfaceId)) != j.end()) { value = j.at(prependMatchField(p4orch::kRouterInterfaceId)).get(); @@ -353,7 +363,8 @@ ReturnCode RouterInterfaceManager::getSaiObject(const std::string &json_key, sai } else { - SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kRouterInterfaceId); + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", + p4orch::kRouterInterfaceId); } } catch (std::exception &ex) @@ -369,73 +380,79 @@ void RouterInterfaceManager::enqueue(const std::string &table_name, const swss:: m_entries.push_back(entry); } -void RouterInterfaceManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string db_key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - ReturnCode status; - auto app_db_entry_or = deserializeRouterIntfEntry(db_key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; - - status = validateRouterInterfaceAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for Router Interface APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } +void RouterInterfaceManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const std::string router_intf_key = KeyGenerator::generateRouterInterfaceKey(app_db_entry.router_interface_id); +ReturnCode RouterInterfaceManager::drain() { + SWSS_LOG_ENTER(); - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto *router_intf_entry = getRouterInterfaceEntry(router_intf_key); - if (router_intf_entry == nullptr) - { - // Create router interface - status = processAddRequest(app_db_entry, router_intf_key); - } - else - { - // Modify existing router interface - status = processUpdateRequest(app_db_entry, router_intf_entry); - } - } - else if (operation == DEL_COMMAND) - { - // Delete router interface - status = processDeleteRequest(router_intf_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, - /*replace=*/true); - } - m_entries.clear(); + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string db_key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto app_db_entry_or = deserializeRouterIntfEntry(db_key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + status = validateRouterInterfaceAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for Router Interface APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + + const std::string router_intf_key = + KeyGenerator::generateRouterInterfaceKey( + app_db_entry.router_interface_id); + + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto* router_intf_entry = getRouterInterfaceEntry(router_intf_key); + if (router_intf_entry == nullptr) { + // Create router interface + status = processAddRequest(app_db_entry, router_intf_key); + } else { + // Modify existing router interface + status = processUpdateRequest(app_db_entry, router_intf_entry); + } + } else if (operation == DEL_COMMAND) { + // Delete router interface + status = processDeleteRequest(router_intf_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; + } + } + drainWithNotExecuted(); + return status; } std::string RouterInterfaceManager::verifyState(const std::string &key, const std::vector &tuple) diff --git a/orchagent/p4orch/router_interface_manager.h b/orchagent/p4orch/router_interface_manager.h index 427400e9c0c..62a7c5af437 100644 --- a/orchagent/p4orch/router_interface_manager.h +++ b/orchagent/p4orch/router_interface_manager.h @@ -50,9 +50,11 @@ class RouterInterfaceManager : public ObjectManagerInterface virtual ~RouterInterfaceManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: ReturnCodeOr deserializeRouterIntfEntry( diff --git a/orchagent/p4orch/tables_definition_manager.cpp b/orchagent/p4orch/tables_definition_manager.cpp index c0fab4265a2..3df54bcc865 100644 --- a/orchagent/p4orch/tables_definition_manager.cpp +++ b/orchagent/p4orch/tables_definition_manager.cpp @@ -1,36 +1,29 @@ #include "p4orch/tables_definition_manager.h" #include +#include #include #include #include #include #include "directory.h" -#include #include "logger.h" -#include "tokenize.h" #include "orch.h" #include "p4orch/p4orch.h" #include "p4orch/p4orch_util.h" +#include "tokenize.h" extern "C" { #include "saitypes.h" } - extern Directory gDirectory; extern P4Orch *gP4Orch; -const std::map format_datatype_map = -{ - {"MAC", "SAI_ATTR_VALUE_TYPE_MAC"}, - {"IPV4", "SAI_ATTR_VALUE_TYPE_IPV4"}, - {"IPV6", "SAI_ATTR_VALUE_TYPE_IPV6"} -}; +const std::map format_datatype_map = { + {"MAC", "SAI_ATTR_VALUE_TYPE_MAC"}, {"IPV4", "SAI_ATTR_VALUE_TYPE_IPV4"}, {"IPV6", "SAI_ATTR_VALUE_TYPE_IPV6"}}; - -std::string -BitwidthToDatatype (int bitwidth) +std::string BitwidthToDatatype(int bitwidth) { std::string datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; @@ -58,8 +51,7 @@ BitwidthToDatatype (int bitwidth) return datatype; } -std::string -parseBitwidthToDatatype (const nlohmann::json &json) +std::string parseBitwidthToDatatype(const nlohmann::json &json) { int bitwidth; std::string datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; @@ -73,8 +65,7 @@ parseBitwidthToDatatype (const nlohmann::json &json) return datatype; } -std::string -parseFormatToDatatype (const nlohmann::json &json, std::string datatype) +std::string parseFormatToDatatype(const nlohmann::json &json, std::string datatype) { std::string format; @@ -92,8 +83,7 @@ parseFormatToDatatype (const nlohmann::json &json, std::string datatype) return datatype; } -ReturnCode -parseTableMatchReferences (const nlohmann::json &match_json, TableMatchInfo &match) +ReturnCode parseTableMatchReferences(const nlohmann::json &match_json, TableMatchInfo &match) { std::string table, field; @@ -110,7 +100,8 @@ parseTableMatchReferences (const nlohmann::json &match_json, TableMatchInfo &mat catch (std::exception &ex) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "can not parse tables from app-db supplied table definition info"; + << "can not parse tables from app-db supplied table definition " + "info"; } } } @@ -118,8 +109,7 @@ parseTableMatchReferences (const nlohmann::json &match_json, TableMatchInfo &mat return ReturnCode(); } -ReturnCode -parseActionParamReferences (const nlohmann::json ¶m_json, ActionParamInfo ¶m) +ReturnCode parseActionParamReferences(const nlohmann::json ¶m_json, ActionParamInfo ¶m) { std::string table, field; @@ -136,7 +126,8 @@ parseActionParamReferences (const nlohmann::json ¶m_json, ActionParamInfo &p catch (std::exception &ex) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "can not parse tables from app-db supplied table definition info"; + << "can not parse tables from app-db supplied table definition " + "info"; } } } @@ -144,8 +135,7 @@ parseActionParamReferences (const nlohmann::json ¶m_json, ActionParamInfo &p return ReturnCode(); } -ReturnCode -parseTableActionParams (const nlohmann::json &action_json, ActionInfo &action) +ReturnCode parseTableActionParams(const nlohmann::json &action_json, ActionInfo &action) { action.refers_to = false; if (action_json.find(p4orch::kActionParams) != action_json.end()) @@ -167,7 +157,8 @@ parseTableActionParams (const nlohmann::json &action_json, ActionInfo &action) if (!param.table_reference_map.empty()) { /** - * Helps avoid walk of action parameters if this is set to false at action level + * Helps avoid walk of action parameters if this is set to false at + * action level */ action.refers_to = true; } @@ -175,7 +166,8 @@ parseTableActionParams (const nlohmann::json &action_json, ActionInfo &action) catch (std::exception &ex) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "can not parse tables from app-db supplied table definition info"; + << "can not parse tables from app-db supplied table definition " + "info"; } } } @@ -183,8 +175,7 @@ parseTableActionParams (const nlohmann::json &action_json, ActionInfo &action) return ReturnCode(); } -ReturnCode -parseTableCounter (const nlohmann::json &table_json, TableInfo &table) +ReturnCode parseTableCounter(const nlohmann::json &table_json, TableInfo &table) { if (table_json.find(p4orch::kCounterUnit) != table_json.end()) { @@ -207,8 +198,7 @@ parseTableCounter (const nlohmann::json &table_json, TableInfo &table) return ReturnCode(); } -ReturnCode -parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) +ReturnCode parseTablesInfo(const nlohmann::json &info_json, TablesInfo &info_entry) { ReturnCode status; int table_id; @@ -216,8 +206,7 @@ parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) if (info_json.find(p4orch::kTables) == info_json.end()) { - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "no tables in app-db supplied table definition info"; + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "no tables in app-db supplied table definition info"; } for (const auto &table_json : info_json[p4orch::kTables]) @@ -230,18 +219,18 @@ parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) catch (std::exception &ex) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "can not parse tables from app-db supplied table definition info"; + << "can not parse tables from app-db supplied table definition " + "info"; } - - TableInfo table = {}; + TableInfo table = {}; table.name = table_name; - table.id = table_id; + table.id = table_id; try { for (const auto &match_json : table_json[p4orch::kmatchFields]) { - TableMatchInfo match = {}; + TableMatchInfo match = {}; std::string match_name; match_name = match_json.at(p4orch::kName).get(); @@ -254,7 +243,7 @@ parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) for (const auto &action_json : table_json[p4orch::kActions]) { - ActionInfo action = {}; + ActionInfo action = {}; std::string action_name; action_name = action_json.at(p4orch::kAlias).get(); @@ -263,23 +252,21 @@ parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) table.action_fields[action_name] = action; /** - * If any parameter of action refers to another table, add that one in the - * cross-reference list of current table + * If any parameter of action refers to another table, add that one in + * the cross-reference list of current table */ - for (auto param_it = action.params.begin(); - param_it != action.params.end(); param_it++) + for (auto param_it = action.params.begin(); param_it != action.params.end(); param_it++) { ActionParamInfo action_param = param_it->second; for (auto ref_it = action_param.table_reference_map.begin(); - ref_it != action_param.table_reference_map.end(); ref_it++) + ref_it != action_param.table_reference_map.end(); ref_it++) { - if (std::find(table.action_ref_tables.begin(), - table.action_ref_tables.end(), - ref_it->first) == table.action_ref_tables.end()) + if (std::find(table.action_ref_tables.begin(), table.action_ref_tables.end(), ref_it->first) == + table.action_ref_tables.end()) { table.action_ref_tables.push_back(ref_it->first); } - } + } } } @@ -288,10 +275,9 @@ parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) catch (std::exception &ex) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "can not parse table " << QuotedVar(table_name.c_str()) << "match fields"; + << "can not parse table " << QuotedVar(table_name.c_str()) << "match fields"; } - info_entry.m_tableIdNameMap[std::to_string(table_id)] = table_name; info_entry.m_tableInfoMap[table_name] = table; } @@ -299,7 +285,6 @@ parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) return ReturnCode(); } - ReturnCodeOr TablesDefnManager::deserializeTablesInfoEntry( const std::string &key, const std::vector &attributes) { @@ -327,7 +312,7 @@ ReturnCodeOr TablesDefnManager::deserializeTablesInfoEntry else { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Unexpected field " << QuotedVar(field) << " in table entry"; + << "Unexpected field " << QuotedVar(field) << " in table entry"; } } @@ -416,14 +401,13 @@ ReturnCode TablesDefnManager::processDeleteRequest(const std::string &context_ke return ReturnCode(); } -ReturnCode TablesDefnManager::getSaiObject(const std::string &json_key, - sai_object_type_t &object_type, std::string &object_key) +ReturnCode TablesDefnManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { return StatusCode::SWSS_RC_INVALID_PARAM; } -std::unordered_map> -createGraph (std::vector> preReq) +std::unordered_map> createGraph(std::vector> preReq) { std::unordered_map> graph; @@ -443,8 +427,7 @@ createGraph (std::vector> preReq) return graph; } -std::unordered_map -computeIndegree (std::unordered_map> &graph) +std::unordered_map computeIndegree(std::unordered_map> &graph) { std::unordered_map degrees; @@ -467,19 +450,16 @@ computeIndegree (std::unordered_map> &graph) return degrees; } - -std::vector -findTablePrecedence (int tables, std::vector> preReq, TablesInfo *tables_info) +std::vector findTablePrecedence(int tables, std::vector> preReq, TablesInfo *tables_info) { std::unordered_map> graph = createGraph(preReq); std::unordered_map degrees = computeIndegree(graph); std::vector visited; std::vector toposort; - std::queue zeros; + std::queue zeros; // initialize queue with tables having no dependencies - for (auto table_it = tables_info->m_tableInfoMap.begin(); - table_it != tables_info->m_tableInfoMap.end(); table_it++) + for (auto table_it = tables_info->m_tableInfoMap.begin(); table_it != tables_info->m_tableInfoMap.end(); table_it++) { TableInfo table_info = table_it->second; if (degrees.find(table_info.id) == degrees.end()) @@ -491,7 +471,8 @@ findTablePrecedence (int tables, std::vector> preReq, Tables for (int i = 0; i < tables; i++) { - // Err input data like possible cyclic dependencies, could not build precedence order + // Err input data like possible cyclic dependencies, could not build + // precedence order if (zeros.empty()) { SWSS_LOG_ERROR("Filed to build table precedence order"); @@ -530,21 +511,19 @@ findTablePrecedence (int tables, std::vector> preReq, Tables return toposort; } - -void -buildTablePrecedence (TablesInfo *tables_info) +void buildTablePrecedence(TablesInfo *tables_info) { std::vector> preReq; std::vector orderedTables; int tables = 0; - if (!tables_info) { + if (!tables_info) + { return; } // build dependencies - for (auto table_it = tables_info->m_tableInfoMap.begin(); - table_it != tables_info->m_tableInfoMap.end(); table_it++) + for (auto table_it = tables_info->m_tableInfoMap.begin(); table_it != tables_info->m_tableInfoMap.end(); table_it++) { TableInfo table_info = table_it->second; tables++; @@ -552,18 +531,18 @@ buildTablePrecedence (TablesInfo *tables_info) for (std::size_t i = 0; i < table_info.action_ref_tables.size(); i++) { /** - * For now processing precedence order is only amongst extension tables - * Skip fixed tables, include them in precedence calculations when fixed - * and extension tables processing precedence may be interleaved - */ + * For now processing precedence order is only amongst extension tables + * Skip fixed tables, include them in precedence calculations when fixed + * and extension tables processing precedence may be interleaved + */ if (FixedTablesMap.find(table_info.action_ref_tables[i]) != FixedTablesMap.end()) { continue; } TableInfo ref_table_info = tables_info->m_tableInfoMap[table_info.action_ref_tables[i]]; - if (std::find(preReq.begin(), preReq.end(), - std::make_pair(table_info.id, ref_table_info.id)) == preReq.end()) + if (std::find(preReq.begin(), preReq.end(), std::make_pair(table_info.id, ref_table_info.id)) == + preReq.end()) { preReq.push_back(std::make_pair(table_info.id, ref_table_info.id)); } @@ -573,7 +552,8 @@ buildTablePrecedence (TablesInfo *tables_info) // find precedence of tables based on dependencies orderedTables = findTablePrecedence(tables, preReq, tables_info); - // update each table with calculated precedence value and build table precedence map + // update each table with calculated precedence value and build table + // precedence map for (std::size_t i = 0; i < orderedTables.size(); i++) { auto table_id = orderedTables[i]; @@ -596,86 +576,91 @@ buildTablePrecedence (TablesInfo *tables_info) return; } - void TablesDefnManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } -void TablesDefnManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); - - ReturnCode status; - auto app_db_entry_or = deserializeTablesInfoEntry(key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; - - status = validateTablesInfoAppDbEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for tables definition APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, /*replace=*/true); - continue; - } +void TablesDefnManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - const std::string context_key = KeyGenerator::generateTablesInfoKey(app_db_entry.context); +ReturnCode TablesDefnManager::drain() { + SWSS_LOG_ENTER(); + + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto app_db_entry_or = deserializeTablesInfoEntry(key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + status = validateTablesInfoAppDbEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR( + "Validation failed for tables definition APP DB entry with key %s: " + "%s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - auto *tablesinfo = getTablesInfoEntry(context_key); - if (tablesinfo == nullptr) - { - // Create TablesInfo - status = processAddRequest(app_db_entry, context_key); - } - else - { - // Modify existing TablesInfo - status = processUpdateRequest(app_db_entry, context_key); - } - } - else if (operation == DEL_COMMAND) - { - // Delete TablesInfo - status = processDeleteRequest(context_key); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); - } - if (!status.ok()) - { - SWSS_LOG_ERROR("Processing failed for tables definition APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - } - else - { - buildTablePrecedence(gP4Orch->tablesinfo); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, /*replace=*/true); + const std::string context_key = + KeyGenerator::generateTablesInfoKey(app_db_entry.context); + + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + auto* tablesinfo = getTablesInfoEntry(context_key); + if (tablesinfo == nullptr) { + // Create TablesInfo + status = processAddRequest(app_db_entry, context_key); + } else { + // Modify existing TablesInfo + status = processUpdateRequest(app_db_entry, context_key); + } + } else if (operation == DEL_COMMAND) { + // Delete TablesInfo + status = processDeleteRequest(context_key); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + if (!status.ok()) { + SWSS_LOG_ERROR( + "Processing failed for tables definition APP DB entry with key %s: " + "%s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + } else { + buildTablePrecedence(gP4Orch->tablesinfo); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; } - m_entries.clear(); + } + drainWithNotExecuted(); + return status; } std::string TablesDefnManager::verifyState(const std::string &key, const std::vector &tuple) diff --git a/orchagent/p4orch/tables_definition_manager.h b/orchagent/p4orch/tables_definition_manager.h index 088b832bcdf..49c5855fc34 100644 --- a/orchagent/p4orch/tables_definition_manager.h +++ b/orchagent/p4orch/tables_definition_manager.h @@ -1,12 +1,12 @@ #pragma once #include +#include #include #include #include #include "macaddress.h" -#include #include "orch.h" #include "p4orch/object_manager_interface.h" #include "p4orch/p4oidmapper.h" @@ -23,8 +23,8 @@ extern "C" */ struct TablesInfo { - std::string context; - nlohmann::json info; + std::string context; + nlohmann::json info; std::unordered_map m_tableIdNameMap; std::unordered_map m_tableInfoMap; std::map m_tablePrecedenceMap; @@ -57,13 +57,15 @@ class TablesDefnManager : public ObjectManagerInterface virtual ~TablesDefnManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; private: - ReturnCodeOr deserializeTablesInfoEntry( - const std::string &key, const std::vector &attributes); + ReturnCodeOr deserializeTablesInfoEntry(const std::string &key, + const std::vector &attributes); TablesInfo *getTablesInfoEntry(const std::string &context_key); ReturnCode createTablesInfo(const std::string &context_key, TablesInfo &tablesinfo_entry); ReturnCode removeTablesInfo(const std::string &context_key); diff --git a/orchagent/p4orch/tests/Makefile.am b/orchagent/p4orch/tests/Makefile.am index d541bbe6372..19ec88a6173 100644 --- a/orchagent/p4orch/tests/Makefile.am +++ b/orchagent/p4orch/tests/Makefile.am @@ -4,9 +4,9 @@ INCLUDES = -I $(top_srcdir) -I $(ORCHAGENT_DIR) -I $(P4ORCH_DIR) -I $(top_srcdir CFLAGS_SAI = -I /usr/include/sai -TESTS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan +TESTS = p4orch_tests -noinst_PROGRAMS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan +noinst_PROGRAMS = p4orch_tests if DEBUG DBGFLAGS = -ggdb -DDEBUG @@ -16,11 +16,6 @@ endif CFLAGS_GTEST = LDADD_GTEST = -lgtest -lgtest_main -lgmock -lgmock_main -CFLAGS_COVERAGE = --coverage -fprofile-arcs -ftest-coverage -LDADD_COVERAGE = -lgcov -CFLAGS_ASAN = -fsanitize=address -CFLAGS_TSAN = -fsanitize=thread -CFLAGS_USAN = -fsanitize=undefined p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(ORCHAGENT_DIR)/vrforch.cpp \ @@ -28,6 +23,8 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(ORCHAGENT_DIR)/copporch.cpp \ $(ORCHAGENT_DIR)/switch/switch_capabilities.cpp \ $(ORCHAGENT_DIR)/switch/switch_helper.cpp \ + $(ORCHAGENT_DIR)/switch/trimming/capabilities.cpp \ + $(ORCHAGENT_DIR)/switch/trimming/helper.cpp \ $(ORCHAGENT_DIR)/switchorch.cpp \ $(ORCHAGENT_DIR)/request_parser.cpp \ $(top_srcdir)/lib/recorder.cpp \ @@ -35,6 +32,7 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(ORCHAGENT_DIR)/flex_counter/flow_counter_handler.cpp \ $(ORCHAGENT_DIR)/port/port_capabilities.cpp \ $(ORCHAGENT_DIR)/port/porthlpr.cpp \ + $(ORCHAGENT_DIR)/notifications.cpp \ $(P4ORCH_DIR)/p4oidmapper.cpp \ $(P4ORCH_DIR)/p4orch.cpp \ $(P4ORCH_DIR)/p4orch_util.cpp \ @@ -63,6 +61,7 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ fake_notificationconsumer.cpp \ fake_table.cpp \ p4oidmapper_test.cpp \ + p4orch_test.cpp \ p4orch_util_test.cpp \ return_code_test.cpp \ route_manager_test.cpp \ @@ -76,30 +75,16 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ mirror_session_manager_test.cpp \ test_main.cpp \ mock_sai_acl.cpp \ + mock_sai_bridge.cpp \ mock_sai_hostif.cpp \ mock_sai_serialize.cpp \ mock_sai_router_interface.cpp \ + mock_sai_neighbor.cpp \ + mock_sai_next_hop.cpp \ + mock_sai_route.cpp \ mock_sai_switch.cpp \ mock_sai_udf.cpp -p4orch_tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_COVERAGE) $(CFLAGS_SAI) -p4orch_tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_COVERAGE) $(CFLAGS_SAI) -p4orch_tests_LDADD = $(LDADD_GTEST) $(LDADD_COVERAGE) -lpthread -lsairedis -lswsscommon -lsaimeta -lsaimetadata -lzmq - -p4orch_tests_asan_SOURCES = $(p4orch_tests_SOURCES) -p4orch_tests_asan_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_ASAN) $(CFLAGS_SAI) -p4orch_tests_asan_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_ASAN) $(CFLAGS_SAI) -p4orch_tests_asan_LDFLAGS = $(CFLAGS_ASAN) -p4orch_tests_asan_LDADD = $(LDADD_GTEST) -lpthread -lsairedis -lswsscommon -lsaimeta -lsaimetadata -lzmq - -p4orch_tests_tsan_SOURCES = $(p4orch_tests_SOURCES) -p4orch_tests_tsan_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_TSAN) $(CFLAGS_SAI) -p4orch_tests_tsan_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_TSAN) $(CFLAGS_SAI) -p4orch_tests_tsan_LDFLAGS = $(CFLAGS_TSAN) -p4orch_tests_tsan_LDADD = $(LDADD_GTEST) -lpthread -lsairedis -lswsscommon -lsaimeta -lsaimetadata -lzmq - -p4orch_tests_usan_SOURCES = $(p4orch_tests_SOURCES) -p4orch_tests_usan_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_USAN) $(CFLAGS_SAI) -p4orch_tests_usan_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_USAN) $(CFLAGS_SAI) -p4orch_tests_usan_LDFLAGS = $(CFLAGS_USAN) -p4orch_tests_usan_LDADD = $(LDADD_GTEST) -lpthread -lsairedis -lswsscommon -lsaimeta -lsaimetadata -lzmq +p4orch_tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(CFLAGS_ASAN) +p4orch_tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(CFLAGS_ASAN) +p4orch_tests_LDADD = $(LDADD_GTEST) $(LDFLAGS_ASAN) -lpthread -lsairedis -lswsscommon -lsaimeta -lsaimetadata -lzmq diff --git a/orchagent/p4orch/tests/acl_manager_test.cpp b/orchagent/p4orch/tests/acl_manager_test.cpp index 5827fd1a13e..d38221bf8a1 100644 --- a/orchagent/p4orch/tests/acl_manager_test.cpp +++ b/orchagent/p4orch/tests/acl_manager_test.cpp @@ -3,13 +3,14 @@ #include #include +#include #include #include "acl_rule_manager.h" #include "acl_table_manager.h" #include "acl_util.h" #include "acltable.h" -#include +#include "mock_response_publisher.h" #include "mock_sai_acl.h" #include "mock_sai_hostif.h" #include "mock_sai_policer.h" @@ -37,6 +38,7 @@ extern sai_udf_api_t *sai_udf_api; extern int gBatchSize; extern VRFOrch *gVrfOrch; extern P4Orch *gP4Orch; +extern std::unique_ptr gMockResponsePublisher; extern SwitchOrch *gSwitchOrch; extern sai_object_id_t gSwitchId; extern sai_object_id_t gVrfOid; @@ -240,7 +242,7 @@ std::string BuildMatchFieldJsonStrKindComposite(std::vector elem { nlohmann::json match_json; match_json[kAclMatchFieldKind] = kAclMatchFieldKindComposite; - for (const auto element : elements) + for (const auto &element : elements) { match_json[kAclMatchFieldElements].push_back(element); } @@ -634,8 +636,16 @@ P4AclTableDefinitionAppDbEntry getDefaultAclTableDefAppDbEntry() app_db_entry.match_field_lookup["inner_vlan_pri"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_INNER_VLAN_PRI); app_db_entry.match_field_lookup["inner_vlan_id"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_INNER_VLAN_ID); app_db_entry.match_field_lookup["inner_vlan_cfi"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_INNER_VLAN_CFI); - app_db_entry.match_field_lookup["l3_class_id"] = - BuildMatchFieldJsonStrKindSaiField(P4_MATCH_ROUTE_DST_USER_META, P4_FORMAT_HEX_STRING, /*bitwidth=*/6); + app_db_entry.match_field_lookup["vrf_id"] = + BuildMatchFieldJsonStrKindSaiField(P4_MATCH_VRF_ID, P4_FORMAT_HEX_STRING, + /*bitwidth=*/16); + app_db_entry.match_field_lookup["ipmc_table_hit"] = + BuildMatchFieldJsonStrKindSaiField(P4_MATCH_IPMC_TABLE_HIT, + P4_FORMAT_HEX_STRING, /*bitwidth=*/1); + app_db_entry.match_field_lookup["l3_clasvs_id"] = + BuildMatchFieldJsonStrKindSaiField(P4_MATCH_ROUTE_DST_USER_META, P4_FORMAT_HEX_STRING, /*bitwidth=*/32); + app_db_entry.match_field_lookup["acl_user_meta"] = + BuildMatchFieldJsonStrKindSaiField(P4_MATCH_ACL_USER_META, P4_FORMAT_HEX_STRING, /*bitwidth=*/8); app_db_entry.match_field_lookup["src_ipv6_64bit"] = BuildMatchFieldJsonStrKindComposite( {nlohmann::json::parse(BuildMatchFieldJsonStrKindSaiField(P4_MATCH_SRC_IPV6_WORD3, P4_FORMAT_IPV6, 32)), nlohmann::json::parse(BuildMatchFieldJsonStrKindSaiField(P4_MATCH_SRC_IPV6_WORD2, P4_FORMAT_IPV6, 32))}, @@ -815,6 +825,7 @@ class AclManagerTest : public ::testing::Test delete gP4Orch; delete copp_orch_; delete gSwitchOrch; + gMockResponsePublisher.reset(); } void setUpMockApi() @@ -943,6 +954,7 @@ class AclManagerTest : public ::testing::Test acl_table_manager_ = gP4Orch->getAclTableManager(); acl_rule_manager_ = gP4Orch->getAclRuleManager(); p4_oid_mapper_ = acl_table_manager_->m_p4OidMapper; + gMockResponsePublisher = std::make_unique(); } void AddDefaultUserTrapsSaiCalls(sai_object_id_t *user_defined_trap_oid) @@ -977,10 +989,14 @@ class AclManagerTest : public ::testing::Test IsExpectedAclTableDefinitionMapping(*GetAclTable(app_db_entry.acl_table_name), app_db_entry)); } - void DrainTableTuples() - { - acl_table_manager_->drain(); + ReturnCode DrainTableTuples(bool failure_before) { + if (failure_before) { + acl_table_manager_->drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return acl_table_manager_->drain(); } + void EnqueueTableTuple(const swss::KeyOpFieldsValuesTuple &entry) { acl_table_manager_->enqueue(APP_P4RT_ACL_TABLE_DEFINITION_NAME, entry); @@ -990,10 +1006,14 @@ class AclManagerTest : public ::testing::Test return acl_table_manager_->verifyState(key, tuple); } - void DrainRuleTuples() - { - acl_rule_manager_->drain(); + ReturnCode DrainRuleTuples(bool failure_before) { + if (failure_before) { + acl_rule_manager_->drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return acl_rule_manager_->drain(); } + void EnqueueRuleTuple(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { acl_rule_manager_->enqueue(table_name, entry); @@ -1067,6 +1087,7 @@ class AclManagerTest : public ::testing::Test StrictMock mock_sai_hostif_; StrictMock mock_sai_switch_; StrictMock mock_sai_udf_; + // StrictMock *gMockResponsePublisher; CoppOrch *copp_orch_; P4OidMapper *p4_oid_mapper_; p4orch::AclTableManager *acl_table_manager_; @@ -1093,7 +1114,12 @@ TEST_F(AclManagerTest, DrainTableTuplesToProcessSetDelRequestSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); - DrainTableTuples(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainTableTuples(/*failure_before=*/false)); EXPECT_NE(nullptr, GetAclTable(kAclIngressTableName)); // Drain table tuples to process DEL request @@ -1103,7 +1129,12 @@ TEST_F(AclManagerTest, DrainTableTuplesToProcessSetDelRequestSucceeds) EXPECT_CALL(mock_sai_udf_, remove_udf_group(Eq(kUdfGroupOid1))).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, DEL_COMMAND, {}})); - DrainTableTuples(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), + Eq(std::vector{}), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); } @@ -1129,14 +1160,24 @@ TEST_F(AclManagerTest, DrainTableTuplesToProcessUpdateRequestExpectFails) EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainTableTuples(/*failure_before=*/false)); EXPECT_NE(nullptr, GetAclTable(kAclIngressTableName)); // Drain table tuples to process SET request, try to update table priority // to 100: should fail to update. attributes.push_back(swss::FieldValueTuple{kPriority, "100"}); EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, attributes})); - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_UNIMPLEMENTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNIMPLEMENTED, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(234, GetAclTable(kAclIngressTableName)->priority); } @@ -1147,13 +1188,23 @@ TEST_F(AclManagerTest, DrainTableTuplesWithInvalidTableNameOpsFails) swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, getDefaultTableDefFieldValueTuples()})); // Drain table tuples to process SET request on invalid ACL definition table // name: "UNDEFINED" - DrainTableTuples(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); p4rtAclTableName = std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + kAclIngressTableName; EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, "UPDATE", getDefaultTableDefFieldValueTuples()})); // Drain table tuples to process invalid operation: "UPDATE" - DrainTableTuples(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); } @@ -1167,7 +1218,12 @@ TEST_F(AclManagerTest, DrainTableTuplesWithInvalidFieldFails) attributes.push_back(swss::FieldValueTuple{"undefined", "undefined"}); EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, attributes})); // Drain table tuples to process SET request - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); // Invalid attribute field @@ -1175,7 +1231,12 @@ TEST_F(AclManagerTest, DrainTableTuplesWithInvalidFieldFails) attributes.push_back(swss::FieldValueTuple{"undefined/undefined", "undefined"}); EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, attributes})); // Drain table tuples to process SET request - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); // Invalid meter unit value @@ -1183,7 +1244,12 @@ TEST_F(AclManagerTest, DrainTableTuplesWithInvalidFieldFails) attributes.push_back(swss::FieldValueTuple{"meter/unit", "undefined"}); EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, attributes})); // Drain table tuples to process SET request - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); // Invalid counter unit value @@ -1191,7 +1257,12 @@ TEST_F(AclManagerTest, DrainTableTuplesWithInvalidFieldFails) attributes.push_back(swss::FieldValueTuple{"counter/unit", "undefined"}); EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, attributes})); // Drain table tuples to process SET request - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainTableTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); } @@ -2333,7 +2404,13 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestSucceeds) EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); - DrainRuleTuples(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), + Eq(getDefaultRuleFieldValueTuples()), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(2); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainRuleTuples(/*failure_before=*/false)); const auto &acl_rule_key = "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53:priority=15"; @@ -2360,7 +2437,12 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetDelRequestSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); - DrainRuleTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainRuleTuples(/*failure_before=*/false)); // Populate counter stats EXPECT_CALL(mock_sai_policer_, get_policer_stats(Eq(kAclMeterOid1), _, _, _)) .WillOnce(DoAll(Invoke([](sai_object_id_t policer_id, uint32_t number_of_counters, @@ -2394,7 +2476,12 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetDelRequestSucceeds) EXPECT_CALL(mock_sai_acl_, remove_acl_entry(Eq(kAclIngressRuleOid1))).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_counter(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_policer_, remove_policer(Eq(kAclMeterOid1))).WillOnce(Return(SAI_STATUS_SUCCESS)); - DrainRuleTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainRuleTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclRule(kAclIngressTableName, acl_rule_key)); } @@ -2409,7 +2496,12 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestInvalidTableNameRuleKey swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain rule tuple to process SET request with invalid ACL table name: // "INVALID_TABLE_NAME" - DrainRuleTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, + DrainRuleTuples(/*failure_before=*/false)); auto acl_rule_key = "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53:priority=15"; @@ -2426,7 +2518,12 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestInvalidTableNameRuleKey swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain rule tuple to process SET request without priority field in rule // JSON key - DrainRuleTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainRuleTuples(/*failure_before=*/false)); EXPECT_EQ(nullptr, GetAclRule(kAclIngressTableName, acl_rule_key)); } @@ -2487,7 +2584,12 @@ TEST_F(AclManagerTest, DrainRuleTuplesWithInvalidCommand) const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; EnqueueRuleTuple(std::string(kAclIngressTableName), swss::KeyOpFieldsValuesTuple({rule_tuple_key, "INVALID_COMMAND", attributes})); - DrainRuleTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + DrainRuleTuples(/*failure_before=*/false)); const auto &acl_rule_key = "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53:priority=15"; EXPECT_EQ(nullptr, GetAclRule(kAclIngressTableName, acl_rule_key)); @@ -2698,6 +2800,8 @@ TEST_F(AclManagerTest, AclRuleWithValidMatchFields) app_db_entry.match_fvs["inner_vlan_pri"] = "200"; app_db_entry.match_fvs["inner_vlan_id"] = "200"; app_db_entry.match_fvs["inner_vlan_cfi"] = "200"; + app_db_entry.match_fvs["vrf_id"] = "0x777"; + app_db_entry.match_fvs["ipmc_table_hit"] = "0x1"; const auto &acl_rule_key = KeyGenerator::generateAclRuleKey(app_db_entry.match_fvs, "100"); @@ -2794,6 +2898,11 @@ TEST_F(AclManagerTest, AclRuleWithValidMatchFields) EXPECT_EQ(SAI_ACL_IP_FRAG_HEAD, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_FRAG].aclfield.data.u32); EXPECT_EQ(SAI_PACKET_VLAN_SINGLE_OUTER_TAG, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_PACKET_VLAN].aclfield.data.u32); + EXPECT_EQ(0x777, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_VRF_ID].aclfield.data.u16); + EXPECT_EQ(0xFFFF, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_VRF_ID].aclfield.mask.u16); + EXPECT_EQ(true, + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IPMC_NPU_META_DST_HIT] + .aclfield.data.booldata); // Check action field value EXPECT_EQ(SAI_PACKET_ACTION_TRAP, @@ -2849,6 +2958,8 @@ TEST_F(AclManagerTest, AclRuleWithColorPacketActionsButNoRateLimit) acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_SET_USER_TRAP_ID].aclaction.parameter.oid); } +#pragma GCC diagnostic warning "-Wdisabled-optimization" + TEST_F(AclManagerTest, AclRuleWithValidAction) { ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); @@ -3201,6 +3312,8 @@ TEST_F(AclManagerTest, AclRuleWithValidAction) EXPECT_EQ(nullptr, GetAclRule(kAclIngressTableName, acl_rule_key)); } +#pragma GCC diagnostic pop + TEST_F(AclManagerTest, AclRuleWithVrfAction) { ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); @@ -3410,6 +3523,8 @@ TEST_F(AclManagerTest, AclRuleWithIpTypeBitEncoding) ASSERT_EQ(nullptr, acl_rule); } +#pragma GCC diagnostic warning "-Wdisabled-optimization" + TEST_F(AclManagerTest, UpdateAclRuleWithActionMeterChange) { ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); @@ -3834,6 +3949,8 @@ TEST_F(AclManagerTest, UpdateAclRuleWithActionMeterChange) acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_SET_USER_TRAP_ID].aclaction.parameter.oid); } +#pragma GCC diagnostic pop + TEST_F(AclManagerTest, UpdateAclRuleWithVrfActionChange) { ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); @@ -4568,6 +4685,277 @@ TEST_F(AclManagerTest, DISABLED_InitBindGroupToSwitchFails) EXPECT_THROW(new SwitchOrch(gAppDb, switch_tables, stateDbSwitchTable), std::runtime_error); } +TEST_F(AclManagerTest, CreatePreIngressTableWillCreateDefaultRule) { + auto app_db_entry = getDefaultAclTableDefAppDbEntry(); + app_db_entry.stage = STAGE_PRE_INGRESS; + EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly( + DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)).Times(0); + sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; + AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + ASSERT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddTableRequest(app_db_entry)); + ASSERT_NO_FATAL_FAILURE(IsExpectedAclTableDefinitionMapping( + *GetAclTable(app_db_entry.acl_table_name), app_db_entry)); +} + +TEST_F(AclManagerTest, DrainTableNotExecuted) { + const auto& p4rtAclTableName_1 = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + + "ACL_TABLE_1"; + const auto& p4rtAclTableName_2 = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + + "ACL_TABLE_2"; + const auto& p4rtAclTableName_3 = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + + "ACL_TABLE_3"; + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple( + {p4rtAclTableName_1, SET_COMMAND, getDefaultTableDefFieldValueTuples()})); + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple( + {p4rtAclTableName_2, SET_COMMAND, getDefaultTableDefFieldValueTuples()})); + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple( + {p4rtAclTableName_3, SET_COMMAND, getDefaultTableDefFieldValueTuples()})); + + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName_1), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName_2), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName_3), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, + DrainTableTuples(/*failure_before=*/true)); + EXPECT_EQ(nullptr, GetAclTable("ACL_TABLE_1")); + EXPECT_EQ(nullptr, GetAclTable("ACL_TABLE_2")); + EXPECT_EQ(nullptr, GetAclTable("ACL_TABLE_3")); +} + +TEST_F(AclManagerTest, DrainTableStopOnFirstFailure) { + const auto& p4rtAclTableName_1 = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + + "ACL_TABLE_1"; + const auto& p4rtAclTableName_2 = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + + "ACL_TABLE_2"; + const auto& p4rtAclTableName_3 = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + + "ACL_TABLE_3"; + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple( + {p4rtAclTableName_1, SET_COMMAND, getDefaultTableDefFieldValueTuples()})); + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple( + {p4rtAclTableName_2, SET_COMMAND, getDefaultTableDefFieldValueTuples()})); + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple( + {p4rtAclTableName_3, SET_COMMAND, getDefaultTableDefFieldValueTuples()})); + + EXPECT_CALL(mock_sai_acl_, + create_acl_table(_, Eq(gSwitchId), Gt(2), + Truly(std::bind(MatchSaiAttributeAclTableStage, + SAI_ACL_STAGE_INGRESS, + std::placeholders::_1)))) + .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(mock_sai_acl_, + create_acl_table_group_member(_, Eq(gSwitchId), Eq(3), NotNull())) + .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))) + .WillOnce( + DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, remove_udf_group(_)) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, remove_udf(_)) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName_1), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName_2), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName_3), + Eq(getDefaultTableDefFieldValueTuples()), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, + DrainTableTuples(/*failure_before=*/false)); + EXPECT_NE(nullptr, GetAclTable("ACL_TABLE_1")); + EXPECT_EQ(nullptr, GetAclTable("ACL_TABLE_2")); + EXPECT_EQ(nullptr, GetAclTable("ACL_TABLE_3")); +} + +TEST_F(AclManagerTest, DrainRuleNotExecuted) { + ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); + auto attributes = getDefaultRuleFieldValueTuples(); + const auto& acl_rule_json_key_1 = + "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53\",\"priority\":15}"; + const auto& rule_tuple_key_1 = std::string(kAclIngressTableName) + + kTableKeyDelimiter + acl_rule_json_key_1; + const auto& acl_rule_json_key_2 = + "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::54 & " + "fdf8:f53b:82e4::54\",\"priority\":15}"; + const auto& rule_tuple_key_2 = std::string(kAclIngressTableName) + + kTableKeyDelimiter + acl_rule_json_key_2; + const auto& acl_rule_json_key_3 = + "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::55 & " + "fdf8:f53b:82e4::55\",\"priority\":15}"; + const auto& rule_tuple_key_3 = std::string(kAclIngressTableName) + + kTableKeyDelimiter + acl_rule_json_key_3; + + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple( + {rule_tuple_key_1, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple( + {rule_tuple_key_2, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple( + {rule_tuple_key_3, SET_COMMAND, attributes})); + + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key_1), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key_2), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key_3), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, + DrainRuleTuples(/*failure_before=*/true)); + EXPECT_EQ( + nullptr, + GetAclRule(kAclIngressTableName, + "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53:priority=15")); + EXPECT_EQ( + nullptr, + GetAclRule(kAclIngressTableName, + "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::54 & " + "fdf8:f53b:82e4::54:priority=15")); + EXPECT_EQ( + nullptr, + GetAclRule(kAclIngressTableName, + "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::55 & " + "fdf8:f53b:82e4::55:priority=15")); +} + +TEST_F(AclManagerTest, DrainRuleStopOnFirstFailure) { + ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); + auto attributes = getDefaultRuleFieldValueTuples(); + const auto& acl_rule_json_key_1 = + "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53\",\"priority\":15}"; + const auto& rule_tuple_key_1 = std::string(kAclIngressTableName) + + kTableKeyDelimiter + acl_rule_json_key_1; + const auto& acl_rule_json_key_2 = + "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::54 & " + "fdf8:f53b:82e4::54\",\"priority\":15}"; + const auto& rule_tuple_key_2 = std::string(kAclIngressTableName) + + kTableKeyDelimiter + acl_rule_json_key_2; + const auto& acl_rule_json_key_3 = + "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::55 & " + "fdf8:f53b:82e4::55\",\"priority\":15}"; + const auto& rule_tuple_key_3 = std::string(kAclIngressTableName) + + kTableKeyDelimiter + acl_rule_json_key_3; + + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple( + {rule_tuple_key_1, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple( + {rule_tuple_key_2, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple( + {rule_tuple_key_3, SET_COMMAND, attributes})); + + EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))) + .WillOnce( + DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))) + .WillOnce( + DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, remove_acl_counter(_)) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_policer_, remove_policer(_)) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key_1), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key_2), Eq(attributes), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key_3), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, + DrainRuleTuples(/*failure_before=*/false)); + EXPECT_NE( + nullptr, + GetAclRule(kAclIngressTableName, + "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53:priority=15")); + EXPECT_EQ( + nullptr, + GetAclRule(kAclIngressTableName, + "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::54 & " + "fdf8:f53b:82e4::54:priority=15")); + EXPECT_EQ( + nullptr, + GetAclRule(kAclIngressTableName, + "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::55 & " + "fdf8:f53b:82e4::55:priority=15")); +} + TEST_F(AclManagerTest, AclTableVerifyStateTest) { const auto &p4rtAclTableName = @@ -4586,7 +4974,12 @@ TEST_F(AclManagerTest, AclTableVerifyStateTest) .WillOnce(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainTableTuples(/*failure_before=*/false)); auto *acl_table = GetAclTable(kAclIngressTableName); EXPECT_NE(acl_table, nullptr); @@ -4766,7 +5159,8 @@ TEST_F(AclManagerTest, AclRuleVerifyStateTest) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"match/arp_tpa\": \"0xff112231\", " "\"match/in_ports\": \"Ethernet1,Ethernet2\", \"match/out_ports\": " - "\"Ethernet4,Ethernet5\", \"priority\":15}"; + "\"Ethernet4,Ethernet5\", \"priority\":15,\"match/ipmc_table_hit\":" + "\"0x1\"}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; EnqueueRuleTuple(std::string(kAclIngressTableName), swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); @@ -4776,7 +5170,12 @@ TEST_F(AclManagerTest, AclRuleVerifyStateTest) .WillOnce(DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); - DrainRuleTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainRuleTuples(/*failure_before=*/false)); // Setup ASIC DB. swss::Table table(nullptr, "ASIC_STATE"); @@ -4791,6 +5190,7 @@ TEST_F(AclManagerTest, AclRuleVerifyStateTest) swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE", "SAI_ACL_IP_TYPE_ANY&mask:0xffffffffffffffff"}, swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN", "2:255,17&mask:2:0xff,0xff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_IPMC_NPU_META_DST_HIT", "true"}, swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1", "2:34,49&mask:2:0xff,0xff"}, swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS", "2:oid:0x112233,oid:0x1fed3"}, swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS", "2:oid:0x9988,oid:0x56789abcdef"}, @@ -4824,12 +5224,14 @@ TEST_F(AclManagerTest, AclRuleVerifyStateTest) EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + ":ACL_PUNT_TABLE:{\"match/ether_type\":\"0x0800\",\"match/" "ipv6_dst\":\"fdf8:f53b:82e4::53 & " - "fdf8:f53b:82e4::53\",\"priority\":0}", + "fdf8:f53b:82e4::53\",\"priority\":0,\"match/ipmc_table_hit\":" + "\"0x1\"}", attributes) .empty()); EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + ":ACL_PUNT_TABLE:{\"match/ether_type\":\"0x0800\",\"match/" - "ipv6_dst\":\"127.0.0.1/24\",\"priority\":15}", + "ipv6_dst\":\"127.0.0.1/24\",\"priority\":15," + "\"match/ipmc_table_hit\":\"0x1\"}", attributes) .empty()); @@ -4837,7 +5239,8 @@ TEST_F(AclManagerTest, AclRuleVerifyStateTest) EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + ":ACL_PUNT_TABLE:{\"match/ether_type\":\"0x0800\",\"match/" "ipv6_dst\":\"fdf8:f53b:82e4::54 & " - "fdf8:f53b:82e4::54\",\"priority\":15}", + "fdf8:f53b:82e4::54\",\"priority\":15,\"match/ipmc_table_hit\":" + "\"0x1\"}", attributes) .empty()); @@ -4847,7 +5250,8 @@ TEST_F(AclManagerTest, AclRuleVerifyStateTest) auto *acl_table = GetAclTable(kAclIngressTableName); EXPECT_NE(acl_table, nullptr); const auto &acl_rule_key = "match/arp_tpa=0xff112231:match/ether_type=0x0800:match/" - "in_ports=Ethernet1,Ethernet2:match/ipv6_dst=fdf8:f53b:82e4::53 & " + "in_ports=Ethernet1,Ethernet2:match/ipmc_table_hit=0x1:" + "match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53:match/out_ports=Ethernet4,Ethernet5:priority=15"; auto *acl_rule = GetAclRule(kAclIngressTableName, acl_rule_key); ASSERT_NE(acl_rule, nullptr); @@ -5083,7 +5487,12 @@ TEST_F(AclManagerTest, AclTableVerifyStateAsicDbTest) .WillOnce(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); - DrainTableTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(p4rtAclTableName), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainTableTuples(/*failure_before=*/false)); auto *acl_table = GetAclTable(kAclIngressTableName); EXPECT_NE(acl_table, nullptr); @@ -5205,7 +5614,12 @@ TEST_F(AclManagerTest, AclRuleVerifyStateAsicDbTest) .WillOnce(DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); - DrainRuleTuples(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(rule_tuple_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, + DrainRuleTuples(/*failure_before=*/false)); // Setup ASIC DB. swss::Table table(nullptr, "ASIC_STATE"); diff --git a/orchagent/p4orch/tests/fake_flexcounterorch.cpp b/orchagent/p4orch/tests/fake_flexcounterorch.cpp index 39f742e14cc..d26e39aea6e 100644 --- a/orchagent/p4orch/tests/fake_flexcounterorch.cpp +++ b/orchagent/p4orch/tests/fake_flexcounterorch.cpp @@ -1,12 +1,10 @@ #include "copporch.h" #include "flexcounterorch.h" -FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) : - Orch(db, tableNames), - m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), - m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), - m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), - m_deviceMetadataConfigTable(db, CFG_DEVICE_METADATA_TABLE_NAME) +FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) + : Orch(db, tableNames), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), + m_deviceMetadataConfigTable(db, CFG_DEVICE_METADATA_TABLE_NAME) { } @@ -18,6 +16,10 @@ void FlexCounterOrch::doTask(Consumer &consumer) { } +void FlexCounterOrch::doTask(SelectableTimer &timer) +{ +} + bool FlexCounterOrch::getPortCountersState() const { return true; diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index b8a2f56fde8..6065d517565 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -8,19 +8,30 @@ extern "C" #include "portsorch.h" -#define PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 -#define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 -#define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 +#define PORT_SPEED_LIST_DEFAULT_SIZE 16 +#define PORT_STATE_POLLING_SEC 5 +#define PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 +#define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 +#define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 +#define QUEUE_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_DROP_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb) - : Orch(db, tableNames), m_portStateTable(stateDb, STATE_PORT_TABLE_NAME), + : Orch(db, tableNames), + m_portStateTable(stateDb, STATE_PORT_TABLE_NAME), + m_portOpErrTable(stateDb, STATE_PORT_OPER_ERR_TABLE_NAME), port_stat_manager(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), port_buffer_drop_stat_manager(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS, true), - queue_stat_manager(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, - QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true) + queue_stat_manager(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + queue_watermark_manager(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ_AND_CLEAR, QUEUE_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + pg_watermark_manager(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ_AND_CLEAR, PG_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + pg_drop_stat_manager(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PG_DROP_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + wred_port_stat_manager(WRED_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + wred_queue_stat_manager(WRED_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false) { } @@ -185,15 +196,15 @@ void PortsOrch::generateQueueMap(std::map queues { } -void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq) +void PortsOrch::generateQueueMapPerPort(const Port &port, FlexCounterQueueStates &queuesState, bool voq) { } -void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues, bool skip_host_tx_queue) { } -void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues, bool skip_host_tx_queue) { } @@ -201,15 +212,15 @@ void PortsOrch::generatePriorityGroupMap(std::map p { } -void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +void PortsOrch::generatePriorityGroupMapPerPort(const Port &port, FlexCounterPgStates &pgsState) { } -void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +void PortsOrch::createPortBufferPgCounters(const Port &port, string pgs) { } -void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) +void PortsOrch::removePortBufferPgCounters(const Port &port, string pgs) { } @@ -416,24 +427,11 @@ void PortsOrch::removeDefaultBridgePorts() { } -bool PortsOrch::initializePort(Port &port) -{ - return true; -} - -void PortsOrch::initializePriorityGroups(Port &port) -{ -} - -void PortsOrch::initializePortBufferMaximumParameters(Port &port) +void PortsOrch::initializePortBufferMaximumParameters(const Port &port) { } -void PortsOrch::initializeQueues(Port &port) -{ -} - -bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id) +bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id, bool isUp) { return true; } @@ -498,7 +496,7 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) return SAI_STATUS_SUCCESS; } -bool PortsOrch::initPort(const PortConfig &port) +bool PortsOrch::initExistingPort(const PortConfig &port) { return true; } @@ -591,7 +589,8 @@ bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void * return true; } -bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value, bool override_fec) +bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value, + bool override_fec) { return true; } @@ -601,7 +600,7 @@ task_process_status PortsOrch::setPortAdvSpeeds(Port &port, std::set &interface_types) +task_process_status PortsOrch::setPortAdvInterfaceTypes(Port &port, + std::set &interface_types) { return task_success; } @@ -687,7 +687,8 @@ void PortsOrch::voqSyncDelLagMember(Port &lag, Port &port) { } -std::unordered_set PortsOrch::generateCounterStats(const string &type, bool gearbox) +template +std::unordered_set PortsOrch::generateCounterStats(const vector &counterIds, std::string (*serializer)(const T)) { return {}; } @@ -695,3 +696,7 @@ std::unordered_set PortsOrch::generateCounterStats(const string &ty void PortsOrch::doTask(swss::SelectableTimer &timer) { } + +void PortsOrch::onWarmBootEnd() +{ +} diff --git a/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp b/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp index 2ba915d9c0a..63aecb760ea 100644 --- a/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp +++ b/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp @@ -4,11 +4,11 @@ #include #include +#include #include #include #include "ipaddress.h" -#include #include "mock_response_publisher.h" #include "mock_sai_router_interface.h" #include "mock_sai_serialize.h" @@ -34,6 +34,7 @@ using ::testing::StrictMock; using ::testing::Truly; extern sai_object_id_t gSwitchId; +extern sai_object_id_t gUnderlayIfId; extern sai_tunnel_api_t *sai_tunnel_api; extern sai_router_interface_api_t *sai_router_intfs_api; extern MockSaiTunnel *mock_sai_tunnel; @@ -45,7 +46,6 @@ constexpr sai_object_id_t kRouterInterfaceOid1 = 1; constexpr char *kGreTunnelP4AppDbId1 = "tunnel-1"; constexpr char *kGreTunnelP4AppDbKey1 = R"({"match/tunnel_id":"tunnel-1"})"; constexpr sai_object_id_t kGreTunnelOid1 = 0x11; -constexpr sai_object_id_t kOverlayRifOid1 = 0x101; // APP DB entries for Add request. const P4GreTunnelAppDbEntry kP4GreTunnelAppDbEntry1{/*tunnel_id=*/"tunnel-1", @@ -69,7 +69,7 @@ std::unordered_map CreateAttributeListForG tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); tunnel_attr.id = SAI_TUNNEL_ATTR_OVERLAY_INTERFACE; - tunnel_attr.value.oid = kOverlayRifOid1; + tunnel_attr.value.oid = gUnderlayIfId; tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); tunnel_attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; @@ -218,9 +218,12 @@ class GreTunnelManagerTest : public ::testing::Test gre_tunnel_manager_.enqueue(APP_P4RT_TUNNEL_TABLE_NAME, entry); } - void Drain() - { - gre_tunnel_manager_.drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + gre_tunnel_manager_.drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return gre_tunnel_manager_.drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -272,7 +275,7 @@ class GreTunnelManagerTest : public ::testing::Test StrictMock mock_sai_tunnel_; StrictMock mock_sai_router_intf_; StrictMock mock_sai_serialize_; - MockResponsePublisher publisher_; + StrictMock publisher_; P4OidMapper p4_oid_mapper_; GreTunnelManager gre_tunnel_manager_; }; @@ -285,9 +288,6 @@ P4GreTunnelEntry *GreTunnelManagerTest::AddGreTunnelEntry1() kRouterInterfaceOid1)); // Set up mock call. - EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) - .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, CreateAttributeListForGreTunnelObject( @@ -343,22 +343,6 @@ TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenDependingPortIsNotPr EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); } -TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenRifSaiCallFails) -{ - const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); - EXPECT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, - KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), - kRouterInterfaceOid1)); - // Set up mock call. - EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) - .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_FAILURE))); - - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); - - // The add request failed for the gre tunnel entry. - EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); -} - TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenTunnelSaiCallFails) { const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); @@ -366,39 +350,11 @@ TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenTunnelSaiCallFails) KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), kRouterInterfaceOid1)); // Set up mock call. - EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) - .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, CreateAttributeListForGreTunnelObject( kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); - - // The add request failed for the gre tunnel entry. - EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); -} - -TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldRaiseCriticalWhenRecoverySaiCallFails) -{ - const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); - EXPECT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, - KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), - kRouterInterfaceOid1)); - // Set up mock call. - EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) - .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), - Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, - CreateAttributeListForGreTunnelObject( - kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); @@ -465,66 +421,6 @@ TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldFailIfTunnelSaiCallFails) EXPECT_TRUE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); } -TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldFailIfRifSaiCallFails) -{ - auto *p4_tunnel_entry = AddGreTunnelEntry1(); - ASSERT_NE(p4_tunnel_entry, nullptr); - - const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); - - // Set up mock call. - EXPECT_CALL(mock_sai_tunnel_, remove_tunnel(Eq(p4_tunnel_entry->tunnel_oid))).WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), - Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, - CreateAttributeListForGreTunnelObject( - kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) - .WillOnce(DoAll(SetArgPointee<0>(kGreTunnelOid1), Return(SAI_STATUS_SUCCESS))); - - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteRequest(gre_tunnel_key)); - - // Validate the gre tunnel entry is not deleted in either P4 gre tunnel - // manager or central mapper. - p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); - ASSERT_NE(p4_tunnel_entry, nullptr); - EXPECT_TRUE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); -} - -TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldRaiseCriticalIfRecoverySaiCallFails) -{ - auto *p4_tunnel_entry = AddGreTunnelEntry1(); - ASSERT_NE(p4_tunnel_entry, nullptr); - - const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); - - // Set up mock call. - EXPECT_CALL(mock_sai_tunnel_, remove_tunnel(Eq(p4_tunnel_entry->tunnel_oid))).WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), - Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, - CreateAttributeListForGreTunnelObject( - kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - - // TODO: Expect critical state. - - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteRequest(gre_tunnel_key)); - - // Validate the gre tunnel entry is not deleted in either P4 gre tunnel - // manager or central mapper. - p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); - ASSERT_NE(p4_tunnel_entry, nullptr); - EXPECT_TRUE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); -} - -TEST_F(GreTunnelManagerTest, GetGreTunnelEntryShouldReturnNullPointerForNonexistingGreTunnel) -{ - const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); - EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); -} - TEST_F(GreTunnelManagerTest, DeserializeP4GreTunnelAppDbEntryShouldReturnNullPointerForInvalidField) { std::vector attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kTunnelAction), @@ -564,29 +460,36 @@ TEST_F(GreTunnelManagerTest, DeserializeP4GreTunnelAppDbEntryShouldReturnNullPoi EXPECT_FALSE(DeserializeP4GreTunnelAppDbEntry(kInvalidAppDbKey, attributes).ok()); } -TEST_F(GreTunnelManagerTest, DrainDuplicateSetRequestShouldSucceed) -{ - auto *p4_tunnel_entry = AddGreTunnelEntry1(); - ASSERT_NE(p4_tunnel_entry, nullptr); - - nlohmann::json j; - j[prependMatchField(p4orch::kTunnelId)] = kP4GreTunnelAppDbEntry1.tunnel_id; - - std::vector fvs{ - {p4orch::kAction, p4orch::kTunnelAction}, - {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, - {prependParamField(p4orch::kEncapSrcIp), kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, - {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; - - swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), - SET_COMMAND, fvs); - - Enqueue(app_db_entry); - Drain(); - - // Expect that the update call will fail, so gre tunnel entry's fields stay - // the same. - EXPECT_TRUE(ValidateGreTunnelEntryAdd(kP4GreTunnelAppDbEntry1)); +TEST_F(GreTunnelManagerTest, DrainDuplicateSetRequestShouldFail) { + auto* p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = kP4GreTunnelAppDbEntry1.tunnel_id; + + std::vector fvs{ + {p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), + kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), + kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), + kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + swss::KeyOpFieldsValuesTuple app_db_entry( + std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_UNIMPLEMENTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNIMPLEMENTED, Drain(/*failure_before=*/false)); + + // Expect that the update call will fail, so gre tunnel entry's fields stay + // the same. + EXPECT_TRUE(ValidateGreTunnelEntryAdd(kP4GreTunnelAppDbEntry1)); } TEST_F(GreTunnelManagerTest, DrainDeleteRequestShouldSucceedForExistingGreTunnel) @@ -601,12 +504,14 @@ TEST_F(GreTunnelManagerTest, DrainDeleteRequestShouldSucceedForExistingGreTunnel std::vector fvs; swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), DEL_COMMAND, fvs); - EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_tunnel_, remove_tunnel(Eq(p4_tunnel_entry->tunnel_oid))).WillOnce(Return(SAI_STATUS_SUCCESS)); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Validate the gre tunnel entry has been deleted in both P4 gre tunnel // manager and centralized mapper. @@ -634,13 +539,13 @@ TEST_F(GreTunnelManagerTest, DrainValidAppEntryShouldSucceed) SET_COMMAND, fvs); Enqueue(app_db_entry); - EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) - .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_tunnel_, create_tunnel(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kGreTunnelOid1), Return(SAI_STATUS_SUCCESS))); - - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); EXPECT_TRUE(ValidateGreTunnelEntryAdd(kP4GreTunnelAppDbEntry1)); } @@ -662,7 +567,12 @@ TEST_F(GreTunnelManagerTest, DrainInvalidAppEntryShouldFail) Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); @@ -676,7 +586,12 @@ TEST_F(GreTunnelManagerTest, DrainInvalidAppEntryShouldFail) Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); @@ -689,7 +604,12 @@ TEST_F(GreTunnelManagerTest, DrainInvalidAppEntryShouldFail) Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); @@ -702,7 +622,12 @@ TEST_F(GreTunnelManagerTest, DrainInvalidAppEntryShouldFail) Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); @@ -715,7 +640,12 @@ TEST_F(GreTunnelManagerTest, DrainInvalidAppEntryShouldFail) Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); @@ -728,11 +658,120 @@ TEST_F(GreTunnelManagerTest, DrainInvalidAppEntryShouldFail) Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); } +TEST_F(GreTunnelManagerTest, DrainNotExecuted) { + EXPECT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + std::vector fvs{ + {p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), + kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), + kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), + kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = "1"; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kTunnelId)] = "2"; + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kTunnelId)] = "3"; + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, Drain(/*failure_before=*/true)); + EXPECT_EQ(nullptr, GetGreTunnelEntry(KeyGenerator::generateTunnelKey("1"))); + EXPECT_EQ(nullptr, GetGreTunnelEntry(KeyGenerator::generateTunnelKey("2"))); + EXPECT_EQ(nullptr, GetGreTunnelEntry(KeyGenerator::generateTunnelKey("3"))); +} + +TEST_F(GreTunnelManagerTest, DrainStopOnFirstFailure) { + EXPECT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + std::vector fvs{ + {p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), + kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), + kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), + kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = "1"; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kTunnelId)] = "2"; + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kTunnelId)] = "3"; + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + + EXPECT_CALL(mock_sai_tunnel_, create_tunnel(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kGreTunnelOid1), Return(SAI_STATUS_SUCCESS))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, GetGreTunnelEntry(KeyGenerator::generateTunnelKey("1"))); + EXPECT_EQ(nullptr, GetGreTunnelEntry(KeyGenerator::generateTunnelKey("2"))); + EXPECT_EQ(nullptr, GetGreTunnelEntry(KeyGenerator::generateTunnelKey("3"))); +} + TEST_F(GreTunnelManagerTest, VerifyStateTest) { auto *p4_tunnel_entry = AddGreTunnelEntry1(); diff --git a/orchagent/p4orch/tests/l3_admit_manager_test.cpp b/orchagent/p4orch/tests/l3_admit_manager_test.cpp index 33f88d18397..c0b150ffd4d 100644 --- a/orchagent/p4orch/tests/l3_admit_manager_test.cpp +++ b/orchagent/p4orch/tests/l3_admit_manager_test.cpp @@ -4,10 +4,10 @@ #include #include +#include #include #include -#include #include "mock_response_publisher.h" #include "mock_sai_my_mac.h" #include "p4oidmapper.h" @@ -35,14 +35,19 @@ extern MockSaiMyMac *mock_sai_my_mac; namespace { +// A physical port set up in test_main.cpp constexpr char *kPortName1 = "Ethernet1"; constexpr sai_object_id_t kPortOid1 = 0x112233; constexpr uint32_t kMtu1 = 1500; +// A physical port set up in test_main.cpp constexpr char *kPortName2 = "Ethernet2"; constexpr sai_object_id_t kPortOid2 = 0x1fed3; constexpr uint32_t kMtu2 = 4500; +// A lag port set up in test_main.cpp +constexpr char* kPortName3 = "Ethernet7"; + constexpr char *kL3AdmitP4AppDbKey1 = R"({"match/dst_mac":"00:02:03:04:00:00&ff:ff:ff:ff:00:00","priority":2030})"; constexpr sai_object_id_t kL3AdmitOid1 = 0x1; constexpr sai_object_id_t kL3AdmitOid2 = 0x2; @@ -58,6 +63,12 @@ const P4L3AdmitAppDbEntry kP4L3AdmitAppDbEntry2{/*port_name=*/kPortName1, /*mac_address_mask=*/swss::MacAddress("ff:ff:ff:ff:ff:00"), /*priority=*/2030}; +const P4L3AdmitAppDbEntry kP4L3AdmitAppDbEntry3{ + /*port_name=*/kPortName1, + /*mac_address_data=*/swss::MacAddress("00:02:03:04:05:06"), + /*mac_address_mask=*/swss::MacAddress("ff:ff:ff:ff:ff:ff"), + /*priority=*/2030}; + std::unordered_map CreateAttributeListForL3AdmitObject( const P4L3AdmitAppDbEntry &app_entry, const sai_object_id_t &port_oid) { @@ -181,9 +192,12 @@ class L3AdmitManagerTest : public ::testing::Test l3_admit_manager_.enqueue(APP_P4RT_L3_ADMIT_TABLE_NAME, entry); } - void Drain() - { - l3_admit_manager_.drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + l3_admit_manager_.drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return l3_admit_manager_.drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -233,7 +247,7 @@ class L3AdmitManagerTest : public ::testing::Test } StrictMock mock_sai_my_mac_; - MockResponsePublisher publisher_; + StrictMock publisher_; P4OidMapper p4_oid_mapper_; L3AdmitManager l3_admit_manager_; }; @@ -302,6 +316,23 @@ TEST_F(L3AdmitManagerTest, ProcessAddRequestShouldFailWhenDependingPortIsNotPres EXPECT_EQ(GetL3AdmitEntry(l3admit_key), nullptr); } +TEST_F(L3AdmitManagerTest, + ProcessAddRequestShouldFailWhenDependingPortTypeIsInvalid) { + const P4L3AdmitAppDbEntry kAppDbEntry{ + /*port_name=*/kPortName3, + /*mac_address_data=*/swss::MacAddress("00:02:03:04:00:00"), + /*mac_address_mask=*/swss::MacAddress("ff:ff:ff:ff:00:00"), + /*priority=*/2030}; + const auto l3admit_key = KeyGenerator::generateL3AdmitKey( + kAppDbEntry.mac_address_data, kAppDbEntry.mac_address_mask, + kAppDbEntry.port_name, kAppDbEntry.priority); + + EXPECT_EQ(StatusCode::SWSS_RC_UNIMPLEMENTED, + ProcessAddRequest(kAppDbEntry, l3admit_key)); + + EXPECT_EQ(GetL3AdmitEntry(l3admit_key), nullptr); +} + TEST_F(L3AdmitManagerTest, ProcessAddRequestShouldFailWhenSaiCallFails) { const auto l3admit_key = @@ -397,6 +428,28 @@ TEST_F(L3AdmitManagerTest, GetL3AdmitEntryShouldReturnNullPointerForNonexistingL EXPECT_EQ(GetL3AdmitEntry(l3admit_key), nullptr); } +TEST_F(L3AdmitManagerTest, + DeserializeP4L3AdmitAppDbEntryShouldReturnNullPointerForInvalidPort) { + // Invalid port NAME. + char* kInvalidAppDbKey = + R"({"match/dst_mac":"00:02:03:04:00:00","priority":2030,"match/in_port":"Ethernet70000"})"; + std::vector attributes = { + swss::FieldValueTuple(p4orch::kAction, p4orch::kL3AdmitAction)}; + + EXPECT_EQ( + DeserializeP4L3AdmitAppDbEntry(kInvalidAppDbKey, attributes).status(), + StatusCode::SWSS_RC_NOT_FOUND); + + // Unsupported port type. + kInvalidAppDbKey = + R"({"match/dst_mac":"00:02:03:04:00:00","priority":2030,"match/in_port":"Ethernet7"})"; + attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kL3AdmitAction)}; + + EXPECT_EQ( + DeserializeP4L3AdmitAppDbEntry(kInvalidAppDbKey, attributes).status(), + StatusCode::SWSS_RC_UNIMPLEMENTED); +} + TEST_F(L3AdmitManagerTest, DeserializeP4L3AdmitAppDbEntryShouldReturnNullPointerForInvalidAction) { std::vector attributes = { @@ -453,7 +506,11 @@ TEST_F(L3AdmitManagerTest, DrainDuplicateSetRequestShouldSucceed) SET_COMMAND, fvs); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Expect that the update call will fail, so l3 admit entry's fields stay // the same. @@ -478,7 +535,11 @@ TEST_F(L3AdmitManagerTest, DrainDeleteRequestShouldSucceedForExistingL3Admit) .WillOnce(Return(SAI_STATUS_SUCCESS)); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Validate the l3 admit entry has been deleted in both P4 l3 admit // manager @@ -509,28 +570,157 @@ TEST_F(L3AdmitManagerTest, DrainValidAppEntryShouldSucceed) EXPECT_CALL(mock_sai_my_mac_, create_my_mac(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kL3AdmitOid2), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); EXPECT_TRUE(ValidateL3AdmitEntryAdd(kP4L3AdmitAppDbEntry2)); } -TEST_F(L3AdmitManagerTest, DrainInValidAppEntryShouldSucceed) -{ - nlohmann::json j; - j[prependMatchField(p4orch::kDstMac)] = "1"; // Invalid Mac - j[p4orch::kPriority] = 1000; +TEST_F(L3AdmitManagerTest, DrainInValidAppEntryShouldFail) { + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = "1"; // Invalid Mac + j[p4orch::kPriority] = 1000; - std::vector fvs{{p4orch::kAction, p4orch::kL3AdmitAction}}; + std::vector fvs{ + {p4orch::kAction, p4orch::kL3AdmitAction}}; - swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), - SET_COMMAND, fvs); + swss::KeyOpFieldsValuesTuple app_db_entry( + std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); - Enqueue(app_db_entry); + Enqueue(app_db_entry); + + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, Drain(/*failure_before=*/false)); + constexpr char* kL3AdmitKey = R"({"match/dst_mac":"1","priority":1000})"; + EXPECT_EQ(GetL3AdmitEntry(kL3AdmitKey), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_MY_MAC, kL3AdmitKey)); +} + +TEST_F(L3AdmitManagerTest, DrainNotExecuted) { + std::vector fvs{ + {p4orch::kAction, p4orch::kL3AdmitAction}}; + + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = + kP4L3AdmitAppDbEntry1.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry1.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry1.priority; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kDstMac)] = + kP4L3AdmitAppDbEntry2.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry2.mac_address_mask.to_string(); + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kDstMac)] = + kP4L3AdmitAppDbEntry3.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry3.mac_address_mask.to_string(); + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, Drain(/*failure_before=*/true)); + EXPECT_EQ(nullptr, GetL3AdmitEntry(KeyGenerator::generateL3AdmitKey( + kP4L3AdmitAppDbEntry1.mac_address_data, + kP4L3AdmitAppDbEntry1.mac_address_mask, "", + kP4L3AdmitAppDbEntry1.priority))); + EXPECT_EQ(nullptr, GetL3AdmitEntry(KeyGenerator::generateL3AdmitKey( + kP4L3AdmitAppDbEntry2.mac_address_data, + kP4L3AdmitAppDbEntry2.mac_address_mask, "", + kP4L3AdmitAppDbEntry2.priority))); + EXPECT_EQ(nullptr, GetL3AdmitEntry(KeyGenerator::generateL3AdmitKey( + kP4L3AdmitAppDbEntry3.mac_address_data, + kP4L3AdmitAppDbEntry3.mac_address_mask, "", + kP4L3AdmitAppDbEntry3.priority))); +} - Drain(); - constexpr char *kL3AdmitKey = R"({"match/dst_mac":"1","priority":1000})"; - EXPECT_EQ(GetL3AdmitEntry(kL3AdmitKey), nullptr); - EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_MY_MAC, kL3AdmitKey)); +TEST_F(L3AdmitManagerTest, DrainStopOnFirstFailure) { + std::vector fvs{ + {p4orch::kAction, p4orch::kL3AdmitAction}}; + + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = + kP4L3AdmitAppDbEntry1.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry1.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry1.priority; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kDstMac)] = + kP4L3AdmitAppDbEntry2.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry2.mac_address_mask.to_string(); + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kDstMac)] = + kP4L3AdmitAppDbEntry3.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry3.mac_address_mask.to_string(); + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + EXPECT_CALL(mock_sai_my_mac_, create_my_mac(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kL3AdmitOid1), Return(SAI_STATUS_SUCCESS))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, GetL3AdmitEntry(KeyGenerator::generateL3AdmitKey( + kP4L3AdmitAppDbEntry1.mac_address_data, + kP4L3AdmitAppDbEntry1.mac_address_mask, "", + kP4L3AdmitAppDbEntry1.priority))); + EXPECT_EQ(nullptr, GetL3AdmitEntry(KeyGenerator::generateL3AdmitKey( + kP4L3AdmitAppDbEntry2.mac_address_data, + kP4L3AdmitAppDbEntry2.mac_address_mask, "", + kP4L3AdmitAppDbEntry2.priority))); + EXPECT_EQ(nullptr, GetL3AdmitEntry(KeyGenerator::generateL3AdmitKey( + kP4L3AdmitAppDbEntry3.mac_address_data, + kP4L3AdmitAppDbEntry3.mac_address_mask, "", + kP4L3AdmitAppDbEntry3.priority))); } TEST_F(L3AdmitManagerTest, VerifyStateTest) diff --git a/orchagent/p4orch/tests/mirror_session_manager_test.cpp b/orchagent/p4orch/tests/mirror_session_manager_test.cpp index e1376819305..f2b994e097a 100644 --- a/orchagent/p4orch/tests/mirror_session_manager_test.cpp +++ b/orchagent/p4orch/tests/mirror_session_manager_test.cpp @@ -3,10 +3,10 @@ #include #include +#include #include #include -#include #include "mock_response_publisher.h" #include "mock_sai_mirror.h" #include "p4oidmapper.h" @@ -220,9 +220,12 @@ class MirrorSessionManagerTest : public ::testing::Test return mirror_session_manager_.enqueue(APP_P4RT_MIRROR_SESSION_TABLE_NAME, entry); } - void Drain() - { - return mirror_session_manager_.drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + mirror_session_manager_.drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return mirror_session_manager_.drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -329,7 +332,7 @@ class MirrorSessionManagerTest : public ::testing::Test } StrictMock mock_sai_mirror_; - MockResponsePublisher publisher_; + StrictMock publisher_; P4OidMapper p4_oid_mapper_; p4orch::MirrorSessionManager mirror_session_manager_; }; @@ -360,7 +363,11 @@ TEST_F(MirrorSessionManagerTest, SuccessfulEnqueueAndDrain) swss::IpAddress(kDstIp1), swss::MacAddress(kSrcMac1), swss::MacAddress(kDstMac1)))))) .WillOnce(DoAll(SetArgPointee<0>(kMirrorSessionOid), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -443,7 +450,11 @@ TEST_F(MirrorSessionManagerTest, SuccessfulEnqueueAndDrain) std::vector({attr}))))) .WillOnce(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); ASSERT_NE(mirror_entry, nullptr); @@ -463,7 +474,11 @@ TEST_F(MirrorSessionManagerTest, SuccessfulEnqueueAndDrain) Enqueue(app_db_entry); // Set up mock call. EXPECT_CALL(mock_sai_mirror_, remove_mirror_session(Eq(kMirrorSessionOid))).WillOnce(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); EXPECT_EQ(mirror_entry, nullptr); @@ -487,7 +502,12 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailForInvalidAppDbEntryMatchFiled) std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -509,7 +529,12 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailForUnknownOp) std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + j.dump(), "unknown_op", fvs); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -531,7 +556,12 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailForInvalidAppDbEntryFieldValue) std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -557,7 +587,12 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailForUnknownAppDbEntryFieldValue) std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -579,7 +614,12 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailForIncompleteAppDbEntry) std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs_missing_tos); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -601,7 +641,11 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailForUnknownPort) std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -631,7 +675,11 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailWhenCreateSaiCallFails) swss::IpAddress(kDstIp1), swss::MacAddress(kSrcMac1), swss::MacAddress(kDstMac1)))))) .WillOnce(Return(SAI_STATUS_FAILURE)); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -661,14 +709,22 @@ TEST_F(MirrorSessionManagerTest, DrainShouldFailWhenDeleteSaiCallFails) swss::IpAddress(kDstIp1), swss::MacAddress(kSrcMac1), swss::MacAddress(kDstMac1)))))) .WillOnce(DoAll(SetArgPointee<0>(kMirrorSessionOid), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); fvs = {}; app_db_entry = {std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + j.dump(), DEL_COMMAND, fvs}; Enqueue(app_db_entry); EXPECT_CALL(mock_sai_mirror_, remove_mirror_session(Eq(kMirrorSessionOid))).WillOnce(Return(SAI_STATUS_FAILURE)); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); // Check entry still exists. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -870,7 +926,11 @@ TEST_F(MirrorSessionManagerTest, UpdateFailureShouldNotChangeExistingEntry) // Set up mock call. EXPECT_CALL(mock_sai_mirror_, create_mirror_session(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kMirrorSessionOid), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -911,7 +971,11 @@ TEST_F(MirrorSessionManagerTest, UpdateFailureShouldNotChangeExistingEntry) .WillOnce(Return(SAI_STATUS_FAILURE)) .WillRepeatedly(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); ASSERT_NE(mirror_entry, nullptr); @@ -937,7 +1001,11 @@ TEST_F(MirrorSessionManagerTest, UpdateRecoveryFailureShouldRaiseCriticalState) // Set up mock call. EXPECT_CALL(mock_sai_mirror_, create_mirror_session(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kMirrorSessionOid), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Check the added entry. auto mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); @@ -984,7 +1052,11 @@ TEST_F(MirrorSessionManagerTest, UpdateRecoveryFailureShouldRaiseCriticalState) .WillOnce(Return(SAI_STATUS_FAILURE)); // TODO: Expect critical state. - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); ASSERT_NE(mirror_entry, nullptr); @@ -1014,6 +1086,116 @@ TEST_F(MirrorSessionManagerTest, DeleteMirrorSessionNotInMapperShouldFail) ProcessDeleteRequest(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId))); } +TEST_F(MirrorSessionManagerTest, DrainNotExecuted) { + std::vector fvs{ + {p4orch::kAction, p4orch::kMirrorAsIpv4Erspan}, + {prependParamField(p4orch::kPort), kPort1}, + {prependParamField(p4orch::kSrcIp), kSrcIp1}, + {prependParamField(p4orch::kDstIp), kDstIp1}, + {prependParamField(p4orch::kSrcMac), kSrcMac1}, + {prependParamField(p4orch::kDstMac), kDstMac1}, + {prependParamField(p4orch::kTtl), kTtl1}, + {prependParamField(p4orch::kTos), kTos1}}; + + nlohmann::json j; + j[prependMatchField(p4orch::kMirrorSessionId)] = "1"; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kMirrorSessionId)] = "2"; + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kMirrorSessionId)] = "3"; + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, Drain(/*failure_before=*/true)); + EXPECT_EQ(nullptr, + GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey("1"))); + EXPECT_EQ(nullptr, + GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey("2"))); + EXPECT_EQ(nullptr, + GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey("3"))); +} + +TEST_F(MirrorSessionManagerTest, DrainStopOnFirstFailure) { + std::vector fvs{ + {p4orch::kAction, p4orch::kMirrorAsIpv4Erspan}, + {prependParamField(p4orch::kPort), kPort1}, + {prependParamField(p4orch::kSrcIp), kSrcIp1}, + {prependParamField(p4orch::kDstIp), kDstIp1}, + {prependParamField(p4orch::kSrcMac), kSrcMac1}, + {prependParamField(p4orch::kDstMac), kDstMac1}, + {prependParamField(p4orch::kTtl), kTtl1}, + {prependParamField(p4orch::kTos), kTos1}}; + + nlohmann::json j; + j[prependMatchField(p4orch::kMirrorSessionId)] = "1"; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kMirrorSessionId)] = "2"; + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kMirrorSessionId)] = "3"; + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_MIRROR_SESSION_TABLE_NAME) + kTableKeyDelimiter + + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + + EXPECT_CALL(mock_sai_mirror_, create_mirror_session(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kMirrorSessionOid), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, + GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey("1"))); + EXPECT_EQ(nullptr, + GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey("2"))); + EXPECT_EQ(nullptr, + GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey("3"))); +} + TEST_F(MirrorSessionManagerTest, VerifyStateTest) { AddDefaultMirrorSection(); diff --git a/orchagent/p4orch/tests/mock_sai_bridge.cpp b/orchagent/p4orch/tests/mock_sai_bridge.cpp new file mode 100644 index 00000000000..2029b07299d --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_bridge.cpp @@ -0,0 +1,113 @@ +#include "mock_sai_bridge.h" + +MockSaiBridge* mock_sai_bridge; + +sai_status_t mock_create_bridge( + _Out_ sai_object_id_t* bridge_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list) { + return mock_sai_bridge->create_bridge( + bridge_id, switch_id, attr_count, attr_list); +} + +sai_status_t mock_remove_bridge( + _In_ sai_object_id_t bridge_id) { + return mock_sai_bridge->remove_bridge(bridge_id); +} + +sai_status_t mock_set_bridge_attribute( + _In_ sai_object_id_t bridge_id, + _In_ const sai_attribute_t* attr) { + return mock_sai_bridge->set_bridge_attribute(bridge_id, attr); +} + +sai_status_t mock_get_bridge_attribute( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list) { + return mock_sai_bridge->get_bridge_attribute( + bridge_id, attr_count, attr_list); +} + +sai_status_t mock_get_bridge_stats( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _Out_ uint64_t *counters) { + return mock_sai_bridge->get_bridge_stats( + bridge_id, number_of_counters, counter_ids, counters); +} + +sai_status_t mock_get_bridge_stats_ext( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _In_ sai_stats_mode_t mode, + _Out_ uint64_t *counters) { + return mock_sai_bridge->get_bridge_stats_ext( + bridge_id, number_of_counters, counter_ids, mode, counters); +} + +sai_status_t mock_clear_bridge_stats( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids) { + return mock_sai_bridge->clear_bridge_stats( + bridge_id, number_of_counters, counter_ids); +} + +sai_status_t mock_create_bridge_port( + _Out_ sai_object_id_t* bridge_port_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list) { + return mock_sai_bridge->create_bridge_port( + bridge_port_id, switch_id, attr_count, attr_list); +} + +sai_status_t mock_remove_bridge_port( + _In_ sai_object_id_t bridge_port_id) { + return mock_sai_bridge->remove_bridge_port(bridge_port_id); +} + +sai_status_t mock_set_bridge_port_attribute( + _In_ sai_object_id_t bridge_port_id, + _In_ const sai_attribute_t* attr) { + return mock_sai_bridge->set_bridge_port_attribute(bridge_port_id, attr); +} + +sai_status_t mock_get_bridge_port_attribute( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list) { + return mock_sai_bridge->get_bridge_port_attribute( + bridge_port_id, attr_count, attr_list); +} + +sai_status_t mock_get_bridge_port_stats( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _Out_ uint64_t *counters) { + return mock_sai_bridge->get_bridge_port_stats( + bridge_port_id, number_of_counters, counter_ids, counters); +} + +sai_status_t mock_get_bridge_port_stats_ext( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _In_ sai_stats_mode_t mode, + _Out_ uint64_t *counters) { + return mock_sai_bridge->get_bridge_port_stats_ext( + bridge_port_id, number_of_counters, counter_ids, mode, counters); +} + +sai_status_t mock_clear_bridge_port_stats( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids) { + return mock_sai_bridge->clear_bridge_port_stats( + bridge_port_id, number_of_counters, counter_ids); +} diff --git a/orchagent/p4orch/tests/mock_sai_bridge.h b/orchagent/p4orch/tests/mock_sai_bridge.h new file mode 100644 index 00000000000..1ca15d3330e --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_bridge.h @@ -0,0 +1,157 @@ +#pragma once + +#include + +extern "C" { +#include "sai.h" +} + +// Mock Class mapping methods to bridge SAI APIs. +class MockSaiBridge { + public: + MOCK_METHOD4(create_bridge, + sai_status_t(_Out_ sai_object_id_t* bridge_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list)); + + MOCK_METHOD1(remove_bridge, + sai_status_t(_In_ sai_object_id_t bridge_id)); + + MOCK_METHOD2(set_bridge_attribute, + sai_status_t(_In_ sai_object_id_t bridge_id, + _In_ const sai_attribute_t* attr)); + + MOCK_METHOD3(get_bridge_attribute, + sai_status_t(_In_ sai_object_id_t bridge_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list)); + + MOCK_METHOD4(get_bridge_stats, + sai_status_t(_In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _Out_ uint64_t *counters)); + + MOCK_METHOD5(get_bridge_stats_ext, + sai_status_t(_In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _In_ sai_stats_mode_t mode, + _Out_ uint64_t *counters)); + + MOCK_METHOD3(clear_bridge_stats, + sai_status_t(_In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t *counter_ids)); + + MOCK_METHOD4(create_bridge_port, + sai_status_t(_Out_ sai_object_id_t* bridge_port_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list)); + + MOCK_METHOD1(remove_bridge_port, + sai_status_t(_In_ sai_object_id_t bridge_port_id)); + + MOCK_METHOD2(set_bridge_port_attribute, + sai_status_t(_In_ sai_object_id_t bridge_port_id, + _In_ const sai_attribute_t* attr)); + + MOCK_METHOD3(get_bridge_port_attribute, + sai_status_t(_In_ sai_object_id_t bridge_port_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list)); + + MOCK_METHOD4(get_bridge_port_stats, + sai_status_t(_In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _Out_ uint64_t *counters)); + + MOCK_METHOD5(get_bridge_port_stats_ext, + sai_status_t(_In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _In_ sai_stats_mode_t mode, + _Out_ uint64_t *counters)); + + MOCK_METHOD3(clear_bridge_port_stats, + sai_status_t(_In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t *counter_ids)); +}; + +extern MockSaiBridge* mock_sai_bridge; + +sai_status_t mock_create_bridge( + _Out_ sai_object_id_t* bridge_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list); + +sai_status_t mock_remove_bridge( + _In_ sai_object_id_t bridge_id); + +sai_status_t mock_set_bridge_attribute( + _In_ sai_object_id_t bridge_id, + _In_ const sai_attribute_t* attr); + +sai_status_t mock_get_bridge_attribute( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list); + +sai_status_t mock_get_bridge_stats( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _Out_ uint64_t *counters); + +sai_status_t mock_get_bridge_stats_ext( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _In_ sai_stats_mode_t mode, + _Out_ uint64_t *counters); + +sai_status_t mock_clear_bridge_stats( + _In_ sai_object_id_t bridge_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids); + +sai_status_t mock_create_bridge_port( + _Out_ sai_object_id_t* bridge_port_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list); + +sai_status_t mock_remove_bridge_port( + _In_ sai_object_id_t bridge_port_id); + +sai_status_t mock_set_bridge_port_attribute( + _In_ sai_object_id_t bridge_port_id, + _In_ const sai_attribute_t* attr); + +sai_status_t mock_get_bridge_port_attribute( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list); + +sai_status_t mock_get_bridge_port_stats( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _Out_ uint64_t *counters); + +sai_status_t mock_get_bridge_port_stats_ext( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids, + _In_ sai_stats_mode_t mode, + _Out_ uint64_t *counters); + +sai_status_t mock_clear_bridge_port_stats( + _In_ sai_object_id_t bridge_port_id, + _In_ uint32_t number_of_counters, + _In_ const sai_stat_id_t* counter_ids); diff --git a/orchagent/p4orch/tests/mock_sai_neighbor.cpp b/orchagent/p4orch/tests/mock_sai_neighbor.cpp new file mode 100644 index 00000000000..8ec8c10dce2 --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_neighbor.cpp @@ -0,0 +1,40 @@ +#include "mock_sai_neighbor.h" + +MockSaiNeighbor* mock_sai_neighbor; + +sai_status_t mock_create_neighbor_entry( + _In_ const sai_neighbor_entry_t* neighbor_entry, _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list) { + return mock_sai_neighbor->create_neighbor_entry(neighbor_entry, attr_count, + attr_list); +} + +sai_status_t mock_remove_neighbor_entry( + _In_ const sai_neighbor_entry_t* neighbor_entry) { + return mock_sai_neighbor->remove_neighbor_entry(neighbor_entry); +} + +sai_status_t mock_create_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->create_neighbor_entries(object_count, neighbor_entry, attr_count, attr_list, mode, object_statuses); +} + +sai_status_t mock_remove_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) +{ + return mock_sai_neighbor->remove_neighbor_entries(object_count, neighbor_entry, mode, object_statuses); +} + +sai_status_t mock_set_neighbor_entry_attribute( + _In_ const sai_neighbor_entry_t* neighbor_entry, + _In_ const sai_attribute_t* attr) { + return mock_sai_neighbor->set_neighbor_entry_attribute(neighbor_entry, attr); +} + +sai_status_t mock_get_neighbor_entry_attribute( + _In_ const sai_neighbor_entry_t* neighbor_entry, _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list) { + return mock_sai_neighbor->get_neighbor_entry_attribute(neighbor_entry, + attr_count, attr_list); +} diff --git a/orchagent/p4orch/tests/mock_sai_neighbor.h b/orchagent/p4orch/tests/mock_sai_neighbor.h index cd8f2aa0a9d..56062ce1d29 100644 --- a/orchagent/p4orch/tests/mock_sai_neighbor.h +++ b/orchagent/p4orch/tests/mock_sai_neighbor.h @@ -16,6 +16,12 @@ class MockSaiNeighbor MOCK_METHOD1(remove_neighbor_entry, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry)); + MOCK_METHOD6(create_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses)); + + MOCK_METHOD4(remove_neighbor_entries, sai_status_t(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses)); + MOCK_METHOD2(set_neighbor_entry_attribute, sai_status_t(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const sai_attribute_t *attr)); @@ -24,27 +30,21 @@ class MockSaiNeighbor _Inout_ sai_attribute_t *attr_list)); }; -MockSaiNeighbor *mock_sai_neighbor; +extern MockSaiNeighbor *mock_sai_neighbor; sai_status_t mock_create_neighbor_entry(_In_ const sai_neighbor_entry_t *neighbor_entry, _In_ uint32_t attr_count, - _In_ const sai_attribute_t *attr_list) -{ - return mock_sai_neighbor->create_neighbor_entry(neighbor_entry, attr_count, attr_list); -} + _In_ const sai_attribute_t *attr_list); -sai_status_t mock_remove_neighbor_entry(_In_ const sai_neighbor_entry_t *neighbor_entry) -{ - return mock_sai_neighbor->remove_neighbor_entry(neighbor_entry); -} +sai_status_t mock_remove_neighbor_entry(_In_ const sai_neighbor_entry_t *neighbor_entry); + +sai_status_t mock_create_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses); + +sai_status_t mock_remove_neighbor_entries(_In_ uint32_t object_count, _In_ const sai_neighbor_entry_t *neighbor_entry, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses); sai_status_t mock_set_neighbor_entry_attribute(_In_ const sai_neighbor_entry_t *neighbor_entry, - _In_ const sai_attribute_t *attr) -{ - return mock_sai_neighbor->set_neighbor_entry_attribute(neighbor_entry, attr); -} + _In_ const sai_attribute_t *attr); sai_status_t mock_get_neighbor_entry_attribute(_In_ const sai_neighbor_entry_t *neighbor_entry, - _In_ uint32_t attr_count, _Inout_ sai_attribute_t *attr_list) -{ - return mock_sai_neighbor->get_neighbor_entry_attribute(neighbor_entry, attr_count, attr_list); -} + _In_ uint32_t attr_count, _Inout_ sai_attribute_t *attr_list); diff --git a/orchagent/p4orch/tests/mock_sai_next_hop.cpp b/orchagent/p4orch/tests/mock_sai_next_hop.cpp new file mode 100644 index 00000000000..1558d3f4cc3 --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_next_hop.cpp @@ -0,0 +1,42 @@ +#include "mock_sai_next_hop.h" + +MockSaiNextHop* mock_sai_next_hop; + +sai_status_t mock_create_next_hop(_Out_ sai_object_id_t* next_hop_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t* attr_list) { + return mock_sai_next_hop->create_next_hop(next_hop_id, switch_id, attr_count, + attr_list); +} + +sai_status_t mock_remove_next_hop(_In_ sai_object_id_t next_hop_id) { + return mock_sai_next_hop->remove_next_hop(next_hop_id); +} + +sai_status_t mock_create_next_hops(_In_ sai_object_id_t switch_id, _In_ uint32_t object_count, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_object_id_t *object_id, _Out_ sai_status_t *object_statuses) +{ + return mock_sai_next_hop->create_next_hops(switch_id, object_count, attr_count, attr_list, mode, + object_id, object_statuses); +} + +sai_status_t mock_remove_next_hops(_In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses) +{ + return mock_sai_next_hop->remove_next_hops(object_count, object_id, mode, object_statuses); +} + +sai_status_t mock_set_next_hop_attribute(_In_ sai_object_id_t next_hop_id, + _In_ const sai_attribute_t* attr) { + return mock_sai_next_hop->set_next_hop_attribute(next_hop_id, attr); +} + +sai_status_t mock_get_next_hop_attribute(_In_ sai_object_id_t next_hop_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t* attr_list) { + return mock_sai_next_hop->get_next_hop_attribute(next_hop_id, attr_count, + attr_list); +} + diff --git a/orchagent/p4orch/tests/mock_sai_next_hop.h b/orchagent/p4orch/tests/mock_sai_next_hop.h index 83e6e7d506b..a861914206e 100644 --- a/orchagent/p4orch/tests/mock_sai_next_hop.h +++ b/orchagent/p4orch/tests/mock_sai_next_hop.h @@ -22,30 +22,32 @@ class MockSaiNextHop MOCK_METHOD3(get_next_hop_attribute, sai_status_t(_In_ sai_object_id_t next_hop_id, _In_ uint32_t attr_count, _Inout_ sai_attribute_t *attr_list)); + + MOCK_METHOD7(create_next_hops, sai_status_t(_In_ sai_object_id_t switch_id, _In_ uint32_t object_count, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_object_id_t *object_id, _Out_ sai_status_t *object_statuses)); + + MOCK_METHOD4(remove_next_hops, sai_status_t(_In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses)); }; // Note that before mock functions below are used, mock_sai_next_hop must be // initialized to point to an instance of MockSaiNextHop. -MockSaiNextHop *mock_sai_next_hop; +extern MockSaiNextHop *mock_sai_next_hop; sai_status_t mock_create_next_hop(_Out_ sai_object_id_t *next_hop_id, _In_ sai_object_id_t switch_id, - _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list) -{ - return mock_sai_next_hop->create_next_hop(next_hop_id, switch_id, attr_count, attr_list); -} + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list); -sai_status_t mock_remove_next_hop(_In_ sai_object_id_t next_hop_id) -{ - return mock_sai_next_hop->remove_next_hop(next_hop_id); -} +sai_status_t mock_remove_next_hop(_In_ sai_object_id_t next_hop_id); -sai_status_t mock_set_next_hop_attribute(_In_ sai_object_id_t next_hop_id, _In_ const sai_attribute_t *attr) -{ - return mock_sai_next_hop->set_next_hop_attribute(next_hop_id, attr); -} +sai_status_t mock_set_next_hop_attribute(_In_ sai_object_id_t next_hop_id, _In_ const sai_attribute_t *attr); sai_status_t mock_get_next_hop_attribute(_In_ sai_object_id_t next_hop_id, _In_ uint32_t attr_count, - _Inout_ sai_attribute_t *attr_list) -{ - return mock_sai_next_hop->get_next_hop_attribute(next_hop_id, attr_count, attr_list); -} + _Inout_ sai_attribute_t *attr_list); + +sai_status_t mock_create_next_hops(_In_ sai_object_id_t switch_id, _In_ uint32_t object_count, _In_ const uint32_t *attr_count, + _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_object_id_t *object_id, _Out_ sai_status_t *object_statuses); + +sai_status_t mock_remove_next_hops(_In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, _In_ sai_bulk_op_error_mode_t mode, + _Out_ sai_status_t *object_statuses); diff --git a/orchagent/p4orch/tests/mock_sai_route.cpp b/orchagent/p4orch/tests/mock_sai_route.cpp new file mode 100644 index 00000000000..2cf9d736500 --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_route.cpp @@ -0,0 +1,62 @@ +#include "mock_sai_route.h" + +MockSaiRoute* mock_sai_route; + +sai_status_t create_route_entry(const sai_route_entry_t* route_entry, + uint32_t attr_count, + const sai_attribute_t* attr_list) { + return mock_sai_route->create_route_entry(route_entry, attr_count, attr_list); +} + +sai_status_t remove_route_entry(const sai_route_entry_t* route_entry) { + return mock_sai_route->remove_route_entry(route_entry); +} + +sai_status_t set_route_entry_attribute(const sai_route_entry_t* route_entry, + const sai_attribute_t* attr) { + return mock_sai_route->set_route_entry_attribute(route_entry, attr); +} + +sai_status_t get_route_entry_attribute(const sai_route_entry_t* route_entry, + uint32_t attr_count, + sai_attribute_t* attr_list) { + return mock_sai_route->get_route_entry_attribute(route_entry, attr_count, + attr_list); +} + +sai_status_t create_route_entries(uint32_t object_count, + const sai_route_entry_t* route_entry, + const uint32_t* attr_count, + const sai_attribute_t** attr_list, + sai_bulk_op_error_mode_t mode, + sai_status_t* object_statuses) { + return mock_sai_route->create_route_entries( + object_count, route_entry, attr_count, attr_list, mode, object_statuses); +} + +sai_status_t remove_route_entries(uint32_t object_count, + const sai_route_entry_t* route_entry, + sai_bulk_op_error_mode_t mode, + sai_status_t* object_statuses) { + return mock_sai_route->remove_route_entries(object_count, route_entry, mode, + object_statuses); +} + +sai_status_t set_route_entries_attribute(uint32_t object_count, + const sai_route_entry_t* route_entry, + const sai_attribute_t* attr_list, + sai_bulk_op_error_mode_t mode, + sai_status_t* object_statuses) { + return mock_sai_route->set_route_entries_attribute( + object_count, route_entry, attr_list, mode, object_statuses); +} + +sai_status_t get_route_entries_attribute(uint32_t object_count, + const sai_route_entry_t* route_entry, + const uint32_t* attr_count, + sai_attribute_t** attr_list, + sai_bulk_op_error_mode_t mode, + sai_status_t* object_statuses) { + return mock_sai_route->get_route_entries_attribute( + object_count, route_entry, attr_count, attr_list, mode, object_statuses); +} diff --git a/orchagent/p4orch/tests/mock_sai_route.h b/orchagent/p4orch/tests/mock_sai_route.h index b40cf6605ee..5e0af530260 100644 --- a/orchagent/p4orch/tests/mock_sai_route.h +++ b/orchagent/p4orch/tests/mock_sai_route.h @@ -1,36 +1,14 @@ #pragma once +#include + extern "C" { #include "sai.h" #include "sairoute.h" } -class SaiRouteInterface -{ - public: - virtual sai_status_t create_route_entry(const sai_route_entry_t *route_entry, uint32_t attr_count, - const sai_attribute_t *attr_list) = 0; - virtual sai_status_t remove_route_entry(const sai_route_entry_t *route_entry) = 0; - virtual sai_status_t set_route_entry_attribute(const sai_route_entry_t *route_entry, - const sai_attribute_t *attr) = 0; - virtual sai_status_t get_route_entry_attribute(const sai_route_entry_t *route_entry, uint32_t attr_count, - sai_attribute_t *attr_list) = 0; - virtual sai_status_t create_route_entries(uint32_t object_count, const sai_route_entry_t *route_entry, - const uint32_t *attr_count, const sai_attribute_t **attr_list, - sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses) = 0; - virtual sai_status_t remove_route_entries(uint32_t object_count, const sai_route_entry_t *route_entry, - sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses) = 0; - virtual sai_status_t set_route_entries_attribute(uint32_t object_count, const sai_route_entry_t *route_entry, - const sai_attribute_t *attr_list, sai_bulk_op_error_mode_t mode, - sai_status_t *object_statuses) = 0; - virtual sai_status_t get_route_entries_attribute(uint32_t object_count, const sai_route_entry_t *route_entry, - const uint32_t *attr_count, sai_attribute_t **attr_list, - sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses) = 0; -}; - -class MockSaiRoute : public SaiRouteInterface -{ +class MockSaiRoute { public: MOCK_METHOD3(create_route_entry, sai_status_t(const sai_route_entry_t *route_entry, uint32_t attr_count, const sai_attribute_t *attr_list)); @@ -54,55 +32,29 @@ class MockSaiRoute : public SaiRouteInterface sai_status_t *object_statuses)); }; -MockSaiRoute *mock_sai_route; +extern MockSaiRoute *mock_sai_route; sai_status_t create_route_entry(const sai_route_entry_t *route_entry, uint32_t attr_count, - const sai_attribute_t *attr_list) -{ - return mock_sai_route->create_route_entry(route_entry, attr_count, attr_list); -} + const sai_attribute_t *attr_list); -sai_status_t remove_route_entry(const sai_route_entry_t *route_entry) -{ - return mock_sai_route->remove_route_entry(route_entry); -} +sai_status_t remove_route_entry(const sai_route_entry_t *route_entry); -sai_status_t set_route_entry_attribute(const sai_route_entry_t *route_entry, const sai_attribute_t *attr) -{ - return mock_sai_route->set_route_entry_attribute(route_entry, attr); -} +sai_status_t set_route_entry_attribute(const sai_route_entry_t *route_entry, const sai_attribute_t *attr); sai_status_t get_route_entry_attribute(const sai_route_entry_t *route_entry, uint32_t attr_count, - sai_attribute_t *attr_list) -{ - return mock_sai_route->get_route_entry_attribute(route_entry, attr_count, attr_list); -} + sai_attribute_t *attr_list); sai_status_t create_route_entries(uint32_t object_count, const sai_route_entry_t *route_entry, const uint32_t *attr_count, const sai_attribute_t **attr_list, - sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses) -{ - return mock_sai_route->create_route_entries(object_count, route_entry, attr_count, attr_list, mode, - object_statuses); -} + sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses); sai_status_t remove_route_entries(uint32_t object_count, const sai_route_entry_t *route_entry, - sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses) -{ - return mock_sai_route->remove_route_entries(object_count, route_entry, mode, object_statuses); -} + sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses); sai_status_t set_route_entries_attribute(uint32_t object_count, const sai_route_entry_t *route_entry, const sai_attribute_t *attr_list, sai_bulk_op_error_mode_t mode, - sai_status_t *object_statuses) -{ - return mock_sai_route->set_route_entries_attribute(object_count, route_entry, attr_list, mode, object_statuses); -} + sai_status_t *object_statuses); sai_status_t get_route_entries_attribute(uint32_t object_count, const sai_route_entry_t *route_entry, const uint32_t *attr_count, sai_attribute_t **attr_list, - sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses) -{ - return mock_sai_route->get_route_entries_attribute(object_count, route_entry, attr_count, attr_list, mode, - object_statuses); -} + sai_bulk_op_error_mode_t mode, sai_status_t *object_statuses); diff --git a/orchagent/p4orch/tests/mock_sai_stp.h b/orchagent/p4orch/tests/mock_sai_stp.h new file mode 100644 index 00000000000..c0e60c59f0b --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_stp.h @@ -0,0 +1,115 @@ +#ifndef MOCK_SAI_STP_H +#define MOCK_SAI_STP_H + +#include +extern "C" +{ +#include "sai.h" +} + +// Mock class for SAI STP APIs +class MockSaiStp { +public: + // Mock method for creating an STP instance + MOCK_METHOD4(create_stp, + sai_status_t(_Out_ sai_object_id_t *stp_instance_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list)); + + // Mock method for removing an STP instance + MOCK_METHOD1(remove_stp, sai_status_t(_In_ sai_object_id_t stp_instance_id)); + + // Mock method for setting STP instance attributes + MOCK_METHOD2(set_stp_attribute, + sai_status_t(_In_ sai_object_id_t stp_instance_id, + _In_ const sai_attribute_t *attr)); + + // Mock method for getting STP instance attributes + MOCK_METHOD3(get_stp_attribute, + sai_status_t(_Out_ sai_object_id_t stp_instance_id, + _In_ uint32_t attr_count, + _In_ sai_attribute_t *attr_list)); + + // Mock method for creating an STP port + MOCK_METHOD4(create_stp_port, + sai_status_t(_Out_ sai_object_id_t *stp_port_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list)); + + // Mock method for removing an STP port + MOCK_METHOD1(remove_stp_port, + sai_status_t(_In_ sai_object_id_t stp_port_id)); + + // Mock method for setting STP port attributes + MOCK_METHOD2(set_stp_port_attribute, + sai_status_t(_Out_ sai_object_id_t stp_port_id, + _In_ const sai_attribute_t *attr)); + + // Mock method for getting STP port attributes + MOCK_METHOD3(get_stp_port_attribute, + sai_status_t(_Out_ sai_object_id_t stp_port_id, + _In_ uint32_t attr_count, + _In_ sai_attribute_t *attr_list)); + + // Mock method for flushing FDB entries + MOCK_METHOD3(flush_fdb_entries, + sai_status_t(_In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list)); +}; + +// Global mock object for SAI STP APIs +MockSaiStp *mock_sai_stp; + +sai_status_t mock_create_stp(_Out_ sai_object_id_t *stp_instance_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) +{ + return mock_sai_stp->create_stp(stp_instance_id, switch_id, attr_count, attr_list); +} + +sai_status_t mock_remove_stp(_In_ sai_object_id_t stp_instance_id) +{ + return mock_sai_stp->remove_stp(stp_instance_id); +} + +sai_status_t mock_set_stp_attribute(_In_ sai_object_id_t stp_instance_id, _In_ const sai_attribute_t *attr) +{ + return mock_sai_stp->set_stp_attribute(stp_instance_id, attr); +} + +sai_status_t mock_get_stp_attribute(_Out_ sai_object_id_t stp_instance_id, + _In_ uint32_t attr_count, _Inout_ sai_attribute_t *attr_list) +{ + return mock_sai_stp->get_stp_attribute(stp_instance_id, attr_count, attr_list); +} +sai_status_t mock_create_stp_port(_Out_ sai_object_id_t *stp_port_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) +{ + return mock_sai_stp->create_stp_port(stp_port_id, switch_id,attr_count, attr_list); +} + +sai_status_t mock_remove_stp_port(_In_ sai_object_id_t stp_port_id) +{ + return mock_sai_stp->remove_stp_port(stp_port_id); +} + +sai_status_t mock_set_stp_port_attribute(_In_ sai_object_id_t stp_port_id, + _In_ const sai_attribute_t *attr) +{ + return mock_sai_stp->set_stp_port_attribute(stp_port_id, attr); +} + +sai_status_t mock_get_stp_port_attribute(_Out_ sai_object_id_t stp_port_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) +{ + return mock_sai_stp->get_stp_port_attribute(stp_port_id, attr_count, attr_list); +} + +#endif // MOCK_SAI_STP_H diff --git a/orchagent/p4orch/tests/neighbor_manager_test.cpp b/orchagent/p4orch/tests/neighbor_manager_test.cpp index f335c8295fa..9d80ee09c98 100644 --- a/orchagent/p4orch/tests/neighbor_manager_test.cpp +++ b/orchagent/p4orch/tests/neighbor_manager_test.cpp @@ -3,10 +3,10 @@ #include #include +#include #include #include -#include #include "mock_response_publisher.h" #include "mock_sai_neighbor.h" #include "p4orch.h" @@ -34,6 +34,9 @@ constexpr sai_object_id_t kRouterInterfaceOid1 = 0x295100; constexpr char *kRouterInterfaceId2 = "Ethernet20"; constexpr sai_object_id_t kRouterInterfaceOid2 = 0x51411; +constexpr char* kRouterInterfaceId3 = "Ethernet21"; +constexpr sai_object_id_t kRouterInterfaceOid3 = 0x51412; + const swss::IpAddress kNeighborId1("10.0.0.22"); const swss::MacAddress kMacAddress1("00:01:02:03:04:05"); @@ -124,6 +127,8 @@ class NeighborManagerTest : public ::testing::Test mock_sai_neighbor = &mock_sai_neighbor_; sai_neighbor_api->create_neighbor_entry = mock_create_neighbor_entry; sai_neighbor_api->remove_neighbor_entry = mock_remove_neighbor_entry; + sai_neighbor_api->create_neighbor_entries = mock_create_neighbor_entries; + sai_neighbor_api->remove_neighbor_entries = mock_remove_neighbor_entries; sai_neighbor_api->set_neighbor_entry_attribute = mock_set_neighbor_entry_attribute; sai_neighbor_api->get_neighbor_entry_attribute = mock_get_neighbor_entry_attribute; } @@ -133,9 +138,12 @@ class NeighborManagerTest : public ::testing::Test neighbor_manager_.enqueue(APP_P4RT_NEIGHBOR_TABLE_NAME, entry); } - void Drain() - { - neighbor_manager_.drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + neighbor_manager_.drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return neighbor_manager_.drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -254,7 +262,7 @@ class NeighborManagerTest : public ::testing::Test } StrictMock mock_sai_neighbor_; - MockResponsePublisher publisher_; + StrictMock publisher_; P4OidMapper p4_oid_mapper_; NeighborManager neighbor_manager_; }; @@ -728,7 +736,10 @@ TEST_F(NeighborManagerTest, DrainValidAttributes) Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); EXPECT_CALL(mock_sai_neighbor_, create_neighbor_entry(_, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); P4NeighborEntry neighbor_entry(kRouterInterfaceId1, kNeighborId1, kMacAddress1); neighbor_entry.neigh_entry.switch_id = gSwitchId; @@ -742,7 +753,10 @@ TEST_F(NeighborManagerTest, DrainValidAttributes) Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); EXPECT_CALL(mock_sai_neighbor_, set_neighbor_entry_attribute(_, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); neighbor_entry.dst_mac_address = kMacAddress2; ValidateNeighborEntry(neighbor_entry, /*router_intf_ref_count=*/1); @@ -752,7 +766,10 @@ TEST_F(NeighborManagerTest, DrainValidAttributes) Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, DEL_COMMAND, attributes)); EXPECT_CALL(mock_sai_neighbor_, remove_neighbor_entry(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); ValidateNeighborEntryNotPresent(neighbor_entry, /*check_ref_count=*/true); } @@ -772,7 +789,12 @@ TEST_F(NeighborManagerTest, DrainInvalidAppDbEntryKey) // Enqueue entry for create operation. std::vector attributes; Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); - Drain(); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); P4NeighborEntry neighbor_entry(kRouterInterfaceId1, kNeighborId1, kMacAddress1); ValidateNeighborEntryNotPresent(neighbor_entry, /*check_ref_count=*/true); @@ -790,6 +812,11 @@ TEST_F(NeighborManagerTest, DrainInvalidAppDbEntryAttributes) std::vector attributes; Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, Drain(/*failure_before=*/false)); appl_db_key = std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + kTableKeyDelimiter + CreateNeighborAppDbKey(kRouterInterfaceId1, kNeighborId1); @@ -797,8 +824,12 @@ TEST_F(NeighborManagerTest, DrainInvalidAppDbEntryAttributes) attributes.clear(); attributes.push_back(swss::FieldValueTuple{p4orch::kDstMac, swss::MacAddress().to_string()}); Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); - - Drain(); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); // Validate that first create operation did not create a neighbor entry. P4NeighborEntry neighbor_entry1(kRouterInterfaceId2, kNeighborId1, kMacAddress1); @@ -820,12 +851,122 @@ TEST_F(NeighborManagerTest, DrainInvalidOperation) std::vector attributes; Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, "INVALID", attributes)); - Drain(); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); P4NeighborEntry neighbor_entry(kRouterInterfaceId1, kNeighborId1, kMacAddress1); ValidateNeighborEntryNotPresent(neighbor_entry, /*check_ref_count=*/true); } +TEST_F(NeighborManagerTest, DrainNotExecuted) { + ASSERT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + ASSERT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId2), + kRouterInterfaceOid2)); + ASSERT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId3), + kRouterInterfaceOid3)); + + const std::string appl_db_key_1 = + std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + kTableKeyDelimiter + + CreateNeighborAppDbKey(kRouterInterfaceId1, kNeighborId1); + const std::string appl_db_key_2 = + std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + kTableKeyDelimiter + + CreateNeighborAppDbKey(kRouterInterfaceId2, kNeighborId1); + const std::string appl_db_key_3 = + std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + kTableKeyDelimiter + + CreateNeighborAppDbKey(kRouterInterfaceId3, kNeighborId1); + + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstMac), + kMacAddress1.to_string()}); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_1, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_2, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_3, SET_COMMAND, attributes)); + + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_1), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_2), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_3), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, Drain(/*failure_before=*/true)); + EXPECT_EQ(nullptr, GetNeighborEntry(KeyGenerator::generateNeighborKey( + kRouterInterfaceId1, kNeighborId1))); + EXPECT_EQ(nullptr, GetNeighborEntry(KeyGenerator::generateNeighborKey( + kRouterInterfaceId2, kNeighborId1))); + EXPECT_EQ(nullptr, GetNeighborEntry(KeyGenerator::generateNeighborKey( + kRouterInterfaceId3, kNeighborId1))); +} + +TEST_F(NeighborManagerTest, DrainStopOnFirstFailure) { + ASSERT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + ASSERT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId2), + kRouterInterfaceOid2)); + ASSERT_TRUE(p4_oid_mapper_.setOID( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId3), + kRouterInterfaceOid3)); + + const std::string appl_db_key_1 = + std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + kTableKeyDelimiter + + CreateNeighborAppDbKey(kRouterInterfaceId1, kNeighborId1); + const std::string appl_db_key_2 = + std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + kTableKeyDelimiter + + CreateNeighborAppDbKey(kRouterInterfaceId2, kNeighborId1); + const std::string appl_db_key_3 = + std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + kTableKeyDelimiter + + CreateNeighborAppDbKey(kRouterInterfaceId3, kNeighborId1); + + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstMac), + kMacAddress1.to_string()}); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_1, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_2, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_3, SET_COMMAND, attributes)); + + EXPECT_CALL(mock_sai_neighbor_, create_neighbor_entry(_, _, _)) + .WillOnce(Return(SAI_STATUS_SUCCESS)) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_1), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_2), + Eq(attributes), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_3), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, GetNeighborEntry(KeyGenerator::generateNeighborKey( + kRouterInterfaceId1, kNeighborId1))); + EXPECT_EQ(nullptr, GetNeighborEntry(KeyGenerator::generateNeighborKey( + kRouterInterfaceId2, kNeighborId1))); + EXPECT_EQ(nullptr, GetNeighborEntry(KeyGenerator::generateNeighborKey( + kRouterInterfaceId3, kNeighborId1))); +} + TEST_F(NeighborManagerTest, VerifyStateTest) { P4NeighborEntry neighbor_entry(kRouterInterfaceId1, kNeighborId1, kMacAddress1); diff --git a/orchagent/p4orch/tests/next_hop_manager_test.cpp b/orchagent/p4orch/tests/next_hop_manager_test.cpp index 620474f1a13..39f4c06de6f 100644 --- a/orchagent/p4orch/tests/next_hop_manager_test.cpp +++ b/orchagent/p4orch/tests/next_hop_manager_test.cpp @@ -4,11 +4,11 @@ #include #include +#include #include #include #include "ipaddress.h" -#include #include "mock_response_publisher.h" #include "mock_sai_hostif.h" #include "mock_sai_next_hop.h" @@ -258,6 +258,8 @@ class NextHopManagerTest : public ::testing::Test sai_next_hop_api->remove_next_hop = mock_remove_next_hop; sai_next_hop_api->set_next_hop_attribute = mock_set_next_hop_attribute; sai_next_hop_api->get_next_hop_attribute = mock_get_next_hop_attribute; + sai_next_hop_api->create_next_hops = mock_create_next_hops; + sai_next_hop_api->remove_next_hops = mock_remove_next_hops; } void TearDown() override @@ -270,9 +272,12 @@ class NextHopManagerTest : public ::testing::Test next_hop_manager_.enqueue(APP_P4RT_NEXTHOP_TABLE_NAME, entry); } - void Drain() - { - next_hop_manager_.drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + next_hop_manager_.drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return next_hop_manager_.drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -338,7 +343,7 @@ class NextHopManagerTest : public ::testing::Test } StrictMock mock_sai_next_hop_; - MockResponsePublisher publisher_; + StrictMock publisher_; P4OidMapper p4_oid_mapper_; NextHopManager next_hop_manager_; StrictMock mock_sai_hostif_; @@ -812,8 +817,11 @@ TEST_F(NextHopManagerTest, DrainValidAppEntryShouldSucceed) EXPECT_TRUE(ResolveNextHopEntryDependency(kP4NextHopAppDbEntry2, kRouterInterfaceOid2)); EXPECT_CALL(mock_sai_next_hop_, create_next_hop(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kNextHopOid), Return(SAI_STATUS_SUCCESS))); - - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); EXPECT_TRUE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); } @@ -834,8 +842,11 @@ TEST_F(NextHopManagerTest, DrainValidTunnelNexthopAppEntryShouldSucceed) EXPECT_TRUE(ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry2, kTunnelOid2)); EXPECT_CALL(mock_sai_next_hop_, create_next_hop(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS))); - - Drain(); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), + Eq(kfvKey(tunnel_app_db_entry)), + Eq(kfvFieldsValues(tunnel_app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); EXPECT_TRUE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry2, kTunnelNextHopOid)); @@ -847,7 +858,11 @@ TEST_F(NextHopManagerTest, DrainValidTunnelNexthopAppEntryShouldSucceed) EXPECT_CALL(mock_sai_next_hop_, remove_next_hop(Eq(kTunnelNextHopOid))).WillOnce(Return(SAI_STATUS_SUCCESS)); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Validate the next hop entry has been deleted in both P4 next hop manager // and centralized mapper. @@ -878,8 +893,12 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidOpShouldBeNoOp) Enqueue(app_db_entry); EXPECT_TRUE(ResolveNextHopEntryDependency(kP4NextHopAppDbEntry2, kRouterInterfaceOid2)); - - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); } @@ -897,8 +916,13 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) SET_COMMAND, fvs); Enqueue(app_db_entry); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); - Drain(); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); // Missing action field @@ -907,18 +931,26 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; Enqueue(app_db_entry); - - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); // Missing neighbor field fvs = {{p4orch::kAction, p4orch::kSetIpNexthop}, {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; Enqueue(app_db_entry); - - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); // set_ip_nexthop + missing router_interface_id @@ -926,8 +958,13 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; Enqueue(app_db_entry); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); - Drain(); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); // set_ip_nexthop + invalid param/tunnel_id @@ -937,8 +974,13 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; Enqueue(app_db_entry); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); - Drain(); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); // set_p2p_tunnel_encap_nexthop + invalid router_interface_id @@ -948,8 +990,13 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; Enqueue(app_db_entry); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); - Drain(); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry2, kNextHopOid)); // set_p2p_tunnel_encap_nexthop + missing tunnel_id @@ -957,8 +1004,12 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; Enqueue(app_db_entry); - - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry2, kNextHopOid)); } @@ -969,14 +1020,21 @@ TEST_F(NextHopManagerTest, DrainUpdateRequestShouldBeUnsupported) nlohmann::json j; j[prependMatchField(p4orch::kNexthopId)] = kNextHopId; - std::vector fvs{{prependParamField(p4orch::kNeighborId), kNeighborId2}, - {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; + std::vector fvs{ + {p4orch::kAction, p4orch::kSetIpNexthop}, + {prependParamField(p4orch::kNeighborId), kNeighborId2}, + {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs); Enqueue(app_db_entry); EXPECT_TRUE(ResolveNextHopEntryDependency(kP4NextHopAppDbEntry2, kRouterInterfaceOid2)); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_UNIMPLEMENTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNIMPLEMENTED, + Drain(/*failure_before=*/false)); // Expect that the update call will fail, so next hop entry's fields stay the // same. @@ -1004,7 +1062,11 @@ TEST_F(NextHopManagerTest, DrainDeleteRequestShouldSucceedForExistingNextHop) .WillOnce(Return(SAI_STATUS_SUCCESS)); Enqueue(app_db_entry); - Drain(); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry)), + Eq(kfvFieldsValues(app_db_entry)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); // Validate the next hop entry has been deleted in both P4 next hop manager // and centralized mapper. @@ -1090,6 +1152,96 @@ TEST_F(NextHopManagerTest, VerifyIpNextHopStateTest) p4_next_hop_entry->gre_tunnel_id = saved_gre_tunnel_id; } +TEST_F(NextHopManagerTest, DrainNotExecuted) { + std::vector fvs{ + {p4orch::kAction, p4orch::kSetIpNexthop}, + {prependParamField(p4orch::kNeighborId), kNeighborId2}, + {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; + EXPECT_TRUE(ResolveNextHopEntryDependency(kP4NextHopAppDbEntry2, + kRouterInterfaceOid2)); + nlohmann::json j; + j[prependMatchField(p4orch::kNexthopId)] = "1"; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kNexthopId)] = "2"; + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kNexthopId)] = "3"; + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, Drain(/*failure_before=*/true)); + EXPECT_EQ(nullptr, GetNextHopEntry(KeyGenerator::generateNextHopKey("1"))); + EXPECT_EQ(nullptr, GetNextHopEntry(KeyGenerator::generateNextHopKey("2"))); + EXPECT_EQ(nullptr, GetNextHopEntry(KeyGenerator::generateNextHopKey("3"))); +} + +TEST_F(NextHopManagerTest, DrainStopOnFirstFailure) { + std::vector fvs{ + {p4orch::kAction, p4orch::kSetIpNexthop}, + {prependParamField(p4orch::kNeighborId), kNeighborId2}, + {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; + EXPECT_TRUE(ResolveNextHopEntryDependency(kP4NextHopAppDbEntry2, + kRouterInterfaceOid2)); + nlohmann::json j; + j[prependMatchField(p4orch::kNexthopId)] = "1"; + swss::KeyOpFieldsValuesTuple app_db_entry_1( + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kNexthopId)] = "2"; + swss::KeyOpFieldsValuesTuple app_db_entry_2( + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + j[prependMatchField(p4orch::kNexthopId)] = "3"; + swss::KeyOpFieldsValuesTuple app_db_entry_3( + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry_1); + Enqueue(app_db_entry_2); + Enqueue(app_db_entry_3); + + EXPECT_CALL(mock_sai_next_hop_, create_next_hop(_, _, _, _)) + .WillOnce( + DoAll(SetArgPointee<0>(kNextHopOid), Return(SAI_STATUS_SUCCESS))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_1)), + Eq(kfvFieldsValues(app_db_entry_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_2)), + Eq(kfvFieldsValues(app_db_entry_2)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(app_db_entry_3)), + Eq(kfvFieldsValues(app_db_entry_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, GetNextHopEntry(KeyGenerator::generateNextHopKey("1"))); + EXPECT_EQ(nullptr, GetNextHopEntry(KeyGenerator::generateNextHopKey("2"))); + EXPECT_EQ(nullptr, GetNextHopEntry(KeyGenerator::generateNextHopKey("3"))); +} + TEST_F(NextHopManagerTest, VerifyTunnelNextHopStateTest) { ASSERT_TRUE(ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry1, kTunnelOid1)); diff --git a/orchagent/p4orch/tests/p4oidmapper_test.cpp b/orchagent/p4orch/tests/p4oidmapper_test.cpp index bde2ee656bd..e90446bcc14 100644 --- a/orchagent/p4orch/tests/p4oidmapper_test.cpp +++ b/orchagent/p4orch/tests/p4oidmapper_test.cpp @@ -21,10 +21,30 @@ constexpr char *kRouteObject2 = "Route2"; constexpr sai_object_id_t kOid1 = 1; constexpr sai_object_id_t kOid2 = 2; -std::string convertToDBField(_In_ const sai_object_type_t object_type, _In_ const std::string &key) -{ - return sai_serialize_object_type(object_type) + ":" + key; -} + +std::string cache_dump = + R"({ + "SAI_OBJECT_TYPE_NEXT_HOP": { + "NextHop1": { + "ref_count": 0, + "sai_oid": "oid:0x1" + }, + "NextHop2": { + "ref_count": 100, + "sai_oid": "oid:0x2" + } + }, + "SAI_OBJECT_TYPE_ROUTE_ENTRY": { + "Route1": { + "ref_count": 0, + "sai_oid": "oid:0xdeadf00ddeadf00d" + }, + "Route2": { + "ref_count": 200, + "sai_oid": "oid:0xdeadf00ddeadf00d" + } + } +})"; TEST(P4OidMapperTest, MapperTest) { @@ -86,7 +106,7 @@ TEST(P4OidMapperTest, MapperTest) EXPECT_FALSE(mapper.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, kRouteObject1)); EXPECT_FALSE(mapper.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, kRouteObject2)); EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); - EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2).empty()); + EXPECT_TRUE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2).empty()); } TEST(P4OidMapperTest, ErrorTest) @@ -135,7 +155,6 @@ TEST(P4OidMapperTest, ErrorTest) TEST(P4OidMapperTest, VerifyMapperTest) { P4OidMapper mapper; - swss::Table table(nullptr, "P4RT_KEY_TO_OID"); EXPECT_TRUE(mapper.setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1)); EXPECT_TRUE(mapper.setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2, /*ref_count=*/100)); @@ -146,13 +165,25 @@ TEST(P4OidMapperTest, VerifyMapperTest) EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid1).empty()); EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, "invalid", kOid1).empty()); - // Verification should fail if OID in DB mismatches. - table.hset("", convertToDBField(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1), sai_serialize_object_id(kOid2)); - EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); +} - // Verification should fail if OID in DB is not found. - table.hdel("", convertToDBField(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1)); - EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); +TEST(P4OidMapperTest, DumpEmptyStateCacheTest) { + P4OidMapper mapper; + std::string msg = mapper.dumpStateCache(); + EXPECT_EQ(msg, "{}"); +} + +TEST(P4OidMapperTest, DumpStateCacheTest) { + P4OidMapper mapper; + EXPECT_TRUE(mapper.setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1)); + EXPECT_TRUE(mapper.setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2, + /*ref_count=*/100)); + EXPECT_TRUE(mapper.setDummyOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, kRouteObject1)); + EXPECT_TRUE(mapper.setDummyOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, kRouteObject2, + /*ref_count=*/200)); + + std::string msg = mapper.dumpStateCache(); + EXPECT_EQ(msg, cache_dump); } } // namespace diff --git a/orchagent/p4orch/tests/p4orch_test.cpp b/orchagent/p4orch/tests/p4orch_test.cpp new file mode 100644 index 00000000000..4484c053e31 --- /dev/null +++ b/orchagent/p4orch/tests/p4orch_test.cpp @@ -0,0 +1,385 @@ +#include "p4orch.h" + +#include +#include + +#include + +#include "mock_response_publisher.h" +#include "mock_sai_bridge.h" +#include "mock_sai_hostif.h" +#include "mock_sai_neighbor.h" +#include "mock_sai_next_hop.h" +#include "mock_sai_route.h" +#include "mock_sai_router_interface.h" +#include "mock_sai_switch.h" + +using ::p4orch::kTableKeyDelimiter; + +extern P4Orch* gP4Orch; +extern VRFOrch* gVrfOrch; +extern std::unique_ptr gMockResponsePublisher; +extern VRFOrch* gVrfOrch; +extern swss::DBConnector* gAppDb; +extern sai_hostif_api_t* sai_hostif_api; +extern sai_switch_api_t* sai_switch_api; +extern sai_bridge_api_t* sai_bridge_api; +extern sai_router_interface_api_t* sai_router_intfs_api; +extern sai_neighbor_api_t* sai_neighbor_api; +extern sai_next_hop_api_t* sai_next_hop_api; +extern sai_route_api_t* sai_route_api; + +using ::testing::_; +using ::testing::DoAll; +using ::testing::Eq; +using ::testing::InSequence; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::SetArrayArgument; +using ::testing::StrictMock; + +class P4OrchTest : public ::testing::Test { + protected: + P4OrchTest() { + mock_sai_hostif = &mock_sai_hostif_; + sai_hostif_api->create_hostif_trap = mock_create_hostif_trap; + sai_hostif_api->create_hostif_table_entry = mock_create_hostif_table_entry; + mock_sai_switch = &mock_sai_switch_; + sai_switch_api->get_switch_attribute = mock_get_switch_attribute; + mock_sai_router_intf = &mock_sai_router_intf_; + sai_router_intfs_api->create_router_interface = + mock_create_router_interface; + sai_router_intfs_api->remove_router_interface = + mock_remove_router_interface; + sai_router_intfs_api->set_router_interface_attribute = + mock_set_router_interface_attribute; + sai_router_intfs_api->get_router_interface_attribute = + mock_get_router_interface_attribute; + mock_sai_neighbor = &mock_sai_neighbor_; + sai_neighbor_api->create_neighbor_entry = mock_create_neighbor_entry; + sai_neighbor_api->remove_neighbor_entry = mock_remove_neighbor_entry; + sai_neighbor_api->set_neighbor_entry_attribute = + mock_set_neighbor_entry_attribute; + sai_neighbor_api->get_neighbor_entry_attribute = + mock_get_neighbor_entry_attribute; + mock_sai_next_hop = &mock_sai_next_hop_; + sai_next_hop_api->create_next_hop = mock_create_next_hop; + sai_next_hop_api->remove_next_hop = mock_remove_next_hop; + sai_next_hop_api->set_next_hop_attribute = mock_set_next_hop_attribute; + sai_next_hop_api->get_next_hop_attribute = mock_get_next_hop_attribute; + mock_sai_route = &mock_sai_route_; + sai_route_api->create_route_entry = create_route_entry; + sai_route_api->remove_route_entry = remove_route_entry; + sai_route_api->set_route_entry_attribute = set_route_entry_attribute; + sai_route_api->get_route_entry_attribute = get_route_entry_attribute; + sai_route_api->create_route_entries = create_route_entries; + sai_route_api->remove_route_entries = remove_route_entries; + sai_route_api->set_route_entries_attribute = set_route_entries_attribute; + sai_route_api->get_route_entries_attribute = get_route_entries_attribute; + mock_sai_bridge = &mock_sai_bridge_; + sai_bridge_api->create_bridge = mock_create_bridge; + sai_bridge_api->remove_bridge = mock_remove_bridge; + sai_bridge_api->set_bridge_attribute = mock_set_bridge_attribute; + sai_bridge_api->get_bridge_attribute = mock_get_bridge_attribute; + sai_bridge_api->get_bridge_stats = mock_get_bridge_stats; + sai_bridge_api->get_bridge_stats_ext = mock_get_bridge_stats_ext; + sai_bridge_api->clear_bridge_stats = mock_clear_bridge_stats; + sai_bridge_api->create_bridge_port = mock_create_bridge_port; + sai_bridge_api->remove_bridge_port = mock_remove_bridge_port; + sai_bridge_api->set_bridge_port_attribute = mock_set_bridge_port_attribute; + sai_bridge_api->get_bridge_port_attribute = mock_get_bridge_port_attribute; + sai_bridge_api->get_bridge_port_stats = mock_get_bridge_port_stats; + sai_bridge_api->get_bridge_port_stats_ext = mock_get_bridge_port_stats_ext; + sai_bridge_api->clear_bridge_port_stats = mock_clear_bridge_port_stats; + + copp_orch_ = new CoppOrch(gAppDb, APP_COPP_TABLE_NAME); + std::vector p4_tables{APP_P4RT_TABLE_NAME}; + gP4Orch = new P4Orch(gAppDb, p4_tables, gVrfOrch, copp_orch_); + gMockResponsePublisher = std::make_unique(); + } + + ~P4OrchTest() { + delete gP4Orch; + delete copp_orch_; + gMockResponsePublisher.reset(); + } + + void HandleP4rtNotification( + const std::vector& values) { + gP4Orch->handleP4rtNotification(values); + } + + NiceMock mock_sai_hostif_; + NiceMock mock_sai_switch_; + NiceMock mock_sai_router_intf_; + NiceMock mock_sai_neighbor_; + NiceMock mock_sai_next_hop_; + NiceMock mock_sai_route_; + NiceMock mock_sai_bridge_; + CoppOrch* copp_orch_; +}; + +TEST_F(P4OrchTest, ProcessInvalidEntry) { + InSequence s; + std::vector values; + values.push_back(swss::FieldValueTuple{"invalid", ""}); + values.push_back(swss::FieldValueTuple{"invalid:invalid", ""}); + std::vector exp_values; + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq("invalid"), Eq(exp_values), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq("invalid:invalid"), Eq(exp_values), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + HandleP4rtNotification(values); +} + +TEST_F(P4OrchTest, ProcessP4Notification) { + InSequence s; + std::vector values; + + // Router interface + const std::string ritf_key = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/4\"}"; + std::vector ritf_attrs; + ritf_attrs.push_back( + swss::FieldValueTuple{prependParamField(p4orch::kPort), "Ethernet1"}); + ritf_attrs.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), + "00:01:02:03:04:05"}); + values.push_back( + swss::FieldValueTuple{ritf_key, swss::JSon::buildJson(ritf_attrs)}); + + // Neighbor + const std::string neighbor_key = std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/" + "4\",\"match/neighbor_id\":\"10.0.0.22\"}"; + std::vector neighbor_attrs; + neighbor_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kDstMac), "00:01:02:03:04:05"}); + values.push_back(swss::FieldValueTuple{ + neighbor_key, swss::JSon::buildJson(neighbor_attrs)}); + + // Nexthop + const std::string nexthop_key = + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/nexthop_id\":\"ju1u32m1.atl11:qe-3/7\"}"; + std::vector nexthop_attrs; + nexthop_attrs.push_back( + swss::FieldValueTuple{p4orch::kAction, p4orch::kSetIpNexthop}); + nexthop_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kNeighborId), "10.0.0.22"}); + nexthop_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kRouterInterfaceId), "intf-3/4"}); + values.push_back( + swss::FieldValueTuple{nexthop_key, swss::JSon::buildJson(nexthop_attrs)}); + + // Route + const std::string route_key = + std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/vrf_id\":\"b4-traffic\",\"match/ipv4_dst\":\"10.11.12.0/24\"}"; + std::vector route_attrs; + route_attrs.push_back( + swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); + route_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kNexthopId), "ju1u32m1.atl11:qe-3/7"}); + values.push_back( + swss::FieldValueTuple{route_key, swss::JSon::buildJson(route_attrs)}); + + // Delete + values.push_back(swss::FieldValueTuple{ritf_key, ""}); + values.push_back(swss::FieldValueTuple{neighbor_key, ""}); + values.push_back(swss::FieldValueTuple{nexthop_key, ""}); + values.push_back(swss::FieldValueTuple{route_key, ""}); + + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key), Eq(ritf_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(neighbor_key), Eq(neighbor_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(nexthop_key), Eq(nexthop_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(route_key), Eq(route_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + std::vector exp_values; + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(route_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(nexthop_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(neighbor_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + HandleP4rtNotification(values); +} + +TEST_F(P4OrchTest, ProcessP4NotificationStopOnFirstFailure) { + InSequence s; + std::vector values; + + // Router interface + const std::string ritf_key = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/4\"}"; + std::vector ritf_attrs; + ritf_attrs.push_back( + swss::FieldValueTuple{prependParamField(p4orch::kPort), "Ethernet1"}); + ritf_attrs.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), + "00:01:02:03:04:05"}); + values.push_back( + swss::FieldValueTuple{ritf_key, swss::JSon::buildJson(ritf_attrs)}); + + // Neighbor + const std::string neighbor_key = std::string(APP_P4RT_NEIGHBOR_TABLE_NAME) + + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/" + "4\",\"match/neighbor_id\":\"10.0.0.22\"}"; + std::vector neighbor_attrs; + neighbor_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kDstMac), "00:01:02:03:04:05"}); + values.push_back(swss::FieldValueTuple{ + neighbor_key, swss::JSon::buildJson(neighbor_attrs)}); + + // Nexthop + const std::string nexthop_key = + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/nexthop_id\":\"ju1u32m1.atl11:qe-3/7\"}"; + std::vector nexthop_attrs; + nexthop_attrs.push_back( + swss::FieldValueTuple{p4orch::kAction, p4orch::kSetIpNexthop}); + nexthop_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kNeighborId), "10.0.0.22"}); + nexthop_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kRouterInterfaceId), "intf-3/4"}); + values.push_back( + swss::FieldValueTuple{nexthop_key, swss::JSon::buildJson(nexthop_attrs)}); + + // Route + const std::string route_key = + std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/vrf_id\":\"b4-traffic\",\"match/ipv4_dst\":\"10.11.12.0/24\"}"; + std::vector route_attrs; + route_attrs.push_back( + swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); + route_attrs.push_back(swss::FieldValueTuple{ + prependParamField(p4orch::kNexthopId), "ju1u32m1.atl11:qe-3/7"}); + values.push_back( + swss::FieldValueTuple{route_key, swss::JSon::buildJson(route_attrs)}); + + // Delete + values.push_back(swss::FieldValueTuple{ritf_key, ""}); + values.push_back(swss::FieldValueTuple{neighbor_key, ""}); + values.push_back(swss::FieldValueTuple{nexthop_key, ""}); + values.push_back(swss::FieldValueTuple{route_key, ""}); + + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key), Eq(ritf_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(neighbor_key), Eq(neighbor_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(nexthop_key), Eq(nexthop_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + std::vector exp_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(route_key), Eq(route_attrs), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + std::vector exp_values; + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(neighbor_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(nexthop_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(route_key), Eq(exp_values), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + HandleP4rtNotification(values); +} + +TEST_F(P4OrchTest, ProcessP4NotificationStopOnFirstFailureDifferentTypes) { + InSequence s; + std::vector values; + + // Router interface + const std::string ritf_key_1 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-1\"}"; + const std::string ritf_key_2 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-2\"}"; + std::vector ritf_attrs; + ritf_attrs.push_back( + swss::FieldValueTuple{prependParamField(p4orch::kPort), "Ethernet1"}); + ritf_attrs.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), + "00:01:02:03:04:05"}); + + // Add + values.push_back( + swss::FieldValueTuple{ritf_key_1, swss::JSon::buildJson(ritf_attrs)}); + values.push_back( + swss::FieldValueTuple{ritf_key_2, swss::JSon::buildJson(ritf_attrs)}); + + // Delete + values.push_back(swss::FieldValueTuple{ritf_key_1, ""}); + values.push_back(swss::FieldValueTuple{ritf_key_2, ""}); + + // Add + values.push_back( + swss::FieldValueTuple{ritf_key_1, swss::JSon::buildJson(ritf_attrs)}); + values.push_back( + swss::FieldValueTuple{ritf_key_2, swss::JSon::buildJson(ritf_attrs)}); + + // Delete + values.push_back(swss::FieldValueTuple{ritf_key_1, ""}); + values.push_back(swss::FieldValueTuple{ritf_key_2, ""}); + + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_1), Eq(ritf_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_2), Eq(ritf_attrs), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(_)) + .WillOnce(Return(SAI_STATUS_FAILURE)); + std::vector exp_values; + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_1), Eq(exp_values), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_2), Eq(exp_values), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_1), Eq(ritf_attrs), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_2), Eq(ritf_attrs), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_1), Eq(exp_values), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(ritf_key_2), Eq(exp_values), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + HandleP4rtNotification(values); +} diff --git a/orchagent/p4orch/tests/return_code_test.cpp b/orchagent/p4orch/tests/return_code_test.cpp index 7ab21121aa4..6ee73b74f9c 100644 --- a/orchagent/p4orch/tests/return_code_test.cpp +++ b/orchagent/p4orch/tests/return_code_test.cpp @@ -119,6 +119,7 @@ TEST(ReturnCodeTest, SaiCodeToReturnCodeMapping) {SAI_STATUS_TABLE_FULL, StatusCode::SWSS_RC_FULL}, {SAI_STATUS_NOT_IMPLEMENTED, StatusCode::SWSS_RC_UNIMPLEMENTED}, {SAI_STATUS_OBJECT_IN_USE, StatusCode::SWSS_RC_IN_USE}, + {SAI_STATUS_NOT_EXECUTED, StatusCode::SWSS_RC_NOT_EXECUTED}, {SAI_STATUS_FAILURE, StatusCode::SWSS_RC_UNKNOWN}, {SAI_STATUS_INVALID_ATTRIBUTE_0, StatusCode::SWSS_RC_INVALID_PARAM}, {SAI_STATUS_INVALID_ATTRIBUTE_10, StatusCode::SWSS_RC_INVALID_PARAM}, diff --git a/orchagent/p4orch/tests/route_manager_test.cpp b/orchagent/p4orch/tests/route_manager_test.cpp index bd03f357cf0..346251127d8 100644 --- a/orchagent/p4orch/tests/route_manager_test.cpp +++ b/orchagent/p4orch/tests/route_manager_test.cpp @@ -5,11 +5,11 @@ #include #include +#include #include #include #include "ipprefix.h" -#include #include "mock_response_publisher.h" #include "mock_sai_route.h" #include "p4orch.h" @@ -41,6 +41,7 @@ namespace constexpr char *kIpv4Prefix = "10.11.12.0/24"; constexpr char *kIpv4Prefix2 = "10.12.12.0/24"; constexpr char *kIpv6Prefix = "2001:db8:1::/32"; +constexpr char* kIpv6Prefix2 = "2001:db8:2::/32"; constexpr char *kNexthopId1 = "ju1u32m1.atl11:qe-3/7"; constexpr sai_object_id_t kNexthopOid1 = 1; constexpr char *kNexthopId2 = "ju1u32m2.atl11:qe-3/7"; @@ -247,9 +248,12 @@ class RouteManagerTest : public ::testing::Test route_manager_.enqueue(table_name, entry); } - void Drain() - { - route_manager_.drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + route_manager_.drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return route_manager_.drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -1500,25 +1504,27 @@ TEST_F(RouteManagerTest, UpdateRouteFromSetWcmpToSetNextHopAndMetadataSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; - exp_sai_attr.value.u32 = kMetadataInt2; + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr_1.value.u32 = kMetadataInt2; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = kNexthopOid2; - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = kNexthopOid2; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -1586,25 +1592,26 @@ TEST_F(RouteManagerTest, UpdateRouteFromSetNexthopIdAndMetadataToSetWcmpSucceeds exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; - exp_sai_attr.value.u32 = 0; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = kWcmpGroupOid2; + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr_1.value.u32 = 0; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = kWcmpGroupOid2; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -1654,29 +1661,37 @@ TEST_F(RouteManagerTest, UpdateRouteEntryDropWithSaiErrorShouldFail) SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); - std::vector exp_failure_status{SAI_STATUS_FAILURE}; - std::vector exp_success_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + std::vector exp_status_1{SAI_STATUS_FAILURE, + SAI_STATUS_NOT_EXECUTED}; + std::vector exp_status_2{SAI_STATUS_SUCCESS, + SAI_STATUS_FAILURE}; + std::vector exp_status_3{SAI_STATUS_SUCCESS}; + std::vector exp_status_4{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), + Return(SAI_STATUS_FAILURE))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(1), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_3.begin(), exp_status_3.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(1), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_4.begin(), exp_status_4.end()), + Return(SAI_STATUS_FAILURE))); // TODO: Expect critical state. EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); @@ -1688,29 +1703,37 @@ TEST_F(RouteManagerTest, UpdateRouteEntryTrapWithSaiErrorShouldFail) SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); - std::vector exp_failure_status{SAI_STATUS_FAILURE}; - std::vector exp_success_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + std::vector exp_status_1{SAI_STATUS_FAILURE, + SAI_STATUS_NOT_EXECUTED}; + std::vector exp_status_2{SAI_STATUS_SUCCESS, + SAI_STATUS_FAILURE}; + std::vector exp_status_3{SAI_STATUS_SUCCESS}; + std::vector exp_status_4{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), + Return(SAI_STATUS_FAILURE))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(1), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_3.begin(), exp_status_3.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(1), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_4.begin(), exp_status_4.end()), + Return(SAI_STATUS_FAILURE))); // TODO: Expect critical state. EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); @@ -1772,25 +1795,26 @@ TEST_F(RouteManagerTest, UpdateRouteWithDifferentNexthopIdsAndMetadatasSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; - exp_sai_attr.value.u32 = kMetadataInt2; + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr_1.value.u32 = kMetadataInt2; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = kNexthopOid2; - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = kNexthopOid2; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -1817,25 +1841,26 @@ TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToDropSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_1.value.s32 = SAI_PACKET_ACTION_DROP; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = SAI_NULL_OBJECT_ID; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -1845,57 +1870,54 @@ TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToDropSucceeds) EXPECT_EQ(0, ref_cnt); } -TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToRouteMetadataSucceeds) -{ - auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - sai_ip_prefix_t sai_ipv4_route_prefix; - copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); - SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - - auto route_entry = - GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetMetadataAndDrop, "", kMetadata1); - - sai_route_entry_t exp_sai_route_entry; - exp_sai_route_entry.switch_id = gSwitchId; - exp_sai_route_entry.vr_id = gVrfOid; - exp_sai_route_entry.destination = sai_ipv4_route_prefix; - - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; - exp_sai_attr.value.s32 = kMetadataInt1; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), - ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); - VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); - uint32_t ref_cnt; - EXPECT_TRUE( - p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); - EXPECT_EQ(0, ref_cnt); +TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToDropWithMetadataSucceeds) { + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, + kNexthopOid1); + + auto route_entry = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, + p4orch::kSetMetadataAndDrop, "", kMetadata1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_1.value.s32 = SAI_PACKET_ACTION_DROP; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = SAI_NULL_OBJECT_ID; + sai_attribute_t exp_sai_attr_3; + exp_sai_attr_3.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr_3.value.s32 = kMetadataInt1; + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL( + mock_sai_route_, + set_route_entries_attribute( + Eq(3), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{ + exp_sai_attr_1, exp_sai_attr_2, exp_sai_attr_3}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); + + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_.getRefCount( + SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), + &ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdAndMetadataToDropSucceeds) @@ -1912,34 +1934,30 @@ TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdAndMetadataToDropSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; - exp_sai_attr.value.u32 = 0; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_1.value.s32 = SAI_PACKET_ACTION_DROP; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = SAI_NULL_OBJECT_ID; + sai_attribute_t exp_sai_attr_3; + exp_sai_attr_3.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr_3.value.u32 = 0; + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL( + mock_sai_route_, + set_route_entries_attribute( + Eq(3), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{ + exp_sai_attr_1, exp_sai_attr_2, exp_sai_attr_3}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -1965,25 +1983,26 @@ TEST_F(RouteManagerTest, UpdateRouteFromDropToNexthopIdSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = kNexthopOid2; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_1.value.oid = kNexthopOid2; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_2.value.s32 = SAI_PACKET_ACTION_FORWARD; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -2010,34 +2029,30 @@ TEST_F(RouteManagerTest, UpdateRouteFromDropToWcmpWithMetadataSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; - exp_sai_attr.value.u32 = kMetadataInt2; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = kWcmpGroupOid1; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr_1.value.u32 = kMetadataInt2; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = kWcmpGroupOid1; + sai_attribute_t exp_sai_attr_3; + exp_sai_attr_3.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_3.value.s32 = SAI_PACKET_ACTION_FORWARD; + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL( + mock_sai_route_, + set_route_entries_attribute( + Eq(3), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{ + exp_sai_attr_1, exp_sai_attr_2, exp_sai_attr_3}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -2062,26 +2077,26 @@ TEST_F(RouteManagerTest, UpdateRouteFromTrapToDropAndSetMetadataSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; - exp_sai_attr.value.u32 = kMetadataInt2; - - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_1.value.s32 = SAI_PACKET_ACTION_DROP; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr_2.value.u32 = kMetadataInt2; + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -2156,25 +2171,27 @@ TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToTrapSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_TRAP; + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_1.value.s32 = SAI_PACKET_ACTION_TRAP; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_2.value.oid = SAI_NULL_OBJECT_ID; - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); - - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -2200,25 +2217,27 @@ TEST_F(RouteManagerTest, UpdateRouteFromTrapToNexthopIdSucceeds) exp_sai_route_entry.vr_id = gVrfOid; exp_sai_route_entry.destination = sai_ipv4_route_prefix; - sai_attribute_t exp_sai_attr; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - exp_sai_attr.value.oid = kNexthopOid2; - - std::vector exp_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + sai_attribute_t exp_sai_attr_1; + exp_sai_attr_1.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_1.value.oid = kNexthopOid2; + sai_attribute_t exp_sai_attr_2; + exp_sai_attr_2.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_2.value.s32 = SAI_PACKET_ACTION_FORWARD; - exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - exp_sai_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + std::vector exp_status{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), - AttrArrayEq(std::vector{exp_sai_attr}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_route_, + set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry, exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr_1, + exp_sai_attr_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -2239,33 +2258,32 @@ TEST_F(RouteManagerTest, UpdateRouteFromTrapToNexthopIdAndMetadataRecoverFailure kNexthopId2, kMetadata1); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid2); - std::vector exp_failure_status{SAI_STATUS_FAILURE}; - std::vector exp_success_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))); + std::vector exp_status_1{ + SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; + std::vector exp_status_2{SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + std::vector exp_status_3{SAI_STATUS_SUCCESS, + SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(3), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(3), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_3.begin(), exp_status_3.end()), + Return(SAI_STATUS_FAILURE))); // TODO: Expect critical state. EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); @@ -2336,25 +2354,27 @@ TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdAndMetadataToDropRecoverFailure auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); - std::vector exp_failure_status{SAI_STATUS_FAILURE}; - std::vector exp_success_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + std::vector exp_status_1{ + SAI_STATUS_FAILURE, SAI_STATUS_NOT_EXECUTED, SAI_STATUS_NOT_EXECUTED}; + std::vector exp_status_2{ + SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; + std::vector exp_status_3{SAI_STATUS_SUCCESS, + SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(3), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), + Return(SAI_STATUS_FAILURE))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(3), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_3.begin(), exp_status_3.end()), + Return(SAI_STATUS_FAILURE))); // TODO: Expect critical state. EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); @@ -2372,21 +2392,26 @@ TEST_F(RouteManagerTest, UpdateRouteFromDifferentNexthopIdAndMetadataRecoverFail p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid2); - std::vector exp_failure_status{SAI_STATUS_FAILURE}; - std::vector exp_success_status{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + std::vector exp_status_1{SAI_STATUS_FAILURE, + SAI_STATUS_NOT_EXECUTED}; + std::vector exp_status_2{SAI_STATUS_SUCCESS, + SAI_STATUS_FAILURE}; + std::vector exp_status_3{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), + Return(SAI_STATUS_FAILURE))); EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), - Return(SAI_STATUS_SUCCESS))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))) - .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), - Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(2), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(Eq(1), _, _, _, _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_3.begin(), exp_status_3.end()), + Return(SAI_STATUS_FAILURE))); // TODO: Expect critical state. EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); @@ -2477,7 +2502,7 @@ TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), kNexthopOid2); auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, @@ -2489,7 +2514,7 @@ TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); sai_ip_prefix_t sai_ipv4_route_prefix; @@ -2512,7 +2537,7 @@ TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_3)), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetMetadataAndDrop, "", kMetadata1); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); @@ -2538,7 +2563,7 @@ TEST_F(RouteManagerTest, RouteCreateAndDeleteInDrainSucceeds) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, DEL_COMMAND, "", ""); Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); @@ -2548,7 +2573,7 @@ TEST_F(RouteManagerTest, RouteCreateAndDeleteInDrainSucceeds) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateRouteKey(gVrfName, swss_ipv4_route_prefix); auto *route_entry_ptr = GetRouteEntry(key); @@ -2583,7 +2608,8 @@ TEST_F(RouteManagerTest, UpdateFailsWhenCreateAndUpdateTheSameRouteInDrain) Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); @@ -2618,7 +2644,8 @@ TEST_F(RouteManagerTest, DeleteFailsWhenCreateAndDeleteTheSameRouteInDrain) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); @@ -2662,7 +2689,7 @@ TEST_F(RouteManagerTest, RouteCreateInDrainSucceedsWhenVrfIsEmpty) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateRouteKey(kDefaultVrfName, swss::IpPrefix(kIpv4Prefix)); auto *route_entry_ptr = GetRouteEntry(key); EXPECT_NE(nullptr, route_entry_ptr); @@ -2683,7 +2710,8 @@ TEST_F(RouteManagerTest, DeserializeRouteEntryInDrainFails) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); } TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenVrfDoesNotExist) @@ -2696,7 +2724,7 @@ TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenVrfDoesNotExist) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, Drain(/*failure_before=*/false)); } TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenNexthopDoesNotExist) @@ -2708,7 +2736,7 @@ TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenNexthopDoesNotExist) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, Drain(/*failure_before=*/false)); } TEST_F(RouteManagerTest, InvalidateSetRouteEntryInDrainFails) @@ -2722,7 +2750,8 @@ TEST_F(RouteManagerTest, InvalidateSetRouteEntryInDrainFails) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); } TEST_F(RouteManagerTest, InvalidateDelRouteEntryInDrainFails) @@ -2735,7 +2764,7 @@ TEST_F(RouteManagerTest, InvalidateDelRouteEntryInDrainFails) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, Drain(/*failure_before=*/false)); } TEST_F(RouteManagerTest, InvalidCommandInDrainFails) @@ -2754,7 +2783,8 @@ TEST_F(RouteManagerTest, InvalidCommandInDrainFails) FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) .Times(1); - Drain(); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); } TEST_F(RouteManagerTest, BatchedCreateSucceeds) @@ -2797,9 +2827,9 @@ TEST_F(RouteManagerTest, BatchedCreateSucceeds) mock_sai_route_, create_route_entries( Eq(2), - RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv4, exp_sai_route_entry_ipv6}), ArrayEq(std::vector{1, 1}), - AttrArrayArrayEq(std::vector>{{exp_sai_attr_ipv6}, {exp_sai_attr_ipv4}}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr_ipv4}, {exp_sai_attr_ipv6}}), Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(CreateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), @@ -2850,14 +2880,14 @@ TEST_F(RouteManagerTest, BatchedCreatePartiallySucceeds) exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; exp_sai_attr_ipv6.value.oid = kWcmpGroupOid1; - std::vector exp_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; EXPECT_CALL( mock_sai_route_, create_route_entries( Eq(2), - RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv4, exp_sai_route_entry_ipv6}), ArrayEq(std::vector{1, 1}), - AttrArrayArrayEq(std::vector>{{exp_sai_attr_ipv6}, {exp_sai_attr_ipv4}}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr_ipv4}, {exp_sai_attr_ipv6}}), Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_FAILURE))); EXPECT_THAT(CreateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), @@ -2913,25 +2943,26 @@ TEST_F(RouteManagerTest, BatchedUpdateSucceeds) exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; exp_sai_attr_ipv6.value.oid = kWcmpGroupOid2; - std::vector exp_status_1{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(2), - RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, - exp_sai_route_entry_ipv4}), - AttrArrayEq(std::vector{exp_sai_attr_ipv6, exp_sai_attr_ipv4}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), Return(SAI_STATUS_SUCCESS))); - sai_attribute_t exp_sai_attr_ipv6_2; exp_sai_attr_ipv6_2.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; exp_sai_attr_ipv6_2.value.s32 = SAI_PACKET_ACTION_FORWARD; - std::vector exp_status_2{SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6}), - AttrArrayEq(std::vector{exp_sai_attr_ipv6_2}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), Return(SAI_STATUS_SUCCESS))); + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS, + SAI_STATUS_SUCCESS}; + EXPECT_CALL( + mock_sai_route_, + set_route_entries_attribute( + Eq(3), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry_ipv4, exp_sai_route_entry_ipv6, + exp_sai_route_entry_ipv6}), + AttrArrayEq(std::vector{ + exp_sai_attr_ipv4, exp_sai_attr_ipv6, exp_sai_attr_ipv6_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry_ipv4, sai_ipv4_route_prefix, gVrfOid); @@ -2986,31 +3017,144 @@ TEST_F(RouteManagerTest, BatchedUpdatePartiallySucceeds) exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; exp_sai_attr_ipv6.value.oid = kWcmpGroupOid2; - std::vector exp_status_1{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; - EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( - Eq(2), - RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, - exp_sai_route_entry_ipv4}), - AttrArrayEq(std::vector{exp_sai_attr_ipv6, exp_sai_attr_ipv4}), - Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) - .WillOnce(DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), Return(SAI_STATUS_FAILURE))); - EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), - ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_UNKNOWN})); + sai_attribute_t exp_sai_attr_ipv6_2; + exp_sai_attr_ipv6_2.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_ipv6_2.value.s32 = SAI_PACKET_ACTION_FORWARD; + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE, + SAI_STATUS_NOT_EXECUTED}; + EXPECT_CALL( + mock_sai_route_, + set_route_entries_attribute( + Eq(3), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry_ipv4, exp_sai_route_entry_ipv6, + exp_sai_route_entry_ipv6}), + AttrArrayEq(std::vector{ + exp_sai_attr_ipv4, exp_sai_attr_ipv6, exp_sai_attr_ipv6_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry_ipv4, + route_entry_ipv6}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, + StatusCode::SWSS_RC_UNKNOWN})); VerifyRouteEntry(route_entry_ipv4, sai_ipv4_route_prefix, gVrfOid); - route_entry_ipv6 = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kDrop, ""); + route_entry_ipv6 = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, + p4orch::kDrop, ""); VerifyRouteEntry(route_entry_ipv6, sai_ipv6_route_prefix, gVrfOid); uint32_t ref_cnt; - EXPECT_TRUE( - p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_TRUE(p4_oid_mapper_.getRefCount( + SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), + &ref_cnt)); EXPECT_EQ(0, ref_cnt); - EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, - KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_TRUE(p4_oid_mapper_.getRefCount( + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); EXPECT_EQ(1, ref_cnt); - EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, - KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); + EXPECT_TRUE(p4_oid_mapper_.getRefCount( + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); EXPECT_EQ(0, ref_cnt); } +TEST_F(RouteManagerTest, BatchedUpdatePartiallySucceedsRecover) { + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry_ipv4 = GenerateP4RouteEntry( + gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, + kNexthopOid1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), + kWcmpGroupOid1); + + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry_ipv6 = GenerateP4RouteEntry( + gVrfName, swss_ipv6_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); + SetupDropRouteEntry(gVrfName, swss_ipv6_route_prefix); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), + kWcmpGroupOid2); + + sai_route_entry_t exp_sai_route_entry_ipv4; + exp_sai_route_entry_ipv4.switch_id = gSwitchId; + exp_sai_route_entry_ipv4.vr_id = gVrfOid; + exp_sai_route_entry_ipv4.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr_ipv4; + exp_sai_attr_ipv4.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv4.value.oid = kWcmpGroupOid1; + + sai_route_entry_t exp_sai_route_entry_ipv6; + exp_sai_route_entry_ipv6.switch_id = gSwitchId; + exp_sai_route_entry_ipv6.vr_id = gVrfOid; + exp_sai_route_entry_ipv6.destination = sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr_ipv6; + exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv6.value.oid = kWcmpGroupOid2; + sai_attribute_t exp_sai_attr_ipv6_2; + exp_sai_attr_ipv6_2.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_ipv6_2.value.s32 = SAI_PACKET_ACTION_FORWARD; + sai_attribute_t exp_sai_attr_ipv6_3; + exp_sai_attr_ipv6_3.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv6_3.value.oid = SAI_NULL_OBJECT_ID; + + std::vector exp_status_1{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS, + SAI_STATUS_FAILURE}; + std::vector exp_status_2{SAI_STATUS_SUCCESS}; + EXPECT_CALL( + mock_sai_route_, + set_route_entries_attribute( + Eq(3), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry_ipv4, exp_sai_route_entry_ipv6, + exp_sai_route_entry_ipv6}), + AttrArrayEq(std::vector{ + exp_sai_attr_ipv4, exp_sai_attr_ipv6, exp_sai_attr_ipv6_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL( + mock_sai_route_, + set_route_entries_attribute( + Eq(1), + RouteEntryArrayEq( + std::vector{exp_sai_route_entry_ipv6}), + AttrArrayEq(std::vector{exp_sai_attr_ipv6_3}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry_ipv4, + route_entry_ipv6}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, + StatusCode::SWSS_RC_UNKNOWN})); + VerifyRouteEntry(route_entry_ipv4, sai_ipv4_route_prefix, gVrfOid); + route_entry_ipv6 = + GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kDrop, ""); + VerifyRouteEntry(route_entry_ipv6, sai_ipv6_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_.getRefCount( + SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), + &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount( + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount( + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + TEST_F(RouteManagerTest, BatchedDeleteSucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); @@ -3038,7 +3182,7 @@ TEST_F(RouteManagerTest, BatchedDeleteSucceeds) std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_route_, remove_route_entries(Eq(2), RouteEntryArrayEq(std::vector{ - exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + exp_sai_route_entry_ipv4, exp_sai_route_entry_ipv6}), Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_THAT(DeleteRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), @@ -3079,10 +3223,10 @@ TEST_F(RouteManagerTest, BatchedDeletePartiallySucceeds) exp_sai_route_entry_ipv6.vr_id = gVrfOid; exp_sai_route_entry_ipv6.destination = sai_ipv6_route_prefix; - std::vector exp_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; EXPECT_CALL(mock_sai_route_, remove_route_entries(Eq(2), RouteEntryArrayEq(std::vector{ - exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + exp_sai_route_entry_ipv4, exp_sai_route_entry_ipv6}), Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_FAILURE))); EXPECT_THAT(DeleteRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), @@ -3097,6 +3241,184 @@ TEST_F(RouteManagerTest, BatchedDeletePartiallySucceeds) EXPECT_EQ(0, ref_cnt); } +TEST_F(RouteManagerTest, DrainNotExecuted) { + auto prefix_1 = swss::IpPrefix(kIpv4Prefix); + auto prefix_2 = swss::IpPrefix(kIpv4Prefix2); + auto prefix_3 = swss::IpPrefix(kIpv6Prefix); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(kNexthopId1), + kNexthopOid1); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_1, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_2, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_3 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_3, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + Enqueue(APP_P4RT_IPV6_TABLE_NAME, key_op_fvs_3); + + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_3)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, Drain(/*failure_before=*/true)); + EXPECT_EQ(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_1))); + EXPECT_EQ(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_2))); + EXPECT_EQ(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_3))); +} + +TEST_F(RouteManagerTest, DrainStopOnFirstFailureCreate) { + auto prefix_1 = swss::IpPrefix(kIpv4Prefix); + auto prefix_2 = swss::IpPrefix(kIpv4Prefix2); + auto prefix_3 = swss::IpPrefix(kIpv6Prefix); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(kNexthopId1), + kNexthopOid1); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_1, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_2, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_3 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_3, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + Enqueue(APP_P4RT_IPV6_TABLE_NAME, key_op_fvs_3); + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE, + SAI_STATUS_NOT_EXECUTED}; + EXPECT_CALL(mock_sai_route_, create_route_entries(Eq(3), _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_3)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_1))); + EXPECT_EQ(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_2))); + EXPECT_EQ(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_3))); +} + +TEST_F(RouteManagerTest, DrainStopOnFirstFailureCreateAndUpdate) { + auto prefix_1 = swss::IpPrefix(kIpv4Prefix); + auto prefix_2 = swss::IpPrefix(kIpv4Prefix2); + auto prefix_3 = swss::IpPrefix(kIpv6Prefix); + SetupDropRouteEntry(gVrfName, prefix_3); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(kNexthopId1), + kNexthopOid1); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_1, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_2, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_3 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_3, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + Enqueue(APP_P4RT_IPV6_TABLE_NAME, key_op_fvs_3); + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, create_route_entries(Eq(2), _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_3)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_1))); + EXPECT_EQ(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_2))); + EXPECT_NE(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_3))); +} + +TEST_F(RouteManagerTest, DrainStopOnFirstFailureMultipleCreateAndUpdate) { + auto prefix_1 = swss::IpPrefix(kIpv4Prefix); + auto prefix_2 = swss::IpPrefix(kIpv4Prefix2); + auto prefix_3 = swss::IpPrefix(kIpv6Prefix); + auto prefix_4 = swss::IpPrefix(kIpv6Prefix2); + SetupDropRouteEntry(gVrfName, prefix_3); + SetupDropRouteEntry(gVrfName, prefix_4); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(kNexthopId1), + kNexthopOid1); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_1, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_2, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_3 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_3, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + auto key_op_fvs_4 = GenerateKeyOpFieldsValuesTuple( + gVrfName, prefix_4, SET_COMMAND, p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + Enqueue(APP_P4RT_IPV6_TABLE_NAME, key_op_fvs_3); + Enqueue(APP_P4RT_IPV6_TABLE_NAME, key_op_fvs_4); + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, create_route_entries(Eq(2), _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_3)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_3)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_4)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_4)), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_1))); + EXPECT_EQ(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_2))); + EXPECT_NE(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_3))); + EXPECT_NE(nullptr, + GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, prefix_4))); +} + TEST_F(RouteManagerTest, VerifyStateTest) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); diff --git a/orchagent/p4orch/tests/router_interface_manager_test.cpp b/orchagent/p4orch/tests/router_interface_manager_test.cpp index d1c7330cc77..5f8afea5232 100644 --- a/orchagent/p4orch/tests/router_interface_manager_test.cpp +++ b/orchagent/p4orch/tests/router_interface_manager_test.cpp @@ -41,12 +41,17 @@ constexpr char *kPortName2 = "Ethernet2"; constexpr sai_object_id_t kPortOid2 = 0x1fed3; constexpr uint32_t kMtu2 = 4500; +constexpr char* kPortName10 = "Ethernet10"; +constexpr sai_object_id_t kPortOid10 = 0xabcfff; +constexpr uint32_t kMtu10 = 9100; + constexpr char *kRouterInterfaceId1 = "intf-3/4"; constexpr sai_object_id_t kRouterInterfaceOid1 = 0x295100; const swss::MacAddress kMacAddress1("00:01:02:03:04:05"); constexpr char *kRouterInterfaceId2 = "Ethernet20"; constexpr sai_object_id_t kRouterInterfaceOid2 = 0x51411; +constexpr sai_object_id_t kVlanOid2 = 0xffffff; const swss::MacAddress kMacAddress2("00:ff:ee:dd:cc:bb"); const swss::MacAddress kZeroMacAddress("00:00:00:00:00:00"); @@ -55,7 +60,7 @@ constexpr char *kRouterIntfAppDbKey = R"({"match/router_interface_id":"intf-3/4" std::unordered_map CreateRouterInterfaceAttributeList( const sai_object_id_t &virtual_router_oid, const swss::MacAddress mac_address, const sai_object_id_t &port_oid, - const uint32_t mtu) + const uint32_t mtu, const bool sub_port = false, const sai_object_id_t& vlan_oid = 0) { std::unordered_map attr_list; sai_attribute_value_t attr_value; @@ -69,7 +74,15 @@ std::unordered_map CreateRouterInterfaceAt attr_list[SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS] = attr_value; } - attr_value.s32 = SAI_ROUTER_INTERFACE_TYPE_PORT; + if (sub_port) + { + attr_value.oid = vlan_oid; + attr_list[SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID] = attr_value; + + attr_value.s32 = SAI_ROUTER_INTERFACE_TYPE_SUB_PORT; + } else + attr_value.s32 = SAI_ROUTER_INTERFACE_TYPE_PORT; + attr_list[SAI_ROUTER_INTERFACE_ATTR_TYPE] = attr_value; attr_value.oid = port_oid; @@ -135,6 +148,15 @@ bool MatchCreateRouterInterfaceAttributeList( matched_attr_num++; break; + case SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID: + if (attr_list[i].value.oid != + expected_attr_list.at(SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID) + .oid) { + return false; + } + matched_attr_num++; + break; + default: // Unexpected attribute present in attribute list return false; @@ -167,9 +189,12 @@ class RouterInterfaceManagerTest : public ::testing::Test router_intf_manager_.enqueue(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME, entry); } - void Drain() - { - router_intf_manager_.drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + router_intf_manager_.drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return router_intf_manager_.drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -245,14 +270,14 @@ class RouterInterfaceManagerTest : public ::testing::Test } void AddRouterInterfaceEntry(P4RouterInterfaceEntry &router_intf_entry, const sai_object_id_t port_oid, - const uint32_t mtu) + const uint32_t mtu, const bool sub_port = false, const sai_object_id_t vlan_oid = 0) { EXPECT_CALL(mock_sai_router_intf_, create_router_interface( - ::testing::NotNull(), Eq(gSwitchId), Eq(5), + ::testing::NotNull(), Eq(gSwitchId), sub_port ? Eq(6) : Eq(5), Truly(std::bind(MatchCreateRouterInterfaceAttributeList, std::placeholders::_1, CreateRouterInterfaceAttributeList( - gVirtualRouterId, router_intf_entry.src_mac_address, port_oid, mtu))))) + gVirtualRouterId, router_intf_entry.src_mac_address, port_oid, mtu, sub_port, vlan_oid))))) .WillOnce(DoAll(SetArgPointee<0>(router_intf_entry.router_interface_oid), Return(SAI_STATUS_SUCCESS))); const std::string router_intf_key = @@ -261,7 +286,7 @@ class RouterInterfaceManagerTest : public ::testing::Test } StrictMock mock_sai_router_intf_; - MockResponsePublisher publisher_; + StrictMock publisher_; P4OidMapper p4_oid_mapper_; RouterInterfaceManager router_intf_manager_; }; @@ -274,6 +299,16 @@ TEST_F(RouterInterfaceManagerTest, CreateRouterInterfaceValidAttributes) ValidateRouterInterfaceEntry(router_intf_entry); } +TEST_F(RouterInterfaceManagerTest, CreateRouterInterfaceWithSubport) +{ + P4RouterInterfaceEntry router_intf_entry(kRouterInterfaceId1, kPortName10, + kMacAddress1); + AddRouterInterfaceEntry(router_intf_entry, kPortOid10, kMtu10, true, + kVlanOid2); + + ValidateRouterInterfaceEntry(router_intf_entry); +} + TEST_F(RouterInterfaceManagerTest, CreateRouterInterfaceEntryExistsInManager) { P4RouterInterfaceEntry router_intf_entry(kRouterInterfaceId1, kPortName1, kMacAddress1); @@ -770,7 +805,10 @@ TEST_F(RouterInterfaceManagerTest, DrainValidAttributes) EXPECT_CALL(mock_sai_router_intf_, create_router_interface(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kRouterInterfaceOid1), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); P4RouterInterfaceEntry router_intf_entry(kRouterInterfaceId1, kPortName1, kMacAddress1); router_intf_entry.router_interface_oid = kRouterInterfaceOid1; @@ -782,7 +820,10 @@ TEST_F(RouterInterfaceManagerTest, DrainValidAttributes) Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); EXPECT_CALL(mock_sai_router_intf_, set_router_interface_attribute(_, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); router_intf_entry.src_mac_address = kMacAddress2; ValidateRouterInterfaceEntry(router_intf_entry); @@ -792,7 +833,10 @@ TEST_F(RouterInterfaceManagerTest, DrainValidAttributes) Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, DEL_COMMAND, attributes)); EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); - Drain(); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); ValidateRouterInterfaceEntryNotPresent(router_intf_entry.router_interface_id); } @@ -807,7 +851,12 @@ TEST_F(RouterInterfaceManagerTest, DrainInvalidAppDbEntryKey) // Enqueue entry for create operation. std::vector attributes; Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); - Drain(); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); ValidateRouterInterfaceEntryNotPresent(kRouterInterfaceId1); } @@ -821,14 +870,24 @@ TEST_F(RouterInterfaceManagerTest, DrainInvalidAppDbEntryAttributes) std::vector attributes; attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), "xyz"}); Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, Drain(/*failure_before=*/false)); + ValidateRouterInterfaceEntryNotPresent(kRouterInterfaceId1); // Zero mac address attribute. attributes.clear(); attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName1}); attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kZeroMacAddress.to_string()}); Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, SET_COMMAND, attributes)); - - Drain(); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); ValidateRouterInterfaceEntryNotPresent(kRouterInterfaceId1); } @@ -842,11 +901,101 @@ TEST_F(RouterInterfaceManagerTest, DrainInvalidOperation) attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName1}); attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kMacAddress1.to_string()}); Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key, "INVALID", attributes)); - Drain(); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key), Eq(attributes), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); ValidateRouterInterfaceEntryNotPresent(kRouterInterfaceId1); } +TEST_F(RouterInterfaceManagerTest, DrainNotExecuted) { + std::vector attributes; + attributes.push_back( + swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), + kMacAddress1.to_string()}); + + const std::string appl_db_key_1 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/4\"}"; + const std::string appl_db_key_2 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/5\"}"; + const std::string appl_db_key_3 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/6\"}"; + + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_1, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_2, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_3, SET_COMMAND, attributes)); + + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_1), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_2), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_3), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_EXECUTED, Drain(/*failure_before=*/true)); + EXPECT_EQ(nullptr, GetRouterInterfaceEntry( + KeyGenerator::generateRouterInterfaceKey("intf-3/4"))); + EXPECT_EQ(nullptr, GetRouterInterfaceEntry( + KeyGenerator::generateRouterInterfaceKey("intf-3/5"))); + EXPECT_EQ(nullptr, GetRouterInterfaceEntry( + KeyGenerator::generateRouterInterfaceKey("intf-3/6"))); +} + +TEST_F(RouterInterfaceManagerTest, DrainStopOnFirstFailure) { + std::vector attributes; + attributes.push_back( + swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), + kMacAddress1.to_string()}); + + const std::string appl_db_key_1 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/4\"}"; + const std::string appl_db_key_2 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/5\"}"; + const std::string appl_db_key_3 = + std::string(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + kTableKeyDelimiter + + "{\"match/router_interface_id\":\"intf-3/6\"}"; + + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_1, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_2, SET_COMMAND, attributes)); + Enqueue(swss::KeyOpFieldsValuesTuple(appl_db_key_3, SET_COMMAND, attributes)); + + EXPECT_CALL(mock_sai_router_intf_, create_router_interface(_, _, _, _)) + .WillOnce(Return(SAI_STATUS_SUCCESS)) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_1), + Eq(attributes), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_2), + Eq(attributes), + Eq(StatusCode::SWSS_RC_UNKNOWN), Eq(true))); + EXPECT_CALL( + publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(appl_db_key_3), Eq(attributes), + Eq(StatusCode::SWSS_RC_NOT_EXECUTED), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, Drain(/*failure_before=*/false)); + EXPECT_NE(nullptr, GetRouterInterfaceEntry( + KeyGenerator::generateRouterInterfaceKey("intf-3/4"))); + EXPECT_EQ(nullptr, GetRouterInterfaceEntry( + KeyGenerator::generateRouterInterfaceKey("intf-3/5"))); + EXPECT_EQ(nullptr, GetRouterInterfaceEntry( + KeyGenerator::generateRouterInterfaceKey("intf-3/6"))); +} + TEST_F(RouterInterfaceManagerTest, VerifyStateTest) { P4RouterInterfaceEntry router_intf_entry(kRouterInterfaceId1, kPortName1, kMacAddress1); diff --git a/orchagent/p4orch/tests/test_main.cpp b/orchagent/p4orch/tests/test_main.cpp index 787e0622f47..976cc9ec44e 100644 --- a/orchagent/p4orch/tests/test_main.cpp +++ b/orchagent/p4orch/tests/test_main.cpp @@ -34,7 +34,15 @@ char *gMirrorSession1 = "mirror-session-1"; sai_object_id_t kMirrorSessionOid1 = 9001; char *gMirrorSession2 = "mirror-session-2"; sai_object_id_t kMirrorSessionOid2 = 9002; -sai_object_id_t gUnderlayIfId; +sai_object_id_t gUnderlayIfId = 0x101; +string gMyAsicName = ""; +event_handle_t g_events_handle; + +bool gMultiAsicVoq = false; +bool isChassisDbInUse() +{ + return gMultiAsicVoq; +} #define DEFAULT_BATCH_SIZE 128 #define DEFAULT_MAX_BULK_SIZE 1000 @@ -42,6 +50,8 @@ extern int gBatchSize; size_t gMaxBulkSize = DEFAULT_MAX_BULK_SIZE; bool gSyncMode = false; bool gIsNatSupported = false; +bool gTraditionalFlexCounter = false; +sai_redis_communication_mode_t gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_REDIS_ASYNC; PortsOrch *gPortsOrch; CrmOrch *gCrmOrch; @@ -72,6 +82,7 @@ sai_udf_api_t *sai_udf_api; sai_tunnel_api_t *sai_tunnel_api; sai_my_mac_api_t *sai_my_mac_api; sai_counter_api_t *sai_counter_api; +sai_bridge_api_t* sai_bridge_api; sai_generic_programmable_api_t *sai_generic_programmable_api; task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) @@ -99,7 +110,6 @@ bool parseHandleSaiStatusFailure(task_process_status status) return true; } - namespace { @@ -111,6 +121,7 @@ using ::testing::StrictMock; void CreatePort(const std::string port_name, const uint32_t speed, const uint32_t mtu, const sai_object_id_t port_oid, Port::Type port_type = Port::PHY, const sai_port_oper_status_t oper_status = SAI_PORT_OPER_STATUS_DOWN, + const sai_object_id_t vlan_oid = 0, const sai_object_id_t vrouter_id = gVirtualRouterId, const bool admin_state_up = true) { Port port(port_name, port_type); @@ -127,6 +138,7 @@ void CreatePort(const std::string port_name, const uint32_t speed, const uint32_ port.m_vr_id = vrouter_id; port.m_admin_state_up = admin_state_up; port.m_oper_status = oper_status; + if (port_type == Port::SUBPORT) port.m_vlan_info.vlan_oid = vlan_oid; gPortsOrch->setPort(port_name, port); } @@ -151,6 +163,9 @@ void SetupPorts() /*mtu=*/9100, /*port_oid=*/0x5678, /*port_type*/ Port::MGMT); CreatePort(/*port_name=*/"Ethernet9", /*speed=*/50000, /*mtu=*/9100, /*port_oid=*/0x56789abcfff, Port::PHY, SAI_PORT_OPER_STATUS_UNKNOWN); + CreatePort(/*port_name=*/"Ethernet10", /*speed=*/50000, + /*mtu=*/9100, /*port_oid=*/0xabcfff, Port::SUBPORT, SAI_PORT_OPER_STATUS_DOWN, + /*vlan_oid=*/0xffffff); } void AddVrf() @@ -173,7 +188,7 @@ void AddVrf() } // namespace int main(int argc, char *argv[]) -{ +{ gBatchSize = DEFAULT_BATCH_SIZE; testing::InitGoogleTest(&argc, argv); @@ -193,6 +208,7 @@ int main(int argc, char *argv[]) sai_my_mac_api_t my_mac_api; sai_tunnel_api_t tunnel_api; sai_counter_api_t counter_api; + sai_bridge_api_t bridge_api; sai_generic_programmable_api_t generic_programmable_api; sai_router_intfs_api = &router_intfs_api; sai_neighbor_api = &neighbor_api; @@ -210,6 +226,7 @@ int main(int argc, char *argv[]) sai_my_mac_api = &my_mac_api; sai_tunnel_api = &tunnel_api; sai_counter_api = &counter_api; + sai_bridge_api = &bridge_api; sai_generic_programmable_api = &generic_programmable_api; swss::DBConnector appl_db("APPL_DB", 0); @@ -240,3 +257,56 @@ int main(int argc, char *argv[]) return RUN_ALL_TESTS(); } + +void setFlexCounterGroupParameter(const std::string &group, + const std::string &poll_interval, + const std::string &stats_mode, + const std::string &plugin_name, + const std::string &plugins, + const std::string &operation, + bool is_gearbox) +{ + return; +} + +void setFlexCounterGroupPollInterval(const std::string &group, + const std::string &poll_interval, + bool is_gearbox) +{ + return; +} + +void setFlexCounterGroupOperation(const std::string &group, + const std::string &operation, + bool is_gearbox) +{ + return; +} + +void setFlexCounterGroupStatsMode(const std::string &group, + const std::string &stats_mode, + bool is_gearbox) +{ + return; +} + +void delFlexCounterGroup(const std::string &group, + bool is_gearbox) +{ + return; +} + +void startFlexCounterPolling(sai_object_id_t switch_oid, + const std::string &key, + const std::string &counter_ids, + const std::string &counter_field_name, + const std::string &stats_mode) +{ + return; +} + +void stopFlexCounterPolling(sai_object_id_t switch_oid, + const std::string &key) +{ + return; +} diff --git a/orchagent/p4orch/tests/wcmp_manager_test.cpp b/orchagent/p4orch/tests/wcmp_manager_test.cpp index c3aaeb62178..eb179feb861 100644 --- a/orchagent/p4orch/tests/wcmp_manager_test.cpp +++ b/orchagent/p4orch/tests/wcmp_manager_test.cpp @@ -3,9 +3,9 @@ #include #include +#include #include -#include #include "mock_response_publisher.h" #include "mock_sai_acl.h" #include "mock_sai_hostif.h" @@ -27,6 +27,7 @@ using ::p4orch::kTableKeyDelimiter; extern P4Orch *gP4Orch; extern VRFOrch *gVrfOrch; +extern std::unique_ptr gMockResponsePublisher; extern swss::DBConnector *gAppDb; extern sai_object_id_t gSwitchId; extern sai_next_hop_group_api_t *sai_next_hop_group_api; @@ -54,6 +55,7 @@ namespace constexpr char *kWcmpGroupId1 = "group-1"; constexpr char *kWcmpGroupId2 = "group-2"; +constexpr char* kWcmpGroupId3 = "group-3"; constexpr sai_object_id_t kWcmpGroupOid1 = 10; constexpr char *kNexthopId1 = "ju1u32m1.atl11:qe-3/7"; constexpr sai_object_id_t kNexthopOid1 = 1; @@ -231,6 +233,7 @@ class WcmpManagerTest : public ::testing::Test EXPECT_CALL(mock_sai_acl_, remove_acl_table_group(_)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); delete gP4Orch; delete copp_orch_; + gMockResponsePublisher.reset(); } void setUpMockApi() @@ -266,9 +269,9 @@ class WcmpManagerTest : public ::testing::Test EXPECT_CALL(mock_sai_switch_, get_switch_attribute(_, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); copp_orch_ = new CoppOrch(gAppDb, APP_COPP_TABLE_NAME); - // init P4 orch - std::vector p4_tables; + std::vector p4_tables{APP_P4RT_TABLE_NAME}; gP4Orch = new P4Orch(gAppDb, p4_tables, gVrfOrch, copp_orch_); + gMockResponsePublisher = std::make_unique(); } void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) @@ -276,9 +279,12 @@ class WcmpManagerTest : public ::testing::Test wcmp_group_manager_->enqueue(APP_P4RT_WCMP_GROUP_TABLE_NAME, entry); } - void Drain() - { - wcmp_group_manager_->drain(); + ReturnCode Drain(bool failure_before) { + if (failure_before) { + wcmp_group_manager_->drainWithNotExecuted(); + return ReturnCode(StatusCode::SWSS_RC_NOT_EXECUTED); + } + return wcmp_group_manager_->drain(); } std::string VerifyState(const std::string &key, const std::vector &tuple) @@ -1324,7 +1330,11 @@ TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryFailsWhenNextHopDoesNotExist) actions.push_back(action); attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - Drain(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); @@ -1352,7 +1362,12 @@ TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryFailsWhenWeightLessThanOne) actions.push_back(action); attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - Drain(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_INVALID_PARAM), + Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); @@ -1384,7 +1399,12 @@ TEST_F(WcmpManagerTest, WcmpGroupInvalidOperationInDrainFails) // Invalid Operation string. Only SET and DEL are allowed Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), "Update", attributes)); - Drain(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_INVALID_PARAM), + Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); @@ -1404,7 +1424,12 @@ TEST_F(WcmpManagerTest, WcmpGroupUndefinedAttributesInDrainFails) std::vector attributes; attributes.push_back(swss::FieldValueTuple{"Undefined", "Invalid"}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - Drain(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_INVALID_PARAM), + Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); @@ -1445,7 +1470,11 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndDeleteInDrainSucceeds) .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1467,7 +1496,11 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndDeleteInDrainSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); attributes.clear(); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), DEL_COMMAND, attributes)); - Drain(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, key)); @@ -1506,7 +1539,11 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1537,7 +1574,11 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) .WillOnce( DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); EXPECT_EQ(1, wcmp_group_entry_ptr->wcmp_group_members.size()); @@ -1570,7 +1611,11 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) .WillOnce( DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); EXPECT_EQ(1, wcmp_group_entry_ptr->wcmp_group_members.size()); @@ -1604,7 +1649,11 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) .WillOnce( DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); - Drain(); + EXPECT_CALL( + *gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, Drain(/*failure_before=*/false)); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); EXPECT_EQ(1, wcmp_group_entry_ptr->wcmp_group_members.size()); @@ -1748,7 +1797,12 @@ TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryWithInvalidWatchportAttributeFails actions.push_back(action); attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - Drain(); + EXPECT_CALL(*gMockResponsePublisher, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kKeyPrefix + j.dump()), + Eq(attributes), Eq(StatusCode::SWSS_RC_INVALID_PARAM), + Eq(true))); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, + Drain(/*failure_before=*/false)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); @@ -2291,7 +2345,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangetoOperDownSucceeds) // Verify that the next hop member associated with the port is pruned. std::string op = "port_state_change"; std::string data = "[{\"port_id\":\"oid:0x56789abcdff\",\"port_state\":\"SAI_PORT_OPER_" - "STATUS_DOWN\"}]"; + "STATUS_DOWN\",\"port_error_status\":\"0\"}]"; EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); HandlePortStatusChangeNotification(op, data); @@ -2314,7 +2368,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeToOperUpSucceeds) // restored. std::string op = "port_state_change"; std::string data = "[{\"port_id\":\"oid:0x112233\",\"port_state\":\"SAI_PORT_OPER_" - "STATUS_UP\"}]"; + "STATUS_UP\",\"port_error_status\":\"0\"}]"; EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, @@ -2339,7 +2393,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeFromOperUnknownToDownPrunesMemberOnl // Verify that the pruned next hop member is not pruned again. std::string op = "port_state_change"; std::string data = "[{\"port_id\":\"oid:0x56789abcfff\",\"port_state\":\"SAI_PORT_OPER_" - "STATUS_DOWN\"}]"; + "STATUS_DOWN\",\"port_error_status\":\"0\"}]"; HandlePortStatusChangeNotification(op, data); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); diff --git a/orchagent/p4orch/wcmp_manager.cpp b/orchagent/p4orch/wcmp_manager.cpp index 67d87f1373c..fa1a3eae836 100644 --- a/orchagent/p4orch/wcmp_manager.cpp +++ b/orchagent/p4orch/wcmp_manager.cpp @@ -1,5 +1,6 @@ #include "p4orch/wcmp_manager.h" +#include #include #include #include @@ -7,7 +8,6 @@ #include "SaiAttributeList.h" #include "crmorch.h" #include "dbconnector.h" -#include #include "logger.h" #include "p4orch/p4orch_util.h" #include "portsorch.h" @@ -734,13 +734,14 @@ void WcmpManager::updatePortOperStatusMap(const std::string &port, const sai_por port_oper_status_map[port] = status; } -ReturnCode WcmpManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +ReturnCode WcmpManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) { - std::string value; + std::string value; try { - nlohmann::json j = nlohmann::json::parse(json_key); + nlohmann::json j = nlohmann::json::parse(json_key); if (j.find(prependMatchField(p4orch::kWcmpGroupId)) != j.end()) { value = j.at(prependMatchField(p4orch::kWcmpGroupId)).get(); @@ -766,73 +767,78 @@ void WcmpManager::enqueue(const std::string &table_name, const swss::KeyOpFields m_entries.push_back(entry); } -void WcmpManager::drain() -{ - SWSS_LOG_ENTER(); - - for (const auto &key_op_fvs_tuple : m_entries) - { - std::string table_name; - std::string db_key; - parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); - const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); +void WcmpManager::drainWithNotExecuted() { + drainMgmtWithNotExecuted(m_entries, m_publisher); +} - ReturnCode status; - auto app_db_entry_or = deserializeP4WcmpGroupAppDbEntry(db_key, attributes); - if (!app_db_entry_or.ok()) - { - status = app_db_entry_or.status(); - SWSS_LOG_ERROR("Unable to deserialize APP DB WCMP group entry with key %s: %s", - QuotedVar(table_name + ":" + db_key).c_str(), status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto &app_db_entry = *app_db_entry_or; +ReturnCode WcmpManager::drain() { + SWSS_LOG_ENTER(); - const std::string &operation = kfvOp(key_op_fvs_tuple); - if (operation == SET_COMMAND) - { - status = validateWcmpGroupEntry(app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Invalid WCMP group with id %s: %s", QuotedVar(app_db_entry.wcmp_group_id).c_str(), - status.message().c_str()); - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status, - /*replace=*/true); - continue; - } - auto *wcmp_group_entry = getWcmpGroupEntry(app_db_entry.wcmp_group_id); - if (wcmp_group_entry == nullptr) - { - // Create WCMP group - status = processAddRequest(&app_db_entry); - } - else - { - // Modify existing WCMP group - status = processUpdateRequest(&app_db_entry); - } - } - else if (operation == DEL_COMMAND) - { - // Delete WCMP group - status = removeWcmpGroup(app_db_entry.wcmp_group_id); - } - else - { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Unknown operation type: " << QuotedVar(operation) << " for WCMP group entry with key " - << QuotedVar(table_name) << ":" << QuotedVar(db_key) - << "; only SET and DEL operations are allowed."; - SWSS_LOG_ERROR("Unknown operation type %s\n", QuotedVar(operation).c_str()); - } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, + ReturnCode status; + while (!m_entries.empty()) { + auto key_op_fvs_tuple = m_entries.front(); + m_entries.pop_front(); + std::string table_name; + std::string db_key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &db_key); + const std::vector& attributes = + kfvFieldsValues(key_op_fvs_tuple); + + auto app_db_entry_or = deserializeP4WcmpGroupAppDbEntry(db_key, attributes); + if (!app_db_entry_or.ok()) { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR( + "Unable to deserialize APP DB WCMP group entry with key %s: %s", + QuotedVar(table_name + ":" + db_key).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + break; + } + auto& app_db_entry = *app_db_entry_or; + + const std::string& operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) { + status = validateWcmpGroupEntry(app_db_entry); + if (!status.ok()) { + SWSS_LOG_ERROR("Invalid WCMP group with id %s: %s", + QuotedVar(app_db_entry.wcmp_group_id).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, /*replace=*/true); + break; + } + auto* wcmp_group_entry = getWcmpGroupEntry(app_db_entry.wcmp_group_id); + if (wcmp_group_entry == nullptr) { + // Create WCMP group + status = processAddRequest(&app_db_entry); + } else { + // Modify existing WCMP group + status = processUpdateRequest(&app_db_entry); + } + } else if (operation == DEL_COMMAND) { + // Delete WCMP group + status = removeWcmpGroup(app_db_entry.wcmp_group_id); + } else { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type: " << QuotedVar(operation) + << " for WCMP group entry with key " << QuotedVar(table_name) + << ":" << QuotedVar(db_key) + << "; only SET and DEL operations are allowed."; + SWSS_LOG_ERROR("Unknown operation type %s\n", + QuotedVar(operation).c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), + kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + if (!status.ok()) { + break; } - m_entries.clear(); + } + drainWithNotExecuted(); + return status; } std::string WcmpManager::verifyState(const std::string &key, const std::vector &tuple) diff --git a/orchagent/p4orch/wcmp_manager.h b/orchagent/p4orch/wcmp_manager.h index 64fd4283e40..9e024ba3e3d 100644 --- a/orchagent/p4orch/wcmp_manager.h +++ b/orchagent/p4orch/wcmp_manager.h @@ -70,9 +70,11 @@ class WcmpManager : public ObjectManagerInterface virtual ~WcmpManager() = default; void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; - void drain() override; + ReturnCode drain() override; + void drainWithNotExecuted() override; std::string verifyState(const std::string &key, const std::vector &tuple) override; - ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, + std::string &object_key) override; // Prunes next hop members egressing through the given port. void pruneNextHops(const std::string &port); diff --git a/orchagent/pfc_detect_broadcom.lua b/orchagent/pfc_detect_broadcom.lua index 29ed2d16339..a4f0c3afddc 100644 --- a/orchagent/pfc_detect_broadcom.lua +++ b/orchagent/pfc_detect_broadcom.lua @@ -12,6 +12,59 @@ local rets = {} redis.call('SELECT', counters_db) +local function parse_boolean(str) return str == 'true' end +local function parse_number(str) return tonumber(str) or 0 end + +local function updateTimePaused(port_key, prio, time_since_last_poll) + -- Estimate that queue paused for entire poll duration + local total_pause_time_field = 'SAI_PORT_STAT_PFC_' .. prio .. '_RX_PAUSE_DURATION_US' + local recent_pause_time_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RECENT_PAUSE_TIME_US' + + local recent_pause_time_us = parse_number( + redis.call('HGET', port_key, recent_pause_time_field) + ) + local total_pause_time_us = redis.call('HGET', port_key, total_pause_time_field) + + -- Only estimate total time when no SAI support + if not total_pause_time_us then + total_pause_time_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RX_PAUSE_DURATION_US' + total_pause_time_us = parse_number( + redis.call('HGET', port_key, total_pause_time_field) + ) + + local total_pause_time_us_new = total_pause_time_us + time_since_last_poll + redis.call('HSET', port_key, total_pause_time_field, total_pause_time_us_new) + end + + local recent_pause_time_us_new = recent_pause_time_us + time_since_last_poll + redis.call('HSET', port_key, recent_pause_time_field, recent_pause_time_us_new) +end + +local function restartRecentTime(port_key, prio, timestamp_last) + local recent_pause_time_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RECENT_PAUSE_TIME_US' + local recent_pause_timestamp_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RECENT_PAUSE_TIMESTAMP' + + redis.call('HSET', port_key, recent_pause_timestamp_field, timestamp_last) + redis.call('HSET', port_key, recent_pause_time_field, 0) +end + +-- Get the time since the last poll, used to compute total and recent times +local timestamp_field_last = 'PFCWD_POLL_TIMESTAMP_last' +local timestamp_last = redis.call('HGET', 'TIMESTAMP', timestamp_field_last) +local time = redis.call('TIME') +-- convert to microseconds +local timestamp_current = tonumber(time[1]) * 1000000 + tonumber(time[2]) + +-- save current poll as last poll +local timestamp_string = tostring(timestamp_current) +redis.call('HSET', 'TIMESTAMP', timestamp_field_last, timestamp_string) + +local time_since_last_poll = poll_time +-- not first poll +if timestamp_last ~= false then + time_since_last_poll = (timestamp_current - tonumber(timestamp_last)) +end + -- Iterate through each queue local n = table.getn(KEYS) for i = n, 1, -1 do @@ -86,6 +139,29 @@ for i = n, 1, -1 do end time_left = detection_time end + + -- estimate history + local pfc_stat_history = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_STAT_HISTORY') + if pfc_stat_history and pfc_stat_history == "enable" then + local port_key = counters_table_name .. ':' .. port_id + local was_paused = parse_boolean(queue_pause_status_last) + local now_paused = parse_boolean(queue_pause_status) + + -- Activity has occured + if pfc_rx_packets > pfc_rx_packets_last then + -- fresh recent pause period + if not was_paused then + restartRecentTime(port_key, queue_index, timestamp_last) + end + -- Estimate entire interval paused if there was pfc activity + updateTimePaused(port_key, queue_index, time_since_last_poll) + else + -- queue paused entire interval without activity + if now_paused and was_paused then + updateTimePaused(port_key, queue_index, time_since_last_poll) + end + end + end end -- Save values for next run diff --git a/orchagent/pfc_detect_clounix.lua b/orchagent/pfc_detect_clounix.lua new file mode 100755 index 00000000000..2ad2d31dca2 --- /dev/null +++ b/orchagent/pfc_detect_clounix.lua @@ -0,0 +1,98 @@ +-- KEYS - queue IDs +-- ARGV[1] - counters db index +-- ARGV[2] - counters table name +-- ARGV[3] - poll time interval (milliseconds) +-- return queue Ids that satisfy criteria + +local counters_db = ARGV[1] +local counters_table_name = ARGV[2] +local poll_time = tonumber(ARGV[3]) * 1000 + +local rets = {} + +redis.call('SELECT', counters_db) + +-- Iterate through each queue +local n = table.getn(KEYS) +for i = n, 1, -1 do + local counter_keys = redis.call('HKEYS', counters_table_name .. ':' .. KEYS[i]) + local counter_num = 0 + local old_counter_num = 0 + local is_deadlock = false + local pfc_wd_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_STATUS') + local pfc_wd_action = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_ACTION') + local big_red_switch_mode = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'BIG_RED_SWITCH_MODE') + if not big_red_switch_mode and (pfc_wd_status == 'operational' or pfc_wd_action == 'alert') then + local detection_time = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME') + if detection_time then + detection_time = tonumber(detection_time) + local time_left = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT') + if not time_left then + time_left = detection_time + else + time_left = tonumber(time_left) + end + + local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) + local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) + + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end + else + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time + end + end + + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end + end + end +end + +return rets diff --git a/orchagent/pfc_detect_marvell_prestera.lua b/orchagent/pfc_detect_marvell_prestera.lua new file mode 100644 index 00000000000..6e5b710b5a1 --- /dev/null +++ b/orchagent/pfc_detect_marvell_prestera.lua @@ -0,0 +1,99 @@ +-- KEYS - queue IDs +-- ARGV[1] - counters db index +-- ARGV[2] - counters table name +-- ARGV[3] - poll time interval (milliseconds) +-- return queue Ids that satisfy criteria + +local counters_db = ARGV[1] +local counters_table_name = ARGV[2] +local poll_time = tonumber(ARGV[3]) * 1000 + +local rets = {} + +redis.call('SELECT', counters_db) + +-- Iterate through each queue +local n = table.getn(KEYS) +for i = n, 1, -1 do + local counter_keys = redis.call('HKEYS', counters_table_name .. ':' .. KEYS[i]) + local counter_num = 0 + local old_counter_num = 0 + local is_deadlock = false + local pfc_wd_status = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_STATUS') + local pfc_wd_action = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_ACTION') + local big_red_switch_mode = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'BIG_RED_SWITCH_MODE') + if not big_red_switch_mode and (pfc_wd_status == 'operational' or pfc_wd_action == 'alert') then + local detection_time = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME') + if detection_time then + detection_time = tonumber(detection_time) + local time_left = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT') + if not time_left then + time_left = detection_time + else + time_left = tonumber(time_left) + end + + local queue_index = redis.call('HGET', 'COUNTERS_QUEUE_INDEX_MAP', KEYS[i]) + local port_id = redis.call('HGET', 'COUNTERS_QUEUE_PORT_MAP', KEYS[i]) + local pfc_rx_pkt_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PKTS' + local pfc_duration_key = 'SAI_PORT_STAT_PFC_' .. queue_index .. '_RX_PAUSE_DURATION' + + -- Get all counters + local occupancy_bytes = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES') + local packets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS') + local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) + local pfc_duration = "0" + + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then + occupancy_bytes = tonumber(occupancy_bytes) + packets = tonumber(packets) + pfc_rx_packets = tonumber(pfc_rx_packets) + pfc_duration = tonumber(pfc_duration) + + local packets_last = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last') + local pfc_rx_packets_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') + local pfc_duration_last = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + -- DEBUG CODE START. Uncomment to enable + local debug_storm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'DEBUG_STORM') + -- DEBUG CODE END. + + -- If this is not a first run, then we have last values available + if packets_last and pfc_rx_packets_last and pfc_duration_last then + packets_last = tonumber(packets_last) + pfc_rx_packets_last = tonumber(pfc_rx_packets_last) + pfc_duration_last = tonumber(pfc_duration_last) + + -- Check actual condition of queue being in PFC storm + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + -- DEBUG CODE START. Uncomment to enable + (debug_storm == "enabled") or + -- DEBUG CODE END. + (occupancy_bytes == 0 and packets - packets_last == 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) then + if time_left <= poll_time then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + is_deadlock = true + time_left = detection_time + else + time_left = time_left - poll_time + end + else + if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","restore"]') + end + time_left = detection_time + end + end + + -- Save values for next run + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'SAI_QUEUE_STAT_PACKETS_last', packets) + redis.call('HSET', counters_table_name .. ':' .. KEYS[i], 'PFC_WD_DETECTION_TIME_LEFT', time_left) + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last', pfc_rx_packets) + redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') + redis.call('HSET', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last', pfc_duration) + end + end + end +end + +return rets + diff --git a/orchagent/pfc_detect_innovium.lua b/orchagent/pfc_detect_marvell_teralynx.lua similarity index 100% rename from orchagent/pfc_detect_innovium.lua rename to orchagent/pfc_detect_marvell_teralynx.lua diff --git a/orchagent/pfc_detect_mellanox.lua b/orchagent/pfc_detect_mellanox.lua old mode 100644 new mode 100755 index 826a577d623..c82662d6755 --- a/orchagent/pfc_detect_mellanox.lua +++ b/orchagent/pfc_detect_mellanox.lua @@ -18,11 +18,33 @@ local timestamp_struct = redis.call('TIME') local timestamp_current = timestamp_struct[1] + timestamp_struct[2] / 1000000 local timestamp_string = tostring(timestamp_current) redis.call('HSET', 'TIMESTAMP', 'pfcwd_poll_timestamp_last', timestamp_string) -local real_poll_time = poll_time +local global_effective_poll_time = poll_time +local global_effective_poll_time_lasttime = redis.call('HGET', 'TIMESTAMP', 'effective_pfcwd_poll_time_last') if timestamp_last ~= false then - real_poll_time = (timestamp_current - tonumber(timestamp_last)) * 1000000 + global_effective_poll_time = (timestamp_current - tonumber(timestamp_last)) * 1000000 + redis.call('HSET', 'TIMESTAMP', 'effective_pfcwd_poll_time_last', global_effective_poll_time) end +-- Get timestamp from TIME_STAMP table for PFC_WD counters +-- Use a field name without spaces to avoid issues +local port_timestamp_current = tonumber(redis.call('HGET', 'COUNTERS:TIME_STAMP', 'PFC_WD_Port_Counter_time_stamp')) +local port_timestamp_last = tonumber(redis.call('HGET', 'COUNTERS:TIME_STAMP', 'PFC_WD_Port_Counter_time_stamp_last')) + +-- Update the last timestamp for all ports at once +if port_timestamp_current ~= nil then + redis.call('HSET', 'COUNTERS:TIME_STAMP', 'PFC_WD_Port_Counter_time_stamp_last', port_timestamp_current) +end + +local effective_poll_time +if port_timestamp_current ~= nil and port_timestamp_last ~= nil then + effective_poll_time = (port_timestamp_current - port_timestamp_last) / 1000 +else + effective_poll_time = global_effective_poll_time +end + +local debug_storm_global = redis.call('HGET', 'DEBUG_STORM', 'enabled') == 'true' +local debug_storm_threshold = tonumber(redis.call('HGET', 'DEBUG_STORM', 'threshold')) + -- Iterate through each queue local n = table.getn(KEYS) for i = n, 1, -1 do @@ -60,6 +82,10 @@ for i = n, 1, -1 do local pfc_rx_packets = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key) local pfc_duration = redis.call('HGET', counters_table_name .. ':' .. port_id, pfc_duration_key) + if debug_storm_global then + redis.call('PUBLISH', 'PFC_WD_DEBUG', 'Port ID ' .. port_id .. ' Queue index ' .. queue_index .. ' occupancy ' .. occupancy_bytes .. ' packets ' .. packets .. ' pfc rx ' .. pfc_rx_packets .. ' pfc duration ' .. pfc_duration .. ' effective poll time ' .. tostring(effective_poll_time) .. '(global ' .. tostring(global_effective_poll_time) .. ')') + end + if occupancy_bytes and packets and pfc_rx_packets and pfc_duration then occupancy_bytes = tonumber(occupancy_bytes) packets = tonumber(packets) @@ -78,27 +104,34 @@ for i = n, 1, -1 do packets_last = tonumber(packets_last) pfc_rx_packets_last = tonumber(pfc_rx_packets_last) pfc_duration_last = tonumber(pfc_duration_last) - local storm_condition = (pfc_duration - pfc_duration_last) > (poll_time * 0.8) + local storm_condition = (pfc_duration - pfc_duration_last) > (effective_poll_time * 0.99) + + if debug_storm_threshold ~= nil and (pfc_duration - pfc_duration_last) > (effective_poll_time * debug_storm_threshold / 100) then + redis.call('PUBLISH', 'PFC_WD_DEBUG', 'Port ID ' .. port_id .. ' Queue index ' .. queue_index .. ' occupancy ' .. occupancy_bytes .. ' packets ' .. packets .. ' pfc rx ' .. pfc_rx_packets .. ' pfc duration ' .. pfc_duration .. ' effective poll time ' .. tostring(effective_poll_time) .. ', triggered by threshold ' .. debug_storm_threshold .. '%') + end -- Check actual condition of queue being in PFC storm - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + if (occupancy_bytes > 0 and packets - packets_last == 0 and storm_condition) or -- DEBUG CODE START. Uncomment to enable - (debug_storm == "enabled") or + (debug_storm == "enabled") -- DEBUG CODE END. - (occupancy_bytes == 0 and packets - packets_last == 0 and storm_condition) then - if time_left <= poll_time then + then + if time_left <= effective_poll_time then redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') local occupancy_string = '"occupancy","' .. tostring(occupancy_bytes) .. '",' local packets_string = '"packets","' .. tostring(packets) .. '","packets_last","' .. tostring(packets_last) .. '",' local pfc_rx_packets_string = '"pfc_rx_packets","' .. tostring(pfc_rx_packets) .. '","pfc_rx_packets_last","' .. tostring(pfc_rx_packets_last) .. '",' local storm_condition_string = '"pfc_duration","' .. tostring(pfc_duration) .. '","pfc_duration_last","' .. tostring(pfc_duration_last) .. '",' - local timestamps = '"timestamp","' .. timestamp_string .. '","timestamp_last","' .. timestamp_last .. '","real_poll_time","' .. real_poll_time .. '"' + local timestamps = '"timestamp","' .. timestamp_string .. '","timestamp_last","' .. timestamp_last .. '","effective_poll_time","' .. effective_poll_time .. '"' + if global_effective_poll_time_lasttime ~= false then + timestamps = timestamps .. ',"effective_pfcwd_poll_time_last","' .. global_effective_poll_time_lasttime .. '"' + end redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm",' .. occupancy_string .. packets_string .. pfc_rx_packets_string .. storm_condition_string .. timestamps .. ']') is_deadlock = true time_left = detection_time else - time_left = time_left - poll_time + time_left = time_left - effective_poll_time end else if pfc_wd_action == 'alert' and pfc_wd_status ~= 'operational' then diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp index 305ed4421d4..32f984cea13 100644 --- a/orchagent/pfcactionhandler.cpp +++ b/orchagent/pfcactionhandler.cpp @@ -344,19 +344,58 @@ PfcWdAclHandler::PfcWdAclHandler(sai_object_id_t port, sai_object_id_t queue, // Egress table/rule creation table_type = TABLE_TYPE_PFCWD; - m_strEgressTable = "EgressTable_PfcWdAclHandler_" + queuestr; - found = m_aclTables.find(m_strEgressTable); - if (found == m_aclTables.end()) + + // Use shared egress acl table for BRCM DNX platform. + string platform = getenv("platform") ? getenv("platform") : ""; + string sub_platform = getenv("sub_platform") ? getenv("sub_platform") : ""; + shared_egress_acl_table = (platform == BRCM_PLATFORM_SUBSTRING && + sub_platform == BRCM_DNX_PLATFORM_SUBSTRING); + + if (shared_egress_acl_table) { - // First time of handling PFC for this queue, create ACL table, and bind - createPfcAclTable(port, m_strEgressTable, false); - shared_ptr newRule = make_shared(gAclOrch, m_strRule, m_strEgressTable); - createPfcAclRule(newRule, queueId, m_strEgressTable, port); + Port p; + if (!gPortsOrch->getPort(port, p)) + { + SWSS_LOG_ERROR("Failed to get port structure from port oid 0x%" PRIx64, port); + return; + } + m_strEgressRule = "Egress_Rule_PfcWdAclHandler_" + p.m_alias + "_" + queuestr; + m_strEgressTable = "EgressTable_PfcWdAclHandler"; + found = m_aclTables.find(m_strEgressTable); + if (found == m_aclTables.end()) + { + // First time of handling PFC, create ACL table and also ACL rule. + createPfcAclTable(port, m_strEgressTable, false); + shared_ptr newRule = make_shared(gAclOrch, m_strEgressRule, m_strEgressTable); + createPfcAclRule(newRule, queueId, m_strEgressTable, port); + } + else + { + // ACL table already exists. Add ACL rule if needed. + AclRule* rule = gAclOrch->getAclRule(m_strEgressTable, m_strEgressRule); + if (rule == nullptr) + { + shared_ptr newRule = make_shared(gAclOrch, m_strEgressRule, m_strEgressTable); + createPfcAclRule(newRule, queueId, m_strEgressTable, port); + } + } } else { - // Otherwise just bind ACL table with the port - found->second.bind(port); + m_strEgressTable = "EgressTable_PfcWdAclHandler_" + queuestr; + found = m_aclTables.find(m_strEgressTable); + if (found == m_aclTables.end()) + { + // First time of handling PFC for this queue, create ACL table, and bind + createPfcAclTable(port, m_strEgressTable, false); + shared_ptr newRule = make_shared(gAclOrch, m_strRule, m_strEgressTable); + createPfcAclRule(newRule, queueId, m_strEgressTable, port); + } + else + { + // Otherwise just bind ACL table with the port + found->second.bind(port); + } } } @@ -382,8 +421,20 @@ PfcWdAclHandler::~PfcWdAclHandler(void) gAclOrch->updateAclRule(m_strIngressTable, m_strRule, MATCH_IN_PORTS, &port, RULE_OPER_DELETE); } - auto found = m_aclTables.find(m_strEgressTable); - found->second.unbind(port); + if (shared_egress_acl_table) + { + rule = gAclOrch->getAclRule(m_strEgressTable, m_strEgressRule); + if (rule == nullptr) + { + SWSS_LOG_THROW("Egress ACL Rule does not exist for rule %s", m_strEgressRule.c_str()); + } + gAclOrch->removeAclRule(m_strEgressTable, m_strEgressRule); + } + else + { + auto found = m_aclTables.find(m_strEgressTable); + found->second.unbind(port); + } } void PfcWdAclHandler::clear() @@ -418,7 +469,11 @@ void PfcWdAclHandler::createPfcAclTable(sai_object_id_t port, string strTable, b return; } - aclTable.link(port); + // Link port only for ingress ACL table or unshared egress ACL table. + if (ingress || !shared_egress_acl_table) + { + aclTable.link(port); + } if (ingress) { @@ -452,18 +507,24 @@ void PfcWdAclHandler::createPfcAclRule(shared_ptr rule, uint8_t q attr_value = to_string(queueId); rule->validateAddMatch(attr_name, attr_value); - // Add MATCH_IN_PORTS as match criteria for ingress table - if (strTable == INGRESS_TABLE_DROP) + // Add MATCH_IN_PORTS as match criteria for ingress table and MATCH_OUT_PORT as match creiteria for shared egress table. + if (strTable == INGRESS_TABLE_DROP || shared_egress_acl_table) { Port p; - attr_name = MATCH_IN_PORTS; - + if (strTable == INGRESS_TABLE_DROP) + { + attr_name = MATCH_IN_PORTS; + } + else if (shared_egress_acl_table) { + attr_name = MATCH_OUT_PORT; + } + if (!gPortsOrch->getPort(portOid, p)) { SWSS_LOG_ERROR("Failed to get port structure from port oid 0x%" PRIx64, portOid); return; } - + attr_value = p.m_alias; rule->validateAddMatch(attr_name, attr_value); } diff --git a/orchagent/pfcactionhandler.h b/orchagent/pfcactionhandler.h index acfc923423a..9fa409af306 100644 --- a/orchagent/pfcactionhandler.h +++ b/orchagent/pfcactionhandler.h @@ -107,9 +107,12 @@ class PfcWdAclHandler: public PfcWdLossyHandler // class shared dict: ACL table name -> ACL table static std::map m_aclTables; + bool shared_egress_acl_table = false; + string m_strIngressTable; string m_strEgressTable; string m_strRule; + string m_strEgressRule; void createPfcAclTable(sai_object_id_t port, string strTable, bool ingress); void createPfcAclRule(shared_ptr rule, uint8_t queueId, string strTable, sai_object_id_t port); void updatePfcAclRule(shared_ptr rule, uint8_t queueId, string strTable, vector port); diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index 7c78f81d6ba..b70b72bb0c9 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -15,6 +15,7 @@ #define PFC_WD_ACTION "action" #define PFC_WD_DETECTION_TIME "detection_time" #define PFC_WD_RESTORATION_TIME "restoration_time" +#define PFC_STAT_HISTORY "pfc_stat_history" #define BIG_RED_SWITCH_FIELD "BIG_RED_SWITCH" #define PFC_WD_IN_STORM "storm" @@ -112,30 +113,28 @@ void PfcWdOrch::doTask(Consumer& consumer) break; } } + + if (m_pfcwdFlexCounterManager != nullptr) + { + m_pfcwdFlexCounterManager->flush(); + } } } template template -string PfcWdSwOrch::counterIdsToStr( - const vector ids, string (*convert)(T)) +unordered_set PfcWdSwOrch::counterIdsToStr( + const vector ids, string (*convert)(T)) { SWSS_LOG_ENTER(); - - string str; + unordered_set counterIdSet; for (const auto& i: ids) { - str += convert(i) + ","; - } - - // Remove trailing ',' - if (!str.empty()) - { - str.pop_back(); + counterIdSet.emplace(convert(i)); } - return str; + return counterIdSet; } template @@ -189,6 +188,7 @@ task_process_status PfcWdOrch::createEntry(const st uint32_t restorationTime = 0; // According to requirements, drop action is default PfcWdAction action = PfcWdAction::PFC_WD_ACTION_DROP; + string pfcStatHistory = "disable"; Port port; if (!gPortsOrch->getPort(key, port)) { @@ -265,6 +265,9 @@ task_process_status PfcWdOrch::createEntry(const st } } } + else if(field == PFC_STAT_HISTORY){ + pfcStatHistory = value; + } else { SWSS_LOG_ERROR( @@ -299,8 +302,13 @@ task_process_status PfcWdOrch::createEntry(const st SWSS_LOG_ERROR("%s missing", PFC_WD_DETECTION_TIME); return task_process_status::task_invalid_entry; } + if (pfcStatHistory != "enable" && pfcStatHistory != "disable") + { + SWSS_LOG_ERROR("%s is invalid value for %s", pfcStatHistory.c_str(), PFC_STAT_HISTORY); + return task_process_status::task_invalid_entry; + } - if (!startWdOnPort(port, detectionTime, restorationTime, action)) + if (!startWdOnPort(port, detectionTime, restorationTime, action, pfcStatHistory)) { SWSS_LOG_ERROR("Failed to start PFC Watchdog on port %s", port.m_alias.c_str()); return task_process_status::task_need_retry; @@ -345,9 +353,7 @@ task_process_status PfcWdSwOrch::createEntry(const if (field == POLL_INTERVAL_FIELD) { - vector fieldValues; - fieldValues.emplace_back(POLL_INTERVAL_FIELD, value); - m_flexCounterGroupTable->set(PFC_WD_FLEX_COUNTER_GROUP, fieldValues); + this->m_pfcwdFlexCounterManager->updateGroupPollingInterval(stoi(value)); } else if (field == BIG_RED_SWITCH_FIELD) { @@ -520,7 +526,7 @@ void PfcWdSwOrch::enableBigRedSwitchMode() template bool PfcWdSwOrch::registerInWdDb(const Port& port, - uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action) + uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory) { SWSS_LOG_ENTER(); @@ -550,14 +556,8 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, if (!c_portStatIds.empty()) { - string key = getFlexCounterTableKey(sai_serialize_object_id(port.m_port_id)); - vector fieldValues; - // Only register lossless tc counters in database. - string str = counterIdsToStr(c_portStatIds, &sai_serialize_port_stat); - string filteredStr = filterPfcCounters(str, losslessTc); - fieldValues.emplace_back(PORT_COUNTER_ID_LIST, filteredStr); - - m_flexCounterTable->set(key, fieldValues); + auto portStatIdSet = filterPfcCounters(counterIdsToStr(c_portStatIds, &sai_serialize_port_stat), losslessTc); + this->m_pfcwdFlexCounterManager->setCounterIdList(port.m_port_id, CounterType::PORT, portStatIdSet, SAI_OBJECT_TYPE_PORT); } for (auto i : losslessTc) @@ -574,30 +574,28 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, "" : to_string(restorationTime * 1000)); countersFieldValues.emplace_back("PFC_WD_ACTION", this->serializeAction(action)); + countersFieldValues.emplace_back("PFC_STAT_HISTORY", pfcStatHistory); this->getCountersTable()->set(queueIdStr, countersFieldValues); // We register our queues in PFC_WD table so that syncd will know that it must poll them - vector queueFieldValues; + string key = getFlexCounterTableKey(queueIdStr); if (!c_queueStatIds.empty()) { - string str = counterIdsToStr(c_queueStatIds, sai_serialize_queue_stat); - queueFieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, str); + auto queueStatIdSet = counterIdsToStr(c_queueStatIds, sai_serialize_queue_stat); + this->m_pfcwdFlexCounterManager->setCounterIdList(queueId, CounterType::QUEUE, queueStatIdSet, SAI_OBJECT_TYPE_QUEUE); } if (!c_queueAttrIds.empty()) { - string str = counterIdsToStr(c_queueAttrIds, sai_serialize_queue_attr); - queueFieldValues.emplace_back(QUEUE_ATTR_ID_LIST, str); + auto queueAttrIdSet = counterIdsToStr(c_queueAttrIds, sai_serialize_queue_attr); + (dynamic_cast(this->m_pfcwdFlexCounterManager.get()))->setCounterIdList(queueId, CounterType::QUEUE_ATTR, queueAttrIdSet); } // Create internal entry m_entryMap.emplace(queueId, PfcWdQueueEntry(action, port.m_port_id, i, port.m_alias)); - string key = getFlexCounterTableKey(queueIdStr); - m_flexCounterTable->set(key, queueFieldValues); - // Initialize PFC WD related counters PfcWdActionHandler::initWdCounters( this->getCountersTable(), @@ -610,37 +608,31 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, } template -string PfcWdSwOrch::filterPfcCounters(string counters, set& losslessTc) +unordered_set PfcWdSwOrch::filterPfcCounters(const unordered_set &counters, set& losslessTc) { SWSS_LOG_ENTER(); - istringstream is(counters); - string counter; - string filterCounters; + unordered_set filterCounters; - while (getline(is, counter, ',')) + for (auto &counter : counters) { + //auto &counter = it.first; size_t index = 0; index = counter.find(SAI_PORT_STAT_PFC_PREFIX); if (index != 0) { - filterCounters = filterCounters + counter + ","; + filterCounters.emplace(counter); } else { uint8_t tc = (uint8_t)atoi(counter.substr(index + sizeof(SAI_PORT_STAT_PFC_PREFIX) - 1, 1).c_str()); if (losslessTc.count(tc)) { - filterCounters = filterCounters + counter + ","; + filterCounters.emplace(counter); } } } - if (!filterCounters.empty()) - { - filterCounters.pop_back(); - } - return filterCounters; } @@ -657,16 +649,12 @@ void PfcWdSwOrch::unregisterFromWdDb(const Port& po { SWSS_LOG_ENTER(); - string key = getFlexCounterTableKey(sai_serialize_object_id(port.m_port_id)); - m_flexCounterTable->del(key); + this->m_pfcwdFlexCounterManager->clearCounterIdList(port.m_port_id, SAI_OBJECT_TYPE_PORT); for (uint8_t i = 0; i < PFC_WD_TC_MAX; i++) { sai_object_id_t queueId = port.m_queue_ids[i]; - string key = getFlexCounterTableKey(sai_serialize_object_id(queueId)); - - // Unregister in syncd - m_flexCounterTable->del(key); + this->m_pfcwdFlexCounterManager->clearCounterIdList(queueId, SAI_OBJECT_TYPE_QUEUE); auto entry = m_entryMap.find(queueId); if (entry != m_entryMap.end() && entry->second.handler != nullptr) @@ -692,9 +680,6 @@ PfcWdSwOrch::PfcWdSwOrch( const vector &queueAttrIds, int pollInterval): PfcWdOrch(db, tableNames), - m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), - m_flexCounterTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_TABLE)), - m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), c_portStatIds(portStatIds), c_queueStatIds(queueStatIds), c_queueAttrIds(queueAttrIds), @@ -707,6 +692,8 @@ PfcWdSwOrch::PfcWdSwOrch( string detectSha, restoreSha; string detectPluginName = "pfc_detect_" + this->m_platform + ".lua"; string restorePluginName; + string pollIntervalStr = to_string(m_pollInterval); + string plugins; if (this->m_platform == CISCO_8000_PLATFORM_SUBSTRING) { restorePluginName = "pfc_restore_" + this->m_platform + ".lua"; } else { @@ -724,18 +711,16 @@ PfcWdSwOrch::PfcWdSwOrch( restoreSha = swss::loadRedisScript( this->getCountersDb().get(), restoreLuaScript); - - vector fieldValues; - fieldValues.emplace_back(QUEUE_PLUGIN_FIELD, detectSha + "," + restoreSha); - fieldValues.emplace_back(POLL_INTERVAL_FIELD, to_string(m_pollInterval)); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ); - m_flexCounterGroupTable->set(PFC_WD_FLEX_COUNTER_GROUP, fieldValues); + plugins = detectSha + "," + restoreSha; } catch (...) { SWSS_LOG_WARN("Lua scripts and polling interval for PFC watchdog were not set successfully"); } + this->m_pfcwdFlexCounterManager = make_shared>( + "PFC_WD", StatsMode::READ, m_pollInterval, true, make_pair(QUEUE_PLUGIN_FIELD, plugins)); + auto consumer = new swss::NotificationConsumer( this->getCountersDb().get(), "PFC_WD_ACTION"); @@ -758,7 +743,6 @@ template PfcWdSwOrch::~PfcWdSwOrch(void) { SWSS_LOG_ENTER(); - m_flexCounterGroupTable->del(PFC_WD_FLEX_COUNTER_GROUP); } template @@ -774,11 +758,11 @@ PfcWdSwOrch::PfcWdQueueEntry::PfcWdQueueEntry( template bool PfcWdSwOrch::startWdOnPort(const Port& port, - uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action) + uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory) { SWSS_LOG_ENTER(); - return registerInWdDb(port, detectionTime, restorationTime, action); + return registerInWdDb(port, detectionTime, restorationTime, action, pfcStatHistory); } template diff --git a/orchagent/pfcwdorch.h b/orchagent/pfcwdorch.h index 935582289c9..69ed9ecfcc7 100644 --- a/orchagent/pfcwdorch.h +++ b/orchagent/pfcwdorch.h @@ -15,6 +15,8 @@ extern "C" { #define PFC_WD_FLEX_COUNTER_GROUP "PFC_WD" +const string pfc_wd_flex_counter_group = PFC_WD_FLEX_COUNTER_GROUP; + enum class PfcWdAction { PFC_WD_ACTION_UNKNOWN, @@ -38,7 +40,7 @@ class PfcWdOrch: public Orch virtual void doTask(Consumer& consumer); virtual bool startWdOnPort(const Port& port, - uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action) = 0; + uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory) = 0; virtual bool stopWdOnPort(const Port& port) = 0; shared_ptr
getCountersTable(void) @@ -62,6 +64,8 @@ class PfcWdOrch: public Orch protected: virtual bool startWdActionOnQueue(const string &event, sai_object_id_t queueId, const string &info="") = 0; string m_platform = ""; + shared_ptr> m_pfcwdFlexCounterManager; + private: shared_ptr m_countersDb = nullptr; @@ -85,7 +89,7 @@ class PfcWdSwOrch: public PfcWdOrch void doTask(Consumer& consumer) override; virtual bool startWdOnPort(const Port& port, - uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action); + uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory); virtual bool stopWdOnPort(const Port& port); task_process_status createEntry(const string& key, const vector& data) override; @@ -115,13 +119,13 @@ class PfcWdSwOrch: public PfcWdOrch }; template - static string counterIdsToStr(const vector ids, string (*convert)(T)); + static unordered_set counterIdsToStr(const vector ids, string (*convert)(T)); bool registerInWdDb(const Port& port, - uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action); + uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory); void unregisterFromWdDb(const Port& port); void doTask(swss::NotificationConsumer &wdNotification); - string filterPfcCounters(string counters, set& losslessTc); + unordered_set filterPfcCounters(const unordered_set &counters, set& losslessTc); string getFlexCounterTableKey(string s); void disableBigRedSwitchMode(); @@ -137,10 +141,6 @@ class PfcWdSwOrch: public PfcWdOrch const vector c_queueStatIds; const vector c_queueAttrIds; - shared_ptr m_flexCounterDb = nullptr; - shared_ptr m_flexCounterTable = nullptr; - shared_ptr m_flexCounterGroupTable = nullptr; - bool m_bigRedSwitchFlag = false; int m_pollInterval; diff --git a/orchagent/port.h b/orchagent/port.h index f6b598edeb9..dc6de89aa8b 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -10,9 +10,13 @@ extern "C" { #include #include #include +#include #include - +#include +#include +#include #include +#include #define DEFAULT_PORT_VLAN_ID 1 /* @@ -30,6 +34,28 @@ extern "C" { #define VNID_NONE 0xFFFFFFFF +// SerdesValue using boost::variant to support both vector and string values +using SerdesValue = boost::variant, std::string>; + +// Visitor class for processing SerdesValue in SAI attribute setting +class SerdesValueVisitor : public boost::static_visitor { +public: + explicit SerdesValueVisitor(sai_attribute_t& attr) : attr_(attr) {} + + void operator()(const std::vector& values) const { + attr_.value.u32list.count = static_cast(values.size()); + attr_.value.u32list.list = const_cast(values.data()); + } + + void operator()(const std::string& str_value) const { + attr_.value.json.json.count = static_cast(str_value.size()); + attr_.value.json.json.list = reinterpret_cast(const_cast(str_value.data())); + } + +private: + sai_attribute_t& attr_; +}; + namespace swss { struct VlanMemberEntry @@ -73,6 +99,43 @@ struct SystemLagInfo int32_t spa_id = 0; }; +typedef std::map stp_port_ids_t; +class PortOperErrorEvent +{ +public: + PortOperErrorEvent() = default; + PortOperErrorEvent(const sai_port_error_status_t error, std::string key) : m_errorFlag(error), m_dbKeyError(key){} + ~PortOperErrorEvent() = default; + + inline void incrementErrorCount(void) { m_errorCount++; } + + inline size_t getErrorCount(void) const { return m_errorCount; } + + void recordEventTime(void) { + auto now = std::chrono::system_clock::now(); + m_eventTime = std::chrono::system_clock::to_time_t(now); + } + + std::string getEventTime(void) { + std::ostringstream oss; + oss << std::put_time(std::gmtime(&m_eventTime), "%Y-%m-%d %H:%M:%S"); + return oss.str(); + } + + inline std::string getDbKey(void) const { return m_dbKeyError; } + + // Returns true if port oper error flag in sai_port_error_status_t is set + bool isErrorSet(sai_port_error_status_t errstatus) const { return (m_errorFlag & errstatus);} + + static const std::unordered_map db_key_errors; + +private: + sai_port_error_status_t m_errorFlag = SAI_PORT_ERROR_STATUS_CLEAR; + size_t m_errorCount = 0; + std::string m_dbKeyError; // DB key for this port error + std::time_t m_eventTime = 0; +}; + class Port { public: @@ -97,7 +160,8 @@ class Port Ext, // external Int, // internal Inb, // inband - Rec // recirculation + Rec, // recirculation + Dpc // DPU Connect Port on SmartSwitch }; public: @@ -131,6 +195,7 @@ class Port uint32_t m_speed = 0; // Mbps port_learn_mode_t m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; bool m_autoneg = false; + bool m_unreliable_los = false; bool m_link_training = false; bool m_admin_state_up = false; bool m_init = false; @@ -153,6 +218,7 @@ class Port sai_object_id_t m_parent_port_id = 0; uint32_t m_dependency_bitmap = 0; sai_port_oper_status_t m_oper_status = SAI_PORT_OPER_STATUS_UNKNOWN; + sai_port_error_status_t m_oper_error_status = SAI_PORT_ERROR_STATUS_CLEAR; //Bitmap of last port oper error status std::set m_members; std::set m_child_ports; std::vector m_queue_ids; @@ -160,12 +226,14 @@ class Port sai_port_priority_flow_control_mode_t m_pfc_asym = SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED; uint8_t m_pfc_bitmask = 0; // PFC enable bit mask uint8_t m_pfcwd_sw_bitmask = 0; // PFC software watchdog enable + uint8_t m_host_tx_queue = 0; + bool m_host_tx_queue_configured = false; uint16_t m_tpid = DEFAULT_TPID; uint32_t m_nat_zone_id = 0; uint32_t m_vnid = VNID_NONE; uint32_t m_fdb_count = 0; + uint64_t m_flap_count = 0; uint32_t m_up_member_count = 0; - uint32_t m_maximum_headroom = 0; std::set m_adv_speeds; sai_port_interface_type_t m_interface_type = SAI_PORT_INTERFACE_TYPE_NONE; std::set m_adv_interface_types; @@ -190,8 +258,13 @@ class Port sai_object_id_t m_system_side_id = 0; sai_object_id_t m_line_side_id = 0; - /* pre-emphasis */ - std::map> m_preemphasis; + stp_port_ids_t m_stp_port_ids; //STP Port object ids for each STP instance + sai_int16_t m_stp_id = -1; //STP instance for the VLAN + /* Port oper error status to event map*/ + std::unordered_map m_portOperErrorToEvent; + + /* serdes attributes */ + std::map m_serdes_attrs; /* Force initial parameter configuration flags */ bool m_an_cfg = false; // Auto-negotiation (AN) @@ -200,12 +273,25 @@ class Port bool m_adv_intf_cfg = false; // Advertised interface type bool m_fec_cfg = false; // Forward Error Correction (FEC) bool m_override_fec = false; // Enable Override FEC - bool m_pfc_asym_cfg = false; // Asymmetric Priority Flow Control (PFC) bool m_lm_cfg = false; // Forwarding Database (FDB) Learning Mode (LM) bool m_lt_cfg = false; // Link Training (LT) int m_cap_an = -1; /* Capability - AutoNeg, -1 means not set */ int m_cap_lt = -1; /* Capability - LinkTraining, -1 means not set */ + + /* Path Tracing */ + uint16_t m_pt_intf_id = 0; + sai_port_path_tracing_timestamp_type_t m_pt_timestamp_template = SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23; + + /* link event damping */ + sai_redis_link_event_damping_algorithm_t m_link_event_damping_algorithm = SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED; + uint32_t m_max_suppress_time = 0; + uint32_t m_decay_half_life = 0; + uint32_t m_suppress_threshold = 0; + uint32_t m_reuse_threshold = 0; + uint32_t m_flap_penalty = 0; + + Role m_role; }; } diff --git a/orchagent/port/port_capabilities.cpp b/orchagent/port/port_capabilities.cpp index a55334d9f1a..dbe6c7090f4 100644 --- a/orchagent/port/port_capabilities.cpp +++ b/orchagent/port/port_capabilities.cpp @@ -61,7 +61,7 @@ void PortCapabilities::queryPortAttrCapabilities(T &obj, sai_port_attr_t attrId) ); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR( + SWSS_LOG_WARN( "Failed to get attribute(%s) capabilities", toStr(SAI_OBJECT_TYPE_PORT, attrId).c_str() ); diff --git a/orchagent/port/port_capabilities.h b/orchagent/port/port_capabilities.h index e937e7b943c..3941615cfcf 100644 --- a/orchagent/port/port_capabilities.h +++ b/orchagent/port/port_capabilities.h @@ -4,6 +4,7 @@ extern "C" { #include #include #include +#include } class PortCapabilities final diff --git a/orchagent/port/portcnt.h b/orchagent/port/portcnt.h index 26c8a603f64..91af64fed64 100644 --- a/orchagent/port/portcnt.h +++ b/orchagent/port/portcnt.h @@ -185,6 +185,16 @@ class PortConfig final bool is_set = false; } regn_bfm1n; // Port serdes regn_bfm1n + struct { + bool value; + bool is_set = false; + } unreliable_los; // Port unreliable_los + + struct { + std::string value; + bool is_set = false; + } custom_collection; // Port serdes custom_collection + } serdes; // Port serdes struct { @@ -202,6 +212,55 @@ class PortConfig final bool is_set = false; } description; // Port description + struct { + std::string value; + bool is_set = false; + } subport; // Port subport + + struct { + std::uint16_t value; + bool is_set = false; + } pt_intf_id; // Port interface ID for Path Tracing + + struct { + sai_port_path_tracing_timestamp_type_t value; + bool is_set = false; + } pt_timestamp_template; // Port timestamp template for Path Tracing + + struct { + sai_redis_link_event_damping_algorithm_t value; + bool is_set = false; + } link_event_damping_algorithm; // Port link event damping algorithm + + struct { + + struct { + uint32_t value; + bool is_set = false; + } max_suppress_time; // Max suppress time + + struct { + uint32_t value; + bool is_set = false; + } decay_half_life; // Decay half life + + struct { + uint32_t value; + bool is_set = false; + } suppress_threshold; // Suppress threshold + + struct { + uint32_t value; + bool is_set = false; + } reuse_threshold; // Reuse threshold + + struct { + uint32_t value; + bool is_set = false; + } flap_penalty; // Flap penalty + + } link_event_damping_config; // Port link event damping config + std::string key; std::string op; diff --git a/orchagent/port/porthlpr.cpp b/orchagent/port/porthlpr.cpp index 64c05b2aec0..15ccd3585d4 100644 --- a/orchagent/port/porthlpr.cpp +++ b/orchagent/port/porthlpr.cpp @@ -21,11 +21,12 @@ using namespace swss; // types -------------------------------------------------------------------------------------------------------------- typedef decltype(PortConfig::serdes) PortSerdes_t; +typedef decltype(PortConfig::link_event_damping_config) PortDampingConfig_t; // constants ---------------------------------------------------------------------------------------------------------- static const std::uint32_t minPortSpeed = 1; -static const std::uint32_t maxPortSpeed = 800000; +static const std::uint32_t maxPortSpeed = 1600000; static const std::uint32_t minPortMtu = 68; static const std::uint32_t maxPortMtu = 9216; @@ -114,7 +115,22 @@ static const std::unordered_map portRoleMap = { PORT_ROLE_EXT, Port::Role::Ext }, { PORT_ROLE_INT, Port::Role::Int }, { PORT_ROLE_INB, Port::Role::Inb }, - { PORT_ROLE_REC, Port::Role::Rec } + { PORT_ROLE_REC, Port::Role::Rec }, + { PORT_ROLE_DPC, Port::Role::Dpc } +}; + +static const std::unordered_map portPtTimestampTemplateMap = +{ + { PORT_PT_TIMESTAMP_TEMPLATE_1, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_8_15 }, + { PORT_PT_TIMESTAMP_TEMPLATE_2, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_12_19 }, + { PORT_PT_TIMESTAMP_TEMPLATE_3, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23 }, + { PORT_PT_TIMESTAMP_TEMPLATE_4, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_20_27 } +}; + +static const std::unordered_map g_linkEventDampingAlgorithmMap = +{ + { "disabled", SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED }, + { "aied", SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_AIED } }; // functions ---------------------------------------------------------------------------------------------------------- @@ -197,6 +213,11 @@ std::string PortHelper::getAutonegStr(const PortConfig &port) const return this->getFieldValueStr(port, PORT_AUTONEG); } +std::string PortHelper::getUnreliableLosStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_UNRELIABLE_LOS); +} + std::string PortHelper::getPortInterfaceTypeStr(const PortConfig &port) const { return this->getFieldValueStr(port, PORT_INTERFACE_TYPE); @@ -232,6 +253,16 @@ std::string PortHelper::getAdminStatusStr(const PortConfig &port) const return this->getFieldValueStr(port, PORT_ADMIN_STATUS); } +std::string PortHelper::getPtTimestampTemplateStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_PT_TIMESTAMP_TEMPLATE); +} + +std::string PortHelper::getDampingAlgorithm(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_DAMPING_ALGO); +} + bool PortHelper::parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const { SWSS_LOG_ENTER(); @@ -363,6 +394,31 @@ bool PortHelper::parsePortAutoneg(PortConfig &port, const std::string &field, co return true; } + +bool PortHelper::parsePortUnreliableLos(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portModeMap.find(value); + if (cit == portModeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.serdes.unreliable_los.value = cit->second; + port.serdes.unreliable_los.is_set = true; + + return true; +} + + bool PortHelper::parsePortAdvSpeeds(PortConfig &port, const std::string &field, const std::string &value) const { SWSS_LOG_ENTER(); @@ -652,6 +708,25 @@ bool PortHelper::parsePortSerdes(T &serdes, const std::string &field, const std: return false; } + // Use SFINAE with enable_if for extensible type handling for serdes.value + return parseSerdesValueImpl(serdes, field, value); +} + +// Helper function for JSON string-based serdes (custom_collection) +template +typename std::enable_if::value, bool>::type +PortHelper::parseSerdesValueImpl(T &serdes, const std::string &field, const std::string &value) const +{ + serdes.value = value; + serdes.is_set = true; + return true; +} + +// Helper function for vector-based serdes (most serdes attributes) +template +typename std::enable_if>::value, bool>::type +PortHelper::parseSerdesValueImpl(T &serdes, const std::string &field, const std::string &value) const +{ const auto &serdesList = tokenize(value, ','); try @@ -689,6 +764,7 @@ template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::obplev) &serdes template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::obnlev) &serdes, const std::string &field, const std::string &value) const; template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::regn_bfm1p) &serdes, const std::string &field, const std::string &value) const; template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::regn_bfm1n) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::custom_collection) &serdes, const std::string &field, const std::string &value) const; @@ -748,6 +824,151 @@ bool PortHelper::parsePortDescription(PortConfig &port, const std::string &field return true; } +bool PortHelper::parsePortSubport(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + try + { + port.subport.value = value; + port.subport.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool PortHelper::parsePortLinkEventDampingAlgorithm(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = g_linkEventDampingAlgorithmMap.find(value); + if (cit == g_linkEventDampingAlgorithmMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.link_event_damping_algorithm.value = cit->second; + port.link_event_damping_algorithm.is_set = true; + + return true; +} + +template +bool PortHelper::parsePortLinkEventDampingConfig(T &damping_config_attr, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + try + { + damping_config_attr.value = to_uint(value); + damping_config_attr.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::max_suppress_time) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::decay_half_life) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::suppress_threshold) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::reuse_threshold) &damping_config_attr, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortLinkEventDampingConfig(decltype(PortDampingConfig_t::flap_penalty) &damping_config_attr, const std::string &field, const std::string &value) const; + +bool PortHelper::parsePortPtIntfId(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + uint16_t pt_intf_id; + try + { + if (value != "None") + { + pt_intf_id = to_uint(value); + if (pt_intf_id < 1 || pt_intf_id > 4095) + { + throw std::invalid_argument("Out of range Path Tracing Interface ID: " + value); + } + + port.pt_intf_id.value = pt_intf_id; + } + else + { + /* + * In SAI, Path Tracing Interface ID 0 means Path Tracing disabled. + * When Path Tracing Interface ID is not set (i.e., value is None), + * we set the Interface ID to 0 in ASIC DB in order to disable + * Path Tracing on the port. + */ + port.pt_intf_id.value = 0; + } + port.pt_intf_id.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool PortHelper::parsePortPtTimestampTemplate(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + std::unordered_map::const_iterator cit; + + if (value != "None") + { + cit = portPtTimestampTemplateMap.find(value); + } + else + { + /* + * When Path Tracing Timestamp Template is not specified (i.e., value is None), + * we use Template3 (which is the default template in SAI). + */ + cit = portPtTimestampTemplateMap.find("template3"); + } + if (cit == portPtTimestampTemplateMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.pt_timestamp_template.value = cit->second; + port.pt_timestamp_template.is_set = true; + + return true; +} + bool PortHelper::parsePortConfig(PortConfig &port) const { SWSS_LOG_ENTER(); @@ -855,6 +1076,13 @@ bool PortHelper::parsePortConfig(PortConfig &port) const return false; } } + else if (field == PORT_UNRELIABLE_LOS) + { + if (!this->parsePortUnreliableLos(port, field, value)) + { + return false; + } + } else if (field == PORT_PREEMPHASIS) { if (!this->parsePortSerdes(port.serdes.preemphasis, field, value)) @@ -974,6 +1202,13 @@ bool PortHelper::parsePortConfig(PortConfig &port) const return false; } } + else if (field == PORT_CUSTOM_SERDES_ATTRS) + { + if (!this->parsePortSerdes(port.serdes.custom_collection, field, value)) + { + return false; + } + } else if (field == PORT_ROLE) { if (!this->parsePortRole(port, field, value)) @@ -995,13 +1230,82 @@ bool PortHelper::parsePortConfig(PortConfig &port) const return false; } } + else if (field == PORT_SUBPORT) + { + if (!this->parsePortSubport(port, field, value)) + { + return false; + } + } + else if (field == PORT_PT_INTF_ID) + { + if (!this->parsePortPtIntfId(port, field, value)) + { + return false; + } + } + else if (field == PORT_PT_TIMESTAMP_TEMPLATE) + { + if (!this->parsePortPtTimestampTemplate(port, field, value)) + { + return false; + } + } + else if (field == PORT_DAMPING_ALGO) + { + if (!this->parsePortLinkEventDampingAlgorithm(port, field, value)) + { + return false; + } + } + else if (field == PORT_MAX_SUPPRESS_TIME) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.max_suppress_time, field, value)) + { + return false; + } + } + else if (field == PORT_DECAY_HALF_LIFE) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.decay_half_life, field, value)) + { + return false; + } + } + else if (field == PORT_SUPPRESS_THRESHOLD) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.suppress_threshold, field, value)) + { + return false; + } + } + else if (field == PORT_REUSE_THRESHOLD) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.reuse_threshold, field, value)) + { + return false; + } + } + else if (field == PORT_FLAP_PENALTY) + { + if (!this->parsePortLinkEventDampingConfig(port.link_event_damping_config.flap_penalty, field, value)) + { + return false; + } + } + else if (field == PORT_MODE) + { + /* Placeholder to prevent warning. Not needed to be parsed here. + * Setting exists in sonic-port.yang with possible values: routed|access|trunk + */ + } else { SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); } } - return this->validatePortConfig(port); + return true; } bool PortHelper::validatePortConfig(PortConfig &port) const diff --git a/orchagent/port/porthlpr.h b/orchagent/port/porthlpr.h index 4bcae7fca5b..153c784be73 100644 --- a/orchagent/port/porthlpr.h +++ b/orchagent/port/porthlpr.h @@ -19,6 +19,7 @@ class PortHelper final bool fecIsOverrideRequired(const std::string &str) const; std::string getAutonegStr(const PortConfig &port) const; + std::string getUnreliableLosStr(const PortConfig &port) const; std::string getPortInterfaceTypeStr(const PortConfig &port) const; std::string getAdvInterfaceTypesStr(const PortConfig &port) const; std::string getFecStr(const PortConfig &port) const; @@ -26,8 +27,11 @@ class PortHelper final std::string getLearnModeStr(const PortConfig &port) const; std::string getLinkTrainingStr(const PortConfig &port) const; std::string getAdminStatusStr(const PortConfig &port) const; + std::string getPtTimestampTemplateStr(const PortConfig &port) const; + std::string getDampingAlgorithm(const PortConfig &port) const; bool parsePortConfig(PortConfig &port) const; + bool validatePortConfig(PortConfig &port) const; private: std::string getFieldValueStr(const PortConfig &port, const std::string &field) const; @@ -35,11 +39,24 @@ class PortHelper final template bool parsePortSerdes(T &serdes, const std::string &field, const std::string &value) const; + template + typename std::enable_if::value, bool>::type + parseSerdesValueImpl(T &serdes, const std::string &field, const std::string &value) const; + + template + typename std::enable_if>::value, bool>::type + parseSerdesValueImpl(T &serdes, const std::string &field, const std::string &value) const; + + bool parsePortLinkEventDampingAlgorithm(PortConfig &port, const std::string &field, const std::string &value) const; + template + bool parsePortLinkEventDampingConfig(T &damping_config_attr, const std::string &field, const std::string &value) const; + bool parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortIndex(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortLanes(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortSpeed(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortAutoneg(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortUnreliableLos(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortAdvSpeeds(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortInterfaceType(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortAdvInterfaceTypes(PortConfig &port, const std::string &field, const std::string &value) const; @@ -52,6 +69,7 @@ class PortHelper final bool parsePortRole(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortAdminStatus(PortConfig &port, const std::string &field, const std::string &value) const; bool parsePortDescription(PortConfig &port, const std::string &field, const std::string &value) const; - - bool validatePortConfig(PortConfig &port) const; + bool parsePortSubport(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortPtIntfId(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortPtTimestampTemplate(PortConfig &port, const std::string &field, const std::string &value) const; }; diff --git a/orchagent/port/portschema.h b/orchagent/port/portschema.h index 56b2541c375..b2667baa250 100644 --- a/orchagent/port/portschema.h +++ b/orchagent/port/portschema.h @@ -51,38 +51,56 @@ #define PORT_ROLE_INT "Int" #define PORT_ROLE_INB "Inb" #define PORT_ROLE_REC "Rec" +#define PORT_ROLE_DPC "Dpc" -#define PORT_ALIAS "alias" -#define PORT_INDEX "index" -#define PORT_LANES "lanes" -#define PORT_SPEED "speed" -#define PORT_AUTONEG "autoneg" -#define PORT_ADV_SPEEDS "adv_speeds" -#define PORT_INTERFACE_TYPE "interface_type" -#define PORT_ADV_INTERFACE_TYPES "adv_interface_types" -#define PORT_FEC "fec" -#define PORT_MTU "mtu" -#define PORT_TPID "tpid" -#define PORT_PFC_ASYM "pfc_asym" -#define PORT_LEARN_MODE "learn_mode" -#define PORT_LINK_TRAINING "link_training" -#define PORT_PREEMPHASIS "preemphasis" -#define PORT_IDRIVER "idriver" -#define PORT_IPREDRIVER "ipredriver" -#define PORT_PRE1 "pre1" -#define PORT_PRE2 "pre2" -#define PORT_PRE3 "pre3" -#define PORT_MAIN "main" -#define PORT_POST1 "post1" -#define PORT_POST2 "post2" -#define PORT_POST3 "post3" -#define PORT_ATTN "attn" -#define PORT_OB_M2LP "ob_m2lp" -#define PORT_OB_ALEV_OUT "ob_alev_out" -#define PORT_OBPLEV "obplev" -#define PORT_OBNLEV "obnlev" -#define PORT_REGN_BFM1P "regn_bfm1p" -#define PORT_REGN_BFM1N "regn_bfm1n" -#define PORT_ROLE "role" -#define PORT_ADMIN_STATUS "admin_status" -#define PORT_DESCRIPTION "description" +#define PORT_PT_TIMESTAMP_TEMPLATE_1 "template1" +#define PORT_PT_TIMESTAMP_TEMPLATE_2 "template2" +#define PORT_PT_TIMESTAMP_TEMPLATE_3 "template3" +#define PORT_PT_TIMESTAMP_TEMPLATE_4 "template4" + +#define PORT_ALIAS "alias" +#define PORT_INDEX "index" +#define PORT_LANES "lanes" +#define PORT_SPEED "speed" +#define PORT_AUTONEG "autoneg" +#define PORT_ADV_SPEEDS "adv_speeds" +#define PORT_INTERFACE_TYPE "interface_type" +#define PORT_ADV_INTERFACE_TYPES "adv_interface_types" +#define PORT_FEC "fec" +#define PORT_MTU "mtu" +#define PORT_TPID "tpid" +#define PORT_PFC_ASYM "pfc_asym" +#define PORT_LEARN_MODE "learn_mode" +#define PORT_LINK_TRAINING "link_training" +#define PORT_PREEMPHASIS "preemphasis" +#define PORT_IDRIVER "idriver" +#define PORT_IPREDRIVER "ipredriver" +#define PORT_PRE1 "pre1" +#define PORT_PRE2 "pre2" +#define PORT_PRE3 "pre3" +#define PORT_MAIN "main" +#define PORT_POST1 "post1" +#define PORT_POST2 "post2" +#define PORT_POST3 "post3" +#define PORT_ATTN "attn" +#define PORT_OB_M2LP "ob_m2lp" +#define PORT_OB_ALEV_OUT "ob_alev_out" +#define PORT_OBPLEV "obplev" +#define PORT_OBNLEV "obnlev" +#define PORT_REGN_BFM1P "regn_bfm1p" +#define PORT_REGN_BFM1N "regn_bfm1n" +#define PORT_CUSTOM_SERDES_ATTRS "custom_serdes_attrs" +#define PORT_ROLE "role" +#define PORT_ADMIN_STATUS "admin_status" +#define PORT_DESCRIPTION "description" +#define PORT_SUBPORT "subport" +#define PORT_PT_INTF_ID "pt_interface_id" +#define PORT_PT_TIMESTAMP_TEMPLATE "pt_timestamp_template" +#define PORT_DAMPING_ALGO "link_event_damping_algorithm" +#define PORT_MAX_SUPPRESS_TIME "max_suppress_time" +#define PORT_DECAY_HALF_LIFE "decay_half_life" +#define PORT_SUPPRESS_THRESHOLD "suppress_threshold" +#define PORT_REUSE_THRESHOLD "reuse_threshold" +#define PORT_FLAP_PENALTY "flap_penalty" +#define PORT_MODE "mode" +#define PORT_UNRELIABLE_LOS "unreliable_los" diff --git a/orchagent/port_flr.lua b/orchagent/port_flr.lua new file mode 100644 index 00000000000..5d69cbaac13 --- /dev/null +++ b/orchagent/port_flr.lua @@ -0,0 +1,465 @@ +-- KEYS - port IDs +-- ARGV[1] - counters db index +-- ARGV[2] - counters table name +-- ARGV[3] - poll time interval +-- return log + +local logtable = {} + +local function logit(msg) + logtable[#logtable+1] = tostring(msg) +end + +local counters_db = ARGV[1] +local counters_table_name = ARGV[2] + +local APPL_DB = 0 -- Application database +local COUNTERS_DB = 2 -- Counters and statistics +local STATE_DB = 6 -- State database + +local KEY_SPEED = 'speed' +local KEY_LANES = 'lanes' +local KEY_OPER_STATUS = 'oper_status' + +local STATE_DB_PORT_TABLE_PREFIX = 'PORT_TABLE|' +local APPL_DB_PORT_TABLE_PREFIX = 'PORT_TABLE:' + +local rates_table_name = "RATES" +local bookmark_table_name = "RATES:GLOBAL" +local BIN_FILTER_VALUE = 10 +local MIN_SIGNIFICANT_BINS = 2 +local FEC_FLR_POLL_INTERVAL = 120 +local MFC = 8 + +local function get_port_name_from_oid(port) + redis.call('SELECT', COUNTERS_DB) + local port_name_hash = redis.call('HGETALL', 'COUNTERS_PORT_NAME_MAP') + local num_port_keys = redis.call('HLEN', 'COUNTERS_PORT_NAME_MAP') + -- flip port name hash + for i = 1, num_port_keys do + local k_index = i*2 -1 + local v_index = i*2 + if (port_name_hash[v_index] == port) then + return port_name_hash[k_index] + end + end + return 0 +end + +local function get_port_speed_numlanes(interface_name) + -- get the port config from config db + local _ + local port_speed, lane_count = 0, 0 + + -- Get the port configure + redis.call('SELECT', APPL_DB) + local lanes = redis.call('HGET', APPL_DB_PORT_TABLE_PREFIX .. interface_name, KEY_LANES) + + if lanes then + port_speed = redis.call('HGET', APPL_DB_PORT_TABLE_PREFIX .. interface_name, KEY_SPEED) + + -- we were spliting it on ',' + _, lane_count = string.gsub(lanes, ",", ",") + lane_count = lane_count + 1 + end + -- switch back to counter db + redis.call('SELECT', counters_db) + + return port_speed, lane_count +end + + +local function get_interleaving_factor_for_port(port_oid) + -- Correlation between port-speeds, number of lanes and + -- Interleaving factor + -- This lookup table is a direct implementation of the table present in the HLD. + -- The key is a string in the format: 'speed_lanes' + local interleaving_map = { + ['1600000_8'] = 4, + ['800000_8'] = 4, + ['400000_8'] = 2, + ['400000_4'] = 2, + ['200000_4'] = 2, + ['200000_2'] = 2, + ['100000_2'] = 2, + } + + local port_name = get_port_name_from_oid(port_oid) + local port_speed, port_numlanes = get_port_speed_numlanes(port_name) + + -- Create the key from the port's properties to search the map. + local key = tostring(port_speed) .. '_' .. tostring(port_numlanes) + + -- reset redis object to COUNTERS_DB + redis.call('SELECT', COUNTERS_DB) + + -- Look up the factor. + return interleaving_map[key] or 1 +end + +-- Get configuration +redis.call('SELECT', counters_db) + +-- Get numeric value from Redis table:port, returns 0 if not found +local function get_kv_from_redis_db(table_name, port, key) + local value = redis.call('HGET', table_name .. ':' .. port, key) + value = tonumber(value) or 0 + return value +end + +-- Store value in Redis table:port +local function set_kv_in_redis_db(table_name, port, key, value) + redis.call('HSET', table_name .. ':' .. port, key, tostring(value)) +end + + +local fec_cwerr_keys = { + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15", +} + +-- Calculate delta values for FEC codeword error bins (S0-S15) for predicted FLR computation +-- This function computes the difference between current and previous counter values for each +-- symbol error bin, which represents codeword errors that occurred in the current interval. +-- Parameters: +-- port: Port identifier +-- Returns: +-- delta_bins: Array of delta values for each symbol error bin (S0-S15) +local function get_fec_cws_delta(port) + local delta_bins = {} + + local binval = 0 + local binval_last = 0 + local delta = 0 + for _, key in ipairs(fec_cwerr_keys) do + -- Get current counter value from COUNTERS table + binval = tonumber(get_kv_from_redis_db(counters_table_name, port, key)) + -- Get previous counter value from RATES table (where "_last" values are stored) + binval_last = tonumber(get_kv_from_redis_db(rates_table_name, port, key .. "_last")) + -- Calculate delta for this interval + delta = binval - binval_last + table.insert(delta_bins, delta) + -- Store current value as "_last" for next interval calculation + set_kv_in_redis_db(rates_table_name, port, key .. "_last", binval) + end + + return delta_bins +end + +-- Sum all codeword counts across symbol error bins +local function get_total_cws(codewords) + local total_cw = 0 + + for _, value in ipairs(codewords) do + total_cw = total_cw + value + end + + return total_cw +end + +-- Count the number of symbol error bins with significant codeword error counts +-- Only bins with counts greater than BIN_FILTER_VALUE (=10) are considered significant +-- for linear regression analysis. This filters out noise and ensures statistical reliability. +-- Parameters: +-- bins: Array of codeword error counts for each symbol error bin +-- Returns: +-- significant_count: Number of bins with values greater than BIN_FILTER_VALUE +local function count_significant_bins(bins) + local significant_count = 0 + for _, value in ipairs(bins) do + if value > BIN_FILTER_VALUE then + significant_count = significant_count + 1 + end + end + + return significant_count +end + +-- Compute slope and intercept for linear regression on logarithmic codeword error ratios +-- The codeword error ratio typically follows an exponential decay curve, which becomes +-- linear when transformed to logarithmic scale, enabling linear regression analysis. +-- Parameters: +-- bins: Array of codeword error counts for each symbol error bin (S1-S15, S0 excluded) +-- total_cws: Total number of codewords across all bins +-- Returns: +-- slope, intercept, r_squared: Linear regression parameters and accuracy measure +local function compute_slope_intercept(bins, total_cws) + -- Step1: Normalize to probability of cw_i errors where cw_i is the probability of a + -- CW with i symbol errors (only consider bins with significant error counts) + local normalised_cw = {} + for _, value in ipairs(bins) do + if value > BIN_FILTER_VALUE then + table.insert(normalised_cw, value/total_cws) + else + table.insert(normalised_cw, 0) + end + end + + -- Step2 :Convert the exponential data to logarithmic data + local log_values_cw = {} + local nan = 0/0 + for _, normalised_cw_i in ipairs(normalised_cw) do + if normalised_cw_i > 0 then + table.insert(log_values_cw, math.log10(normalised_cw_i)) + else + table.insert(log_values_cw, nan) + end + end + + -- Step3 : Prepare mask vector + local mask = {} + for _, log_value_cw_i in ipairs(log_values_cw) do + if log_value_cw_i ~= log_value_cw_i then + table.insert(mask, 0) + else + table.insert(mask, 1) + end + end + + --Step4 : Linear Regression + local data_length = #bins + logit("Data Length :" .. data_length) + + local B = 0 -- ## n + local C = 0 -- ## sigma(x) + local D = 0 -- ## sigma(y) + local E = 0 -- ## sigma(x^2) + local F = 0 -- ## sigma(xy) + local G = 0 -- ## sigma(y^2) + + for i = 1, data_length do + if mask[i] == 1 then + B = B + mask[i] + C = C + (i) + D = D + (log_values_cw[i]) + E = E + ((i) * (i)) + F = F + ((i) * (log_values_cw[i])) + G = G + ((log_values_cw[i]) * (log_values_cw[i])) + end + end + + -- Slope and Intercept + local slope = (B*F - C*D)/(B*E - C*C) + local intercept = (D - slope*C) / B + + -- R^2 (measure of accuracy) + local numerator = (B * F - C * D) + local denominator = math.sqrt((B * E - C*C) * (B * G - D*D)) + local r_squared = (numerator / denominator) * (numerator / denominator) + + return slope, intercept, r_squared +end + +-- Compute the predicted Frame Loss Ratio (FLR) from linear regression parameters +-- Uses the fitted slope and intercept to extrapolate CER for uncorrectable symbol errors +-- (window 16-20) and converts to FLR using IEEE FEC formula with interleaving factor. +-- Parameters: +-- slope: Fitted slope from log-linear regression +-- intercept: Fitted intercept from log-linear regression +-- sum_window: Array of [start_index, end_index] for uncorrectable error window (16-20) +-- x_interleaving: FEC interleaving factor (1=no interleaving, 2=400G, 4=800G+) +-- mfc: MAC frames per codeword (8 for RS-544 FEC) +-- Returns: +-- cer: Predicted Codeword Error Ratio for the window +-- flr: Predicted Frame Loss Ratio using IEEE formula +local function extrapolate_flr_from_regression(slope, intercept, sum_window, x_interleaving, mfc) + -- Transform logarithmic regression line back to linear scale to get predicted CER + local function line_function(x) + return 10 ^ (intercept + (slope*x)) + end + + -- # Sum predicted corrected errors in the given window + local cer = 0 + local flr = 0 + for x = sum_window[1], sum_window[2]+1 do + cer = cer + line_function(x) + end + + -- # IEEE FLR formula + flr = cer * (1 + x_interleaving * mfc) / mfc + + return cer, flr +end + +-- Main function to calculate predicted FLR using linear regression on codeword error distribution +-- Steps: Get error deltas -> Remove S0 -> Check sufficient data -> Perform regression -> Extrapolate FLR +-- Parameters: +-- port: Port identifier +-- Returns: +-- flr: Predicted Frame Loss Ratio (0 if insufficient data for prediction) +local function compute_predicted_flr(port) + local bins = get_fec_cws_delta(port) + + local total_cws = get_total_cws(bins) + logit("SUM : " .. total_cws) + if total_cws == 0 then + logit("Total corrected codewords is zero, cannot compute slope and intercept.") + return 0 + end + + -- Trim out _S0 from the data + table.remove(bins, 1) + + local significant_bins = count_significant_bins(bins) + logit("Significant Bins : " .. significant_bins) + if significant_bins < MIN_SIGNIFICANT_BINS then + logit("Not enough significant bins to compute slope and intercept.") + return 0 + end + + local slope = 0 + local intercept = 0 + local r_squared = 0 + + slope, intercept, r_squared = compute_slope_intercept(bins, total_cws) + logit("Slope : " .. slope) + logit("Intercept : " .. intercept) + logit("R^2 : " .. r_squared) + + local cer = 0 + local flr = 0 + + local sum_window = {16,20} + local x_interleaving = get_interleaving_factor_for_port(port) + cer, flr = extrapolate_flr_from_regression(slope, intercept, {16, 20}, x_interleaving, MFC) + logit("CER : " .. cer) + logit("FLR : " .. flr) + return flr, r_squared +end + +-- Calculate observed FEC FLR based on uncorrectable codeword ratio +-- Formula: CER = Uncorrectable_CWs / Total_CWs, FLR = 1.125 * CER (X=1 interleaving) +-- Parameters: +-- port: Port identifier +-- Returns: +-- fec_flr: Observed Frame Loss Ratio (0 if no data change or counters unavailable) +local function compute_observed_flr(port) + + local fec_uncorr_codewords = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES') + local fec_corr_codewords = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES') + local fec_codewords_with_zero_errors = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0') + + -- check if these values are defined + if fec_uncorr_codewords and fec_corr_codewords and fec_codewords_with_zero_errors then + local fec_uncorr_codewords_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES_last') + local fec_corr_codewords_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES_last') + local fec_codewords_with_zero_errors_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0_last') + + fec_uncorr_codewords_last = tonumber(fec_uncorr_codewords_last) or 0 + fec_corr_codewords_last = tonumber(fec_corr_codewords_last) or 0 + fec_codewords_with_zero_errors_last = tonumber(fec_codewords_with_zero_errors_last) or 0 + + local fec_uncorr_codewords_delta = fec_uncorr_codewords - fec_uncorr_codewords_last + local fec_corr_codewords_delta = fec_corr_codewords - fec_corr_codewords_last + local fec_codewords_with_zero_errors_delta = fec_codewords_with_zero_errors - fec_codewords_with_zero_errors_last + + local total_codewords_delta = fec_uncorr_codewords_delta + + fec_corr_codewords_delta + + fec_codewords_with_zero_errors_delta + + -- if total_delta == 0, nothing has changed so don't compute flr + if (total_codewords_delta == 0) then + return 0 + end + + local codeword_error_ratio = fec_uncorr_codewords_delta / total_codewords_delta + -- assuming interleaving factor is X = 1 + local x_interleaving = get_interleaving_factor_for_port(port) + local fec_flr = x_interleaving * codeword_error_ratio + + -- update old counter values + redis.call('HSET', rates_table_name ..':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES_last', fec_uncorr_codewords) + redis.call('HSET', rates_table_name ..':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES_last', fec_corr_codewords) + redis.call('HSET', rates_table_name ..':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0_last', fec_codewords_with_zero_errors) + + return tonumber(fec_flr) + end + return 0 +end + +-- Update FLR timestamp in bookmark table +local function update_flr_timestamp() + local time = redis.call('TIME') + local timestamp_current = time[1] + redis.call('HSET', bookmark_table_name, 'FEC_FLR_TIMESTAMP_last', timestamp_current) +end + +-- Check if FLR calculation interval has elapsed (default 120s) +local function time_to_calculate_flr() + local time = redis.call('TIME') + local timestamp_current = time[1] + + -- Check if FEC_FLR_TIMESTAMP_last exists in the bookmark table + local timestamp_last = redis.call('HGET', bookmark_table_name, 'FEC_FLR_TIMESTAMP_last') + + -- If the key doesn't exist, return true + if timestamp_last == false then + return true -- First time calculation + end + + timestamp_last = tonumber(timestamp_last) or 0 + + if (timestamp_last == 0) or ((timestamp_current - timestamp_last) >= FEC_FLR_POLL_INTERVAL) then + return true + end + + return false +end + +-- Check if FEC data exists for a port by verifying the presence of correctable frames counter +-- Parameters: +-- port: Port identifier +-- Returns: +-- true if FEC data exists, false otherwise +local function fec_data_exists(port) + local hash_key = counters_table_name .. ':' .. port + local exists = redis.call('HEXISTS', hash_key, 'SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES') + return exists == 1 +end + +-- Main FLR computation function that orchestrates both observed and predicted FLR calculation +-- Called for each port at the configured interval. Computes and stores both FEC_FLR (observed) +-- and FEC_FLR_PREDICTED in the RATES table for telemetry collection. +-- Parameters: +-- port: Port identifier +local function compute_flr_for_port(port) + if (fec_data_exists(port)) then + -- Calculate observed FLR from uncorrectable codeword ratio + local fec_flr = compute_observed_flr(port) + redis.call('HSET', rates_table_name ..':' .. port, 'FEC_FLR', fec_flr) + + -- Calculate predicted FLR using linear regression on codeword error distribution + local predicted_flr + local r_squared + predicted_flr, r_squared = compute_predicted_flr(port) + predicted_flr = predicted_flr or 0 + r_squared = r_squared or 0 + redis.call('HSET', rates_table_name .. ':' .. port, 'FEC_FLR_PREDICTED', tostring(predicted_flr)) + redis.call('HSET', rates_table_name .. ':' .. port, 'FEC_FLR_R_SQUARED', tostring(r_squared)) + end +end + +local n = table.getn(KEYS) + +if (time_to_calculate_flr()) then + for i = 1, n do + compute_flr_for_port(KEYS[i]) + end + update_flr_timestamp() +end + +return logtable diff --git a/orchagent/port_rates.lua b/orchagent/port_rates.lua index c29977d153b..0ae069a6c72 100644 --- a/orchagent/port_rates.lua +++ b/orchagent/port_rates.lua @@ -13,6 +13,9 @@ end local counters_db = ARGV[1] local counters_table_name = ARGV[2] local rates_table_name = "RATES" +local appl_db_port = "PORT_TABLE" +-- refer back to common/schema.h +local appl_db = "0" -- Get configuration redis.call('SELECT', counters_db) @@ -29,11 +32,138 @@ logit(alpha) logit(one_minus_alpha) logit(delta) +local port_interface_oid_map = redis.call('HGETALL', "COUNTERS_PORT_NAME_MAP") +local port_interface_oid_key_count = redis.call('HLEN', "COUNTERS_PORT_NAME_MAP") + +-- lookup interface name from port oid + +local function find_interface_name_from_oid(port) + + for i = 1, port_interface_oid_key_count do + local index = i * 2 - 1 + if port_interface_oid_map[index + 1] == port then + return port_interface_oid_map[index] + end + end + + return 0 +end + +-- calculate lanes and serdes speed from interface lane count & speed +-- return lane speed and serdes speed + +local function calculate_lane_and_serdes_speed(count, speed) + + local serdes = 0 + local lane_speed = 0 + + if count == 0 or speed == 0 then + logit("Invalid number of lanes or speed") + return 0, 0 + end + + -- check serdes_cnt if it is a multiple of speed + local serdes_cnt = math.fmod(speed, count) + + if serdes_cnt ~= 0 then + logit("Invalid speed and number of lanes combination") + return 0, 0 + end + + lane_speed = math.floor(speed / count) + + -- return value in bits + if lane_speed == 1000 then + serdes = 1.25e+9 + elseif lane_speed == 10000 then + serdes = 10.3125e+9 + elseif lane_speed == 25000 then + serdes = 25.78125e+9 + elseif lane_speed == 50000 then + serdes = 53.125e+9 + elseif lane_speed == 100000 then + serdes = 106.25e+9 + elseif lane_speed == 200000 then + serdes = 212.5e+9 + else + logit("Invalid serdes speed") + end + + return lane_speed, serdes +end + +-- look up interface lanes count, lanes speed & serdes speed +-- return lane count, lane speed, serdes speed + +local function find_lanes_and_serdes(interface_name) + -- get the port config from config db + local _ + local serdes, lane_speed, count = 0, 0, 0 + + -- Get the port configure + redis.call('SELECT', appl_db) + local lanes = redis.call('HGET', appl_db_port ..':'..interface_name, 'lanes') + + if lanes then + local speed = redis.call('HGET', appl_db_port ..':'..interface_name, 'speed') + + -- we were spliting it on ',' + _, count = string.gsub(lanes, ",", ",") + count = count + 1 + + lane_speed, serdes = calculate_lane_and_serdes_speed(count, speed) + + end + -- switch back to counter db + redis.call('SELECT', counters_db) + + return count, lane_speed, serdes +end + +-- find the max T - Maximum FEC histogram bin with non-zero count +-- return max T value + +local function find_maxT(port) + local maxT = -1 + for i = 0, 15 do + local fec_cwi = 'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S' .. i + local fec_cwi_val = redis.call('HGET', counters_table_name .. ':' .. port, fec_cwi) + if fec_cwi_val then + fec_cwi_val = tonumber(fec_cwi_val) or 0 + if fec_cwi_val > 0 then + maxT = i + end + end + end + return maxT +end + local function compute_rate(port) + local state_table = rates_table_name .. ':' .. port .. ':' .. 'PORT' local initialized = redis.call('HGET', state_table, 'INIT_DONE') logit(initialized) + -- FEC BER + local fec_corr_bits, fec_uncorr_frames + local maxT = -1 + local fec_corr_bits_ber_new, fec_uncorr_bits_ber_new = -1, -1 + -- HLD review suggest to use the statistical average when calculate the post fec ber + local rs_average_frame_ber = 1e-8 + local lanes_speed, serdes_speed, lanes_count = 0, 0, 0 + + -- lookup interface name from oid + local interface_name = find_interface_name_from_oid(port) + if interface_name then + lanes_count, lanes_speed, serdes_speed = find_lanes_and_serdes(interface_name) + + if lanes_count and serdes_speed then + fec_corr_bits = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_CORRECTED_BITS') + fec_uncorr_frames = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES') + end + end + + -- Get new COUNTERS values local in_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_UCAST_PKTS') local in_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS') @@ -58,10 +188,11 @@ local function compute_rate(port) local out_octets_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_OCTETS_last') -- Calculate new rates values - local rx_bps_new = (in_octets - in_octets_last) / delta * 1000 - local tx_bps_new = (out_octets - out_octets_last) / delta * 1000 - local rx_pps_new = ((in_ucast_pkts + in_non_ucast_pkts) - (in_ucast_pkts_last + in_non_ucast_pkts_last)) / delta * 1000 - local tx_pps_new = ((out_ucast_pkts + out_non_ucast_pkts) - (out_ucast_pkts_last + out_non_ucast_pkts_last)) / delta * 1000 + local scale_factor = 1000 / delta + local rx_bps_new = (in_octets - in_octets_last) * scale_factor + local tx_bps_new = (out_octets - out_octets_last) * scale_factor + local rx_pps_new = ((in_ucast_pkts + in_non_ucast_pkts) - (in_ucast_pkts_last + in_non_ucast_pkts_last)) * scale_factor + local tx_pps_new = ((out_ucast_pkts + out_non_ucast_pkts) - (out_ucast_pkts_last + out_non_ucast_pkts_last)) * scale_factor if initialized == "DONE" then -- Get old rates values @@ -83,6 +214,26 @@ local function compute_rate(port) redis.call('HSET', rates_table_name .. ':' .. port, 'TX_PPS', tx_pps_new) redis.call('HSET', state_table, 'INIT_DONE', 'DONE') end + + -- only do the calculation when all info present + + if fec_corr_bits and fec_uncorr_frames and lanes_count and serdes_speed then + local fec_corr_bits_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_FEC_CORRECTED_BITS_last') + local fec_uncorr_frames_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_FEC_NOT_CORRECTABLE_FARMES_last') + + -- Initialize to 0 if last counter values does not exist (during first boot for eg) + fec_corr_bits_last = tonumber(fec_corr_bits_last) or 0 + fec_uncorr_frames_last = tonumber(fec_uncorr_frames_last) or 0 + + local serdes_rate_total = lanes_count * serdes_speed * delta / 1000 + + fec_corr_bits_ber_new = (fec_corr_bits - fec_corr_bits_last) / serdes_rate_total + fec_uncorr_bits_ber_new = (fec_uncorr_frames - fec_uncorr_frames_last) * rs_average_frame_ber / serdes_rate_total + else + logit("FEC counters or lane info not found on " .. port) + end + + maxT = find_maxT(port) else redis.call('HSET', state_table, 'INIT_DONE', 'COUNTERS_LAST') end @@ -94,6 +245,26 @@ local function compute_rate(port) redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last', out_non_ucast_pkts) redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_OCTETS_last', in_octets) redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_OCTETS_last', out_octets) + + -- do not update FEC related stat if we dont have it + + if not fec_corr_bits or not fec_uncorr_frames or not fec_corr_bits_ber_new or + not fec_uncorr_bits_ber_new then + logit("FEC counters not found on " .. port) + return + end + -- Set BER values + local fec_pre_ber_max = redis.call('HGET', rates_table_name .. ':' .. port, 'FEC_PRE_BER_MAX') + fec_pre_ber_max = tonumber(fec_pre_ber_max) or 0 + + if fec_corr_bits_ber_new > fec_pre_ber_max then + redis.call('HSET', rates_table_name .. ':' .. port, 'FEC_PRE_BER_MAX', fec_corr_bits_ber_new) + end + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_FEC_CORRECTED_BITS_last', fec_corr_bits) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_FEC_NOT_CORRECTABLE_FARMES_last', fec_uncorr_frames) + redis.call('HSET', rates_table_name .. ':' .. port, 'FEC_PRE_BER', fec_corr_bits_ber_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'FEC_POST_BER', fec_uncorr_bits_ber_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'FEC_MAX_T', maxT) end local n = table.getn(KEYS) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp old mode 100755 new mode 100644 index 98c13b7dc7e..cb27f138f5a --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1,3 +1,7 @@ +#include +#include +#include + #include "portsorch.h" #include "intfsorch.h" #include "bufferorch.h" @@ -6,6 +10,8 @@ #include "vxlanorch.h" #include "directory.h" #include "subintf.h" +#include "notifications.h" +#include "stporch.h" #include #include @@ -32,6 +38,9 @@ #include "switchorch.h" #include "stringutility.h" #include "subscriberstatetable.h" +#include "warm_restart.h" + +#include "saitam.h" extern sai_switch_api_t *sai_switch_api; extern sai_bridge_api_t *sai_bridge_api; @@ -43,13 +52,16 @@ extern sai_acl_api_t* sai_acl_api; extern sai_queue_api_t *sai_queue_api; extern sai_object_id_t gSwitchId; extern sai_fdb_api_t *sai_fdb_api; +extern sai_tam_api_t *sai_tam_api; extern sai_l2mc_group_api_t *sai_l2mc_group_api; +extern sai_buffer_api_t *sai_buffer_api; extern IntfsOrch *gIntfsOrch; extern NeighOrch *gNeighOrch; extern CrmOrch *gCrmOrch; extern BufferOrch *gBufferOrch; extern FdbOrch *gFdbOrch; extern SwitchOrch *gSwitchOrch; +extern StpOrch *gStpOrch; extern Directory gDirectory; extern sai_system_port_api_t *sai_system_port_api; extern string gMySwitchType; @@ -57,6 +69,8 @@ extern int32_t gVoqMySwitchId; extern string gMyHostName; extern string gMyAsicName; extern event_handle_t g_events_handle; +extern bool isChassisDbInUse(); +extern bool gMultiAsicVoq; // defines ------------------------------------------------------------------------------------------------------------ @@ -71,10 +85,9 @@ extern event_handle_t g_events_handle; #define PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 #define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 #define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 -#define QUEUE_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" -#define PG_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" -#define PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS "10000" -#define PORT_RATE_FLEX_COUNTER_POLLING_INTERVAL_MS "1000" +#define QUEUE_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_DROP_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 // types -------------------------------------------------------------------------------------------------------------- @@ -84,7 +97,50 @@ struct PortAttrValue }; typedef PortAttrValue PortAttrValue_t; -typedef std::map> PortSerdesAttrMap_t; +typedef std::map PortSerdesAttrMap_t; + +struct PortBulker +{ + std::vector oids; + std::vector attrCount; + std::vector attrList; + std::vector statuses; + uint32_t count; + + PortBulker(uint32_t size) : + oids(size), + attrCount(size, 1), + attrList(size), + statuses(size, SAI_STATUS_NOT_EXECUTED), + count(0) + { + } + + void add(sai_object_id_t oid, sai_attribute_t& attr) + { + oids[count] = oid; + attrList[count] = attr; + count++; + } + + void executeGet(sai_bulk_op_error_mode_t errorMode = SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR) + { + if (count == 0) + { + return; + } + + std::vector attrs(count); + + for (size_t idx = 0; idx < count; idx++) + { + attrs[idx] = &attrList[idx]; + } + + sai_port_api->get_ports_attribute(count, oids.data(), attrCount.data(), + attrs.data(), errorMode, statuses.data()); + } +}; // constants ---------------------------------------------------------------------------------------------------------- @@ -149,6 +205,23 @@ static map interface_type_map = { "kr8", SAI_PORT_INTERFACE_TYPE_KR8 } }; +// Timestamp Template map used for Path Tracing +static map pt_timestamp_template_map = +{ + { "template1", SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_8_15 }, + { "template2", SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_12_19 }, + { "template3", SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23 }, + { "template4", SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_20_27 } +}; + +static map sai_queue_type_string_map = +{ + {SAI_QUEUE_TYPE_ALL, "SAI_QUEUE_TYPE_ALL"}, + {SAI_QUEUE_TYPE_UNICAST, "SAI_QUEUE_TYPE_UNICAST"}, + {SAI_QUEUE_TYPE_MULTICAST, "SAI_QUEUE_TYPE_MULTICAST"}, + {SAI_QUEUE_TYPE_UNICAST_VOQ, "SAI_QUEUE_TYPE_UNICAST_VOQ"}, +}; + const vector port_stat_ids = { SAI_PORT_STAT_IF_IN_OCTETS, @@ -215,7 +288,40 @@ const vector port_stat_ids = SAI_PORT_STAT_IP_IN_RECEIVES, SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES, SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES, - SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS + SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14, + SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15, + SAI_PORT_STAT_IF_IN_FEC_CORRECTED_BITS, + SAI_PORT_STAT_TRIM_PACKETS, + SAI_PORT_STAT_DROPPED_TRIM_PACKETS, + SAI_PORT_STAT_TX_TRIM_PACKETS, + SAI_PORT_STAT_DOT3_STATS_ALIGNMENT_ERRORS, + SAI_PORT_STAT_DOT3_STATS_FCS_ERRORS, + SAI_PORT_STAT_DOT3_STATS_SINGLE_COLLISION_FRAMES, + SAI_PORT_STAT_DOT3_STATS_MULTIPLE_COLLISION_FRAMES, + SAI_PORT_STAT_DOT3_STATS_SQE_TEST_ERRORS, + SAI_PORT_STAT_DOT3_STATS_DEFERRED_TRANSMISSIONS, + SAI_PORT_STAT_DOT3_STATS_LATE_COLLISIONS, + SAI_PORT_STAT_DOT3_STATS_EXCESSIVE_COLLISIONS, + SAI_PORT_STAT_DOT3_STATS_INTERNAL_MAC_TRANSMIT_ERRORS, + SAI_PORT_STAT_DOT3_STATS_CARRIER_SENSE_ERRORS, + SAI_PORT_STAT_DOT3_STATS_FRAME_TOO_LONGS, + SAI_PORT_STAT_DOT3_STATS_INTERNAL_MAC_RECEIVE_ERRORS, + SAI_PORT_STAT_DOT3_STATS_SYMBOL_ERRORS }; const vector gbport_stat_ids = @@ -252,8 +358,16 @@ static const vector queue_stat_ids = SAI_QUEUE_STAT_BYTES, SAI_QUEUE_STAT_DROPPED_PACKETS, SAI_QUEUE_STAT_DROPPED_BYTES, + SAI_QUEUE_STAT_TRIM_PACKETS, + SAI_QUEUE_STAT_DROPPED_TRIM_PACKETS, + SAI_QUEUE_STAT_TX_TRIM_PACKETS +}; +static const vector voq_stat_ids = +{ + SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS }; + static const vector queueWatermarkStatIds = { SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES, @@ -270,12 +384,47 @@ static const vector ingressPriorityGroupDropS SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS }; +const vector wred_port_stat_ids = +{ + SAI_PORT_STAT_GREEN_WRED_DROPPED_PACKETS, + SAI_PORT_STAT_YELLOW_WRED_DROPPED_PACKETS, + SAI_PORT_STAT_RED_WRED_DROPPED_PACKETS, + SAI_PORT_STAT_WRED_DROPPED_PACKETS +}; + +static const vector wred_queue_stat_ids = +{ + SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS, + SAI_QUEUE_STAT_WRED_ECN_MARKED_BYTES, + SAI_QUEUE_STAT_WRED_DROPPED_PACKETS, + SAI_QUEUE_STAT_WRED_DROPPED_BYTES +}; + static char* hostif_vlan_tag[] = { [SAI_HOSTIF_VLAN_TAG_STRIP] = "SAI_HOSTIF_VLAN_TAG_STRIP", [SAI_HOSTIF_VLAN_TAG_KEEP] = "SAI_HOSTIF_VLAN_TAG_KEEP", [SAI_HOSTIF_VLAN_TAG_ORIGINAL] = "SAI_HOSTIF_VLAN_TAG_ORIGINAL" }; +const std::unordered_map PortOperErrorEvent::db_key_errors = +{ + // SAI port oper error status to error name mapping + { SAI_PORT_ERROR_STATUS_MAC_LOCAL_FAULT, "mac_local_fault"}, + { SAI_PORT_ERROR_STATUS_MAC_REMOTE_FAULT, "mac_remote_fault"}, + { SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS, "fec_sync_loss"}, + { SAI_PORT_ERROR_STATUS_FEC_LOSS_ALIGNMENT_MARKER, "fec_alignment_loss"}, + { SAI_PORT_ERROR_STATUS_HIGH_SER, "high_ser_error"}, + { SAI_PORT_ERROR_STATUS_HIGH_BER, "high_ber_error"}, + { SAI_PORT_ERROR_STATUS_CRC_RATE, "crc_rate"}, + { SAI_PORT_ERROR_STATUS_DATA_UNIT_CRC_ERROR, "data_unit_crc_error"}, + { SAI_PORT_ERROR_STATUS_DATA_UNIT_SIZE, "data_unit_size"}, + { SAI_PORT_ERROR_STATUS_DATA_UNIT_MISALIGNMENT_ERROR, "data_unit_misalignment_error"}, + { SAI_PORT_ERROR_STATUS_CODE_GROUP_ERROR, "code_group_error"}, + { SAI_PORT_ERROR_STATUS_SIGNAL_LOCAL_ERROR, "signal_local_error"}, + { SAI_PORT_ERROR_STATUS_NO_RX_REACHABILITY, "no_rx_reachability"} +}; + + // functions ---------------------------------------------------------------------------------------------------------- static bool isValidPortTypeForLagMember(const Port& port) @@ -287,91 +436,227 @@ static void getPortSerdesAttr(PortSerdesAttrMap_t &map, const PortConfig &port) { if (port.serdes.preemphasis.is_set) { - map[SAI_PORT_SERDES_ATTR_PREEMPHASIS] = port.serdes.preemphasis.value; + map[SAI_PORT_SERDES_ATTR_PREEMPHASIS] = SerdesValue(port.serdes.preemphasis.value); } if (port.serdes.idriver.is_set) { - map[SAI_PORT_SERDES_ATTR_IDRIVER] = port.serdes.idriver.value; + map[SAI_PORT_SERDES_ATTR_IDRIVER] = SerdesValue(port.serdes.idriver.value); } if (port.serdes.ipredriver.is_set) { - map[SAI_PORT_SERDES_ATTR_IPREDRIVER] = port.serdes.ipredriver.value; + map[SAI_PORT_SERDES_ATTR_IPREDRIVER] = SerdesValue(port.serdes.ipredriver.value); } if (port.serdes.pre1.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE1] = port.serdes.pre1.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE1] = SerdesValue(port.serdes.pre1.value); } if (port.serdes.pre2.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE2] = port.serdes.pre2.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE2] = SerdesValue(port.serdes.pre2.value); } if (port.serdes.pre3.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE3] = port.serdes.pre3.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE3] = SerdesValue(port.serdes.pre3.value); } if (port.serdes.main.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_MAIN] = port.serdes.main.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_MAIN] = SerdesValue(port.serdes.main.value); } if (port.serdes.post1.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_POST1] = port.serdes.post1.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST1] = SerdesValue(port.serdes.post1.value); } if (port.serdes.post2.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_POST2] = port.serdes.post2.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST2] = SerdesValue(port.serdes.post2.value); } if (port.serdes.post3.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_POST3] = port.serdes.post3.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST3] = SerdesValue(port.serdes.post3.value); } if (port.serdes.attn.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_FIR_ATTN] = port.serdes.attn.value; + map[SAI_PORT_SERDES_ATTR_TX_FIR_ATTN] = SerdesValue(port.serdes.attn.value); } if (port.serdes.ob_m2lp.is_set) { - - map[SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO] = port.serdes.ob_m2lp.value; + map[SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO] = SerdesValue(port.serdes.ob_m2lp.value); } if (port.serdes.ob_alev_out.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE] = port.serdes.ob_alev_out.value; + map[SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE] = SerdesValue(port.serdes.ob_alev_out.value); } if (port.serdes.obplev.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE] = port.serdes.obplev.value; + map[SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE] = SerdesValue(port.serdes.obplev.value); } if (port.serdes.obnlev.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE] = port.serdes.obnlev.value; + map[SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE] = SerdesValue(port.serdes.obnlev.value); } if (port.serdes.regn_bfm1p.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG] = port.serdes.regn_bfm1p.value; + map[SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG] = SerdesValue(port.serdes.regn_bfm1p.value); } if (port.serdes.regn_bfm1n.is_set) { - map[SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG] = port.serdes.regn_bfm1n.value; + map[SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG] = SerdesValue(port.serdes.regn_bfm1n.value); } - + if (port.serdes.custom_collection.is_set) + { + map[SAI_PORT_SERDES_ATTR_CUSTOM_COLLECTION] = SerdesValue(port.serdes.custom_collection.value); + } + +} + +static bool isPathTracingSupported() +{ + /* + * Path Tracing is supported when four conditions are met: + * + * 1. The switch supports SAI_OBJECT_TYPE_TAM + * 2. SAI_OBJECT_TYPE_PORT supports SAI_PORT_ATTR_PATH_TRACING_INTF attribute + * 3. SAI_OBJECT_TYPE_PORT supports SAI_PORT_ATTR_PATH_TRACING_TIMESTAMP_TYPE attribute + * 4. SAI_OBJECT_TYPE_PORT supports SAI_PORT_ATTR_TAM_OBJECT attribute + */ + + /* First, query switch capabilities */ + sai_attribute_t attr; + std::vector switchCapabilities(SAI_OBJECT_TYPE_MAX); + attr.id = SAI_SWITCH_ATTR_SUPPORTED_OBJECT_TYPE_LIST; + attr.value.s32list.count = static_cast(switchCapabilities.size()); + attr.value.s32list.list = switchCapabilities.data(); + + bool is_tam_supported = false; + auto status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status == SAI_STATUS_SUCCESS) + { + for (std::uint32_t i = 0; i < attr.value.s32list.count; i++) + { + switch(static_cast(attr.value.s32list.list[i])) + { + case SAI_OBJECT_TYPE_TAM: + is_tam_supported = true; + break; + default: + /* Received an attribute in which we are not interested, ignoring it */ + break; + } + } + } + else if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) + || status == SAI_STATUS_NOT_SUPPORTED || status == SAI_STATUS_NOT_IMPLEMENTED) + { + SWSS_LOG_INFO("Querying OBJECT_TYPE_LIST is not supported on this platform"); + return false; + } + else + { + SWSS_LOG_ERROR( + "Failed to get a list of supported switch capabilities. Error=%d", status + ); + return false; + } + + /* Then verify if the four conditions are met */ + if (!is_tam_supported || + !gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_PORT, SAI_PORT_ATTR_PATH_TRACING_INTF) || + !gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_PORT, SAI_PORT_ATTR_PATH_TRACING_TIMESTAMP_TYPE) || + !gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_PORT, SAI_PORT_ATTR_TAM_OBJECT)) + { + return false; + } + + return true; +} + +bool PortsOrch::checkPathTracingCapability() +{ + vector fvVector; + if (isPathTracingSupported()) + { + SWSS_LOG_INFO("Path Tracing is supported"); + /* Set PATH_TRACING_CAPABLE = true in STATE DB */ + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PATH_TRACING_CAPABLE, "true"); + m_isPathTracingSupported = true; + } + else + { + SWSS_LOG_INFO("Path Tracing is not supported"); + /* Set PATH_TRACING_CAPABLE = false in STATE DB */ + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PATH_TRACING_CAPABLE, "false"); + m_isPathTracingSupported = false; + } + gSwitchOrch->set_switch_capability(fvVector); + + return m_isPathTracingSupported; +} + +static bool isPortStatSupported(sai_port_stat_t stat) +{ + static std::vector statList; + + if (statList.empty()) + { + sai_stat_capability_list_t capList = { .count = 0, .list = nullptr }; + + auto status = sai_query_stats_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, &capList); + if ((status != SAI_STATUS_SUCCESS) && (status != SAI_STATUS_BUFFER_OVERFLOW)) + { + return false; + } + + statList.resize(capList.count); + capList.list = statList.data(); + + status = sai_query_stats_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, &capList); + if (status != SAI_STATUS_SUCCESS) + { + return false; + } + } + + return std::any_of( + statList.cbegin(), + statList.cend(), + [stat](const sai_stat_capability_t &cap) { + return static_cast(cap.stat_enum) == stat; + } + ); +} + +static bool isMlnxPlatform() +{ + const auto *platform = std::getenv("platform"); + if (platform == nullptr) + { + return false; + } + + const auto *result = std::strstr(platform, MLNX_PLATFORM_SUBSTRING); + if (result == nullptr) + { + return false; + } + + return true; } // Port OA ------------------------------------------------------------------------------------------------------------ @@ -394,19 +679,36 @@ static void getPortSerdesAttr(PortSerdesAttrMap_t &map, const PortConfig &port) PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb) : Orch(db, tableNames), m_portStateTable(stateDb, STATE_PORT_TABLE_NAME), + m_portOpErrTable(stateDb, STATE_PORT_OPER_ERR_TABLE_NAME), port_stat_manager(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), - gb_port_stat_manager("GB_FLEX_COUNTER_DB", + gb_port_stat_manager(true, PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), port_buffer_drop_stat_manager(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS, false), queue_stat_manager(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), - m_port_state_poller(new SelectableTimer(timespec { .tv_sec = PORT_STATE_POLLING_SEC, .tv_nsec = 0 })) + queue_watermark_manager(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ_AND_CLEAR, QUEUE_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + pg_watermark_manager(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ_AND_CLEAR, PG_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + pg_drop_stat_manager(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PG_DROP_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + wred_port_stat_manager(WRED_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + wred_queue_stat_manager(WRED_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + counter_managers({ + ref(port_stat_manager), + ref(port_buffer_drop_stat_manager), + ref(queue_stat_manager), + ref(queue_watermark_manager), + ref(pg_watermark_manager), + ref(pg_drop_stat_manager), + ref(wred_port_stat_manager), + ref(wred_queue_stat_manager) + }), + m_port_state_poller(new SelectableTimer(timespec { .tv_sec = PORT_STATE_POLLING_SEC, .tv_nsec = 0 })), + m_isWarmRestoreStage(WarmStart::isWarmStart()) { SWSS_LOG_ENTER(); /* Initialize counter table */ m_counter_db = shared_ptr(new DBConnector("COUNTERS_DB", 0)); - m_counterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_PORT_NAME_MAP)); + m_counterNameMapUpdater = unique_ptr(new CounterNameMapUpdater("COUNTERS_DB", COUNTERS_PORT_NAME_MAP)); m_counterSysPortTable = unique_ptr
( new Table(m_counter_db.get(), COUNTERS_SYSTEM_PORT_NAME_MAP)); m_counterLagTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_LAG_NAME_MAP)); @@ -418,35 +720,38 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector(new Table(db, APP_PORT_TABLE_NAME)); m_sendToIngressPortTable = unique_ptr
(new Table(db, APP_SEND_TO_INGRESS_PORT_TABLE_NAME)); + m_systemPortTable = unique_ptr
(new Table(db, APP_SYSTEM_PORT_TABLE_NAME)); /* Initialize gearbox */ m_gearboxTable = unique_ptr
(new Table(db, "_GEARBOX_TABLE")); /* Initialize queue tables */ - m_queueTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_NAME_MAP)); + m_queueCounterNameMapUpdater = unique_ptr(new CounterNameMapUpdater("COUNTERS_DB", COUNTERS_QUEUE_NAME_MAP)); m_voqTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_VOQ_NAME_MAP)); m_queuePortTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_PORT_MAP)); m_queueIndexTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_INDEX_MAP)); m_queueTypeTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_TYPE_MAP)); /* Initialize ingress priority group tables */ - m_pgTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_PG_NAME_MAP)); + m_pgCounterNameMapUpdater = unique_ptr(new CounterNameMapUpdater("COUNTERS_DB", COUNTERS_PG_NAME_MAP)); m_pgPortTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_PG_PORT_MAP)); m_pgIndexTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_PG_INDEX_MAP)); - m_flex_db = shared_ptr(new DBConnector("FLEX_COUNTER_DB", 0)); - m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), FLEX_COUNTER_TABLE)); - m_flexCounterGroupTable = unique_ptr(new ProducerTable(m_flex_db.get(), FLEX_COUNTER_GROUP_TABLE)); - m_state_db = shared_ptr(new DBConnector("STATE_DB", 0)); m_stateBufferMaximumValueTable = unique_ptr
(new Table(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE)); + /* Initialize counter capability table*/ + m_queueCounterCapabilitiesTable = unique_ptr
(new Table(m_state_db.get(), STATE_QUEUE_COUNTER_CAPABILITIES_NAME)); + m_portCounterCapabilitiesTable = unique_ptr
(new Table(m_state_db.get(), STATE_PORT_COUNTER_CAPABILITIES_NAME)); + initGearbox(); - string queueWmSha, pgWmSha; + string queueWmSha, pgWmSha, portRateSha, nvdaPortTrimSha, portFlrSha; string queueWmPluginName = "watermark_queue.lua"; string pgWmPluginName = "watermark_pg.lua"; string portRatePluginName = "port_rates.lua"; + string nvdaPortTrimPluginName = "nvda_port_trim_drop.lua"; + string portFlrPluginName = "port_flr.lua"; try { @@ -457,36 +762,65 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector fieldValues; - fieldValues.emplace_back(QUEUE_PLUGIN_FIELD, queueWmSha); - fieldValues.emplace_back(POLL_INTERVAL_FIELD, QUEUE_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR); - m_flexCounterGroupTable->set(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, fieldValues); - - fieldValues.clear(); - fieldValues.emplace_back(PG_PLUGIN_FIELD, pgWmSha); - fieldValues.emplace_back(POLL_INTERVAL_FIELD, PG_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR); - m_flexCounterGroupTable->set(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, fieldValues); - - fieldValues.clear(); - fieldValues.emplace_back(PORT_PLUGIN_FIELD, portRateSha); - fieldValues.emplace_back(POLL_INTERVAL_FIELD, PORT_RATE_FLEX_COUNTER_POLLING_INTERVAL_MS); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ); - m_flexCounterGroupTable->set(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, fieldValues); - - fieldValues.clear(); - fieldValues.emplace_back(POLL_INTERVAL_FIELD, PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS); - fieldValues.emplace_back(STATS_MODE_FIELD, STATS_MODE_READ); - m_flexCounterGroupTable->set(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, fieldValues); + portRateSha = swss::loadRedisScript(m_counter_db.get(), portRateLuaScript); + + string nvdaPortTrimLuaScript = swss::loadLuaScript(nvdaPortTrimPluginName); + nvdaPortTrimSha = swss::loadRedisScript(m_counter_db.get(), nvdaPortTrimLuaScript); + + string portFlrLuaScript = swss::loadLuaScript(portFlrPluginName); + portFlrSha = swss::loadRedisScript(m_counter_db.get(), portFlrLuaScript); } catch (const runtime_error &e) { SWSS_LOG_ERROR("Port flex counter groups were not set successfully: %s", e.what()); } + // Build portStatPlugins string, only adding non-empty plugin SHAs + std::string portStatPlugins; + if (!portRateSha.empty()) + { + portStatPlugins = portRateSha; + } + if (!portFlrSha.empty()) + { + if (!portStatPlugins.empty()) + { + portStatPlugins += ","; + } + portStatPlugins += portFlrSha; + } + + // Nvidia custom trim stat calculation + if (isMlnxPlatform() && \ + isPortStatSupported(SAI_PORT_STAT_TRIM_PACKETS) && \ + isPortStatSupported(SAI_PORT_STAT_TX_TRIM_PACKETS) && \ + !isPortStatSupported(SAI_PORT_STAT_DROPPED_TRIM_PACKETS)) + { + portStatPlugins += "," + nvdaPortTrimSha; + } + + setFlexCounterGroupParameter(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + QUEUE_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS, + STATS_MODE_READ_AND_CLEAR, + QUEUE_PLUGIN_FIELD, + queueWmSha); + + setFlexCounterGroupParameter(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + PG_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS, + STATS_MODE_READ_AND_CLEAR, + PG_PLUGIN_FIELD, + pgWmSha); + + setFlexCounterGroupParameter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, + PORT_RATE_FLEX_COUNTER_POLLING_INTERVAL_MS, + STATS_MODE_READ, + PORT_PLUGIN_FIELD, + portStatPlugins); + + setFlexCounterGroupParameter(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, + PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS, + STATS_MODE_READ); + /* Get CPU port */ this->initializeCpuPort(); @@ -532,6 +866,60 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vectorquerySwitchCapability(SAI_OBJECT_TYPE_HOSTIF, SAI_HOSTIF_ATTR_QUEUE)) + { + m_supportsHostIfTxQueue = true; + } + else + { + SWSS_LOG_WARN("Hostif queue attribute not supported"); + } + + // Query whether SAI supports Host Tx Signal and Host Tx Notification + + sai_attr_capability_t capability; + + + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE, + &capability) == SAI_STATUS_SUCCESS) + { + if (capability.create_implemented == true) + { + SWSS_LOG_DEBUG("SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE is true"); + saiHwTxSignalSupported = true; + } + } + + if (sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY, + &capability) == SAI_STATUS_SUCCESS) + { + if (capability.create_implemented == true) + { + SWSS_LOG_DEBUG("SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY is true"); + saiTxReadyNotifySupported = true; + } + } + + if (saiHwTxSignalSupported && saiTxReadyNotifySupported) + { + SWSS_LOG_DEBUG("m_cmisModuleAsicSyncSupported is true"); + m_cmisModuleAsicSyncSupported = true; + + // set HOST_TX_READY callback function attribute to SAI, only if the feature is enabled + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY; + attr.value.ptr = (void *)on_port_host_tx_ready; + + if (sai_switch_api->set_switch_attribute(gSwitchId, &attr) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("PortsOrch failed to set SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY attribute"); + } + + Orch::addExecutor(new Consumer(new SubscriberStateTable(stateDb, STATE_TRANSCEIVER_INFO_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, 0), this, STATE_TRANSCEIVER_INFO_TABLE_NAME)); + } + if (gMySwitchType != "dpu") { sai_attr_capability_t attr_cap; @@ -582,22 +970,45 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vectorset_switch_attribute(gSwitchId, &attr) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set SAI_SWITCH_ATTR_FDB_EVENT_NOTIFY attribute"); + throw runtime_error("PortsOrch initialization failure (failed to set fdb event notify)"); + } + } + /* Add port oper status notification support */ - DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); - m_portStatusNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + m_notificationsDb = make_shared("ASIC_DB", 0); + m_portStatusNotificationConsumer = new swss::NotificationConsumer(m_notificationsDb.get(), "NOTIFICATIONS"); auto portStatusNotificatier = new Notifier(m_portStatusNotificationConsumer, this, "PORT_STATUS_NOTIFICATIONS"); Orch::addExecutor(portStatusNotificatier); - if (gMySwitchType == "voq") + if (m_cmisModuleAsicSyncSupported) + { + m_portHostTxReadyNotificationConsumer = new swss::NotificationConsumer(m_notificationsDb.get(), "NOTIFICATIONS"); + auto portHostTxReadyNotificatier = new Notifier(m_portHostTxReadyNotificationConsumer, this, "PORT_HOST_TX_NOTIFICATIONS"); + Orch::addExecutor(portHostTxReadyNotificatier); + } + + if (isChassisDbInUse()) { string tableName; //Add subscriber to process system LAG (System PortChannel) table @@ -613,6 +1024,12 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector (new LagIdAllocator(chassisAppDb)); } + /* Query Path Tracing capability */ + checkPathTracingCapability(); + + /* Initialize the stats capability in STATE_DB */ + initCounterCapabilities(gSwitchId); + auto executor = new ExecutableTimer(m_port_state_poller, this, "PORT_STATE_POLLER"); Orch::addExecutor(executor); } @@ -643,6 +1060,26 @@ void PortsOrch::initializeCpuPort() SWSS_LOG_NOTICE("Get CPU port pid:%" PRIx64, this->m_cpuPort.m_port_id); } +// Creating mapping of various port oper errors for error handling +void PortsOrch::initializePortOperErrors(Port &port) +{ + SWSS_LOG_ENTER(); + + for (auto& error : PortOperErrorEvent::db_key_errors) + { + const sai_port_error_status_t error_status = error.first; + std::string error_name = error.second; + if (port.m_portOperErrorToEvent.find(error_status) == port.m_portOperErrorToEvent.end()) + { + port.m_portOperErrorToEvent[error_status] = PortOperErrorEvent(error_status, error_name); + SWSS_LOG_INFO("Initialize port %s error %s flag=0x%" PRIx32, + port.m_alias.c_str(), + error_name.c_str(), + error_status); + } + } +} + void PortsOrch::initializePorts() { SWSS_LOG_ENTER(); @@ -731,7 +1168,7 @@ void PortsOrch::setPortConfigState(port_config_state_t value) this->m_portConfigState = value; } -bool PortsOrch::addPortBulk(const std::vector &portList) +bool PortsOrch::addPortBulk(const std::vector &portList, std::vector& addedPorts) { // The method is used to create ports in a bulk mode. // The action takes place when: @@ -745,6 +1182,8 @@ bool PortsOrch::addPortBulk(const std::vector &portList) return true; } + addedPorts.reserve(portList.size()); + std::vector attrValueList; std::vector> attrDataList; std::vector attrCountList; @@ -759,6 +1198,12 @@ bool PortsOrch::addPortBulk(const std::vector &portList) sai_attribute_t attr; std::vector attrList; + addedPorts.emplace_back(cit.key, Port::PHY); + auto& p = addedPorts.back(); + + p.m_role = cit.role.value; + p.m_index = cit.index.value; + if (cit.lanes.is_set) { PortAttrValue_t attrValue; @@ -778,6 +1223,7 @@ bool PortsOrch::addPortBulk(const std::vector &portList) attr.id = SAI_PORT_ATTR_SPEED; attr.value.u32 = cit.speed.value; attrList.push_back(attr); + p.m_speed = cit.speed.value; } if (cit.autoneg.is_set) @@ -785,6 +1231,10 @@ bool PortsOrch::addPortBulk(const std::vector &portList) attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; attr.value.booldata = cit.autoneg.value; attrList.push_back(attr); + p.m_autoneg = cit.autoneg.value; + // If port is successfully created then autoneg was set and is supported + p.m_cap_an = 1; + p.m_an_cfg = true; } if (cit.fec.is_set) @@ -792,6 +1242,122 @@ bool PortsOrch::addPortBulk(const std::vector &portList) attr.id = SAI_PORT_ATTR_FEC_MODE; attr.value.s32 = cit.fec.value; attrList.push_back(attr); + + if (fec_override_sup) + { + attr.id = SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE; + attr.value.booldata = cit.fec.override_fec; + attrList.push_back(attr); + } + + p.m_fec_mode = cit.fec.value; + p.m_override_fec = cit.fec.override_fec; + p.m_fec_cfg = true; + } + + if (cit.tpid.is_set) + { + if (cit.tpid.value != DEFAULT_TPID) + { + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_TPID; + attr.value.u16 = cit.tpid.value; + attrList.push_back(attr); + p.m_tpid = cit.tpid.value; + } + } + + if (cit.pfc_asym.is_set) + { + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE; + attr.value.s32 = cit.pfc_asym.value; + attrList.push_back(attr); + + if (cit.pfc_asym.value == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE) + { + attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX; + attr.value.u8 = static_cast(0xff); + attrList.push_back(attr); + } + + p.m_pfc_asym = cit.pfc_asym.value; + } + + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrList.push_back(attr); + } + + if (cit.pt_intf_id.is_set) + { + if (!m_isPathTracingSupported) + { + SWSS_LOG_WARN( + "Failed to set Path Tracing Interface ID: Path Tracing is not supported by the switch" + ); + continue; + } + + /* + * First, let's check the Path Tracing Interface ID configured for the port. + * + * Path Tracing Interface ID > 0 -> Path Tracing ENABLED on the port + * Path Tracing Interface ID == 0 -> Path Tracing DISABLED on the port + */ + if (cit.pt_intf_id.value != 0) + { + /* Path Tracing ENABLED case */ + + /* + * The port does not have a TAM object assigned to it. + * + * Let's create a new TAM object (if we don't already have one) + * and assign it to the port. + */ + if (m_ptTam == SAI_NULL_OBJECT_ID) + { + if (!createPtTam()) + { + SWSS_LOG_ERROR( + "Failed to create TAM object for Path Tracing" + ); + } + } + + if (m_ptTam != SAI_NULL_OBJECT_ID) + { + vector tam_objects_list; + tam_objects_list.push_back(m_ptTam); + attr.id = SAI_PORT_ATTR_TAM_OBJECT; + attr.value.objlist.count = (uint32_t)tam_objects_list.size(); + attr.value.objlist.list = tam_objects_list.data(); + + m_ptTamRefCount++; + m_portPtTam[cit.key] = m_ptTam; + } + } + + attr.id = SAI_PORT_ATTR_PATH_TRACING_INTF; + attr.value.u16 = cit.pt_intf_id.value; + attrList.push_back(attr); + } + + if (cit.pt_timestamp_template.is_set) + { + if (!m_isPathTracingSupported) + { + SWSS_LOG_WARN( + "Failed to set Path Tracing Timestamp Template: Path Tracing is not supported by the switch" + ); + continue; + } + + attr.id = SAI_PORT_ATTR_PATH_TRACING_TIMESTAMP_TYPE; + attr.value.u16 = cit.pt_timestamp_template.value; + attrList.push_back(attr); } attrDataList.push_back(attrList); @@ -835,9 +1401,18 @@ bool PortsOrch::addPortBulk(const std::vector &portList) return false; } - m_portListLaneMap[portList.at(i).lanes.value] = oidList.at(i); - m_portCount++; - } + Port& p = addedPorts.at(i); + PortConfig pCfg = portList.at(i); + + if (pCfg.autoneg.is_set) + { + updatePortStatePoll(p, PORT_STATE_POLL_AN, pCfg.autoneg.value); + } + + m_portListLaneMap[portList.at(i).lanes.value] = oidList.at(i); + addedPorts.at(i).m_port_id = oidList.at(i); + m_portCount++; + } // newly created ports might be put in the default vlan so remove all ports from // the default vlan. @@ -873,6 +1448,22 @@ bool PortsOrch::removePortBulk(const std::vector &portList) // Remove port serdes (if exists) before removing port since this reference is dependency removePortSerdesAttribute(cit); + + /* + * Decrease TAM object ref count before removing the port, if the port + * has a TAM object assigned + */ + if (m_portPtTam.find(p.m_alias) != m_portPtTam.end()) + { + m_ptTamRefCount--; + if (m_ptTamRefCount == 0) + { + if (!removePtTam(m_ptTam)) + { + throw runtime_error("Remove port TAM object for Path Tracing failed"); + } + } + } } auto portCount = static_cast(portList.size()); @@ -1090,6 +1681,11 @@ map& PortsOrch::getAllPorts() return m_portList; } +unordered_set& PortsOrch::getAllVlans() +{ + return m_vlanPorts; +} + bool PortsOrch::getPort(string alias, Port &p) { SWSS_LOG_ENTER(); @@ -1156,6 +1752,140 @@ bool PortsOrch::getBridgePortReferenceCount(Port &port) return m_bridge_port_ref_count[port.m_alias]; } + +/**** +* Func Name : initCounterCapabilities +* Parameters : switch oid +* Returns : void +* Description: It updates the STATE_DB with platform stat capability +* As of now, it only handles WRED counters +* 1. Initialize the WRED statistics capabilities with false for all counters +* 2. Get queue stats capability from the platform +* 3. Based on the fetched queue stats capability, update the STATE_DB +* 4. Get port stats capability from the platform +* 5. Based on the fetched port stats capability, update the STATE_DB +**/ +void PortsOrch::initCounterCapabilities(sai_object_id_t switchId) +{ + sai_stat_capability_list_t queue_stats_capability, port_stats_capability; + + uint32_t it = 0; + bool pt_grn_pkt = false, pt_red_pkt = false, pt_ylw_pkt = false, pt_tot_pkt = false; + bool q_ecn_byte = false, q_ecn_pkt = false, q_wred_byte = false, q_wred_pkt = false; + + sai_stat_capability_t stat_initializer; + stat_initializer.stat_enum = 0; + stat_initializer.stat_modes = 0; + vector qstat_cap_list; + queue_stats_capability.count = 0; + queue_stats_capability.list = nullptr; + + vector fieldValuesTrue; + fieldValuesTrue.push_back(FieldValueTuple("isSupported", "true")); + + vector fieldValuesFalse; + fieldValuesFalse.push_back(FieldValueTuple("isSupported", "false")); + + /* 1. Initialize the WRED stats capabilities with false for all counters */ + m_queueCounterCapabilitiesTable->set("WRED_ECN_QUEUE_ECN_MARKED_PKT_COUNTER",fieldValuesFalse); + m_queueCounterCapabilitiesTable->set("WRED_ECN_QUEUE_ECN_MARKED_BYTE_COUNTER",fieldValuesFalse); + m_queueCounterCapabilitiesTable->set("WRED_ECN_QUEUE_WRED_DROPPED_PKT_COUNTER",fieldValuesFalse); + m_queueCounterCapabilitiesTable->set("WRED_ECN_QUEUE_WRED_DROPPED_BYTE_COUNTER",fieldValuesFalse); + m_portCounterCapabilitiesTable->set("WRED_ECN_PORT_WRED_GREEN_DROP_COUNTER",fieldValuesFalse); + m_portCounterCapabilitiesTable->set("WRED_ECN_PORT_WRED_YELLOW_DROP_COUNTER",fieldValuesFalse); + m_portCounterCapabilitiesTable->set("WRED_ECN_PORT_WRED_RED_DROP_COUNTER",fieldValuesFalse); + m_portCounterCapabilitiesTable->set("WRED_ECN_PORT_WRED_TOTAL_DROP_COUNTER",fieldValuesFalse); + + /* 2. Get queue stats capability from the platform */ + sai_status_t status = sai_query_stats_capability(switchId, SAI_OBJECT_TYPE_QUEUE, &queue_stats_capability); + if (status == SAI_STATUS_BUFFER_OVERFLOW) + { + qstat_cap_list.resize(queue_stats_capability.count, stat_initializer); + queue_stats_capability.list = qstat_cap_list.data(); + status = sai_query_stats_capability(switchId, SAI_OBJECT_TYPE_QUEUE, &queue_stats_capability); + } + if (status == SAI_STATUS_SUCCESS) + { + /* 3. Based on the fetched queue stats capability, update the STATE_DB */ + for(it=0; itset("WRED_ECN_QUEUE_ECN_MARKED_PKT_COUNTER",fieldValuesTrue); + q_ecn_pkt = true; + } + else if (SAI_QUEUE_STAT_WRED_ECN_MARKED_BYTES == queue_stats_capability.list[it].stat_enum) + { + m_queueCounterCapabilitiesTable->set("WRED_ECN_QUEUE_ECN_MARKED_BYTE_COUNTER",fieldValuesTrue); + q_ecn_byte = true; + } + else if (SAI_QUEUE_STAT_WRED_DROPPED_PACKETS == queue_stats_capability.list[it].stat_enum) + { + m_queueCounterCapabilitiesTable->set("WRED_ECN_QUEUE_WRED_DROPPED_PKT_COUNTER",fieldValuesTrue); + q_wred_pkt = true; + } + else if (SAI_QUEUE_STAT_WRED_DROPPED_BYTES == queue_stats_capability.list[it].stat_enum) + { + m_queueCounterCapabilitiesTable->set("WRED_ECN_QUEUE_WRED_DROPPED_BYTE_COUNTER",fieldValuesTrue); + q_wred_byte = true; + } + + } + SWSS_LOG_INFO("WRED queue stats is_capable: [ecn-marked-pkts:%d,ecn-marked-bytes:%d,wred-drop-pkts:%d,wred-drop-bytes:%d]", + q_ecn_pkt, q_ecn_byte, q_wred_pkt, q_wred_byte); + } + else + { + SWSS_LOG_NOTICE("Queue stat capability get failed: WRED queue stats can not be enabled, rv:%d", status); + } + + vector pstat_cap_list; + port_stats_capability.count = 0; + port_stats_capability.list = nullptr; + + /* 4. Get port stats capability from the platform*/ + status = sai_query_stats_capability(switchId, SAI_OBJECT_TYPE_PORT, &port_stats_capability); + if (status == SAI_STATUS_BUFFER_OVERFLOW) + { + pstat_cap_list.resize(port_stats_capability.count, stat_initializer); + port_stats_capability.list = pstat_cap_list.data(); + status = sai_query_stats_capability(switchId, SAI_OBJECT_TYPE_PORT, &port_stats_capability); + } + if (status == SAI_STATUS_SUCCESS) + { + /* 5. Based on the fetched port stats capability, update the STATE_DB*/ + for(it=0; itset("WRED_ECN_PORT_WRED_GREEN_DROP_COUNTER",fieldValuesTrue); + pt_grn_pkt = true; + } + else if (SAI_PORT_STAT_YELLOW_WRED_DROPPED_PACKETS == port_stats_capability.list[it].stat_enum) + { + m_portCounterCapabilitiesTable->set("WRED_ECN_PORT_WRED_YELLOW_DROP_COUNTER",fieldValuesTrue); + pt_ylw_pkt = true; + } + else if (SAI_PORT_STAT_RED_WRED_DROPPED_PACKETS == port_stats_capability.list[it].stat_enum) + { + m_portCounterCapabilitiesTable->set("WRED_ECN_PORT_WRED_RED_DROP_COUNTER",fieldValuesTrue); + pt_red_pkt = true; + } + else if (SAI_PORT_STAT_WRED_DROPPED_PACKETS == port_stats_capability.list[it].stat_enum) + { + m_portCounterCapabilitiesTable->set("WRED_ECN_PORT_WRED_TOTAL_DROP_COUNTER",fieldValuesTrue); + pt_tot_pkt = true; + } + } + SWSS_LOG_INFO("WRED port drop stats is_capable: [wred-grn-pkts:%d,wred-ylw-pkts:%d,wred-red-pkts:%d,wred-total-pkts:%d]", + pt_grn_pkt, pt_ylw_pkt, pt_red_pkt, pt_tot_pkt); + } + else + { + SWSS_LOG_NOTICE("Port stat capability get failed: WRED port stats can not be enabled, rv:%d", status); + } +} + bool PortsOrch::getPortByBridgePortId(sai_object_id_t bridge_port_id, Port &port) { SWSS_LOG_ENTER(); @@ -1386,7 +2116,7 @@ void PortsOrch::initHostTxReadyState(Port &port) if (hostTxReady.empty()) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + setHostTxReady(port, "false"); SWSS_LOG_NOTICE("initialize host_tx_ready as false for port %s", port.m_alias.c_str()); } @@ -1400,10 +2130,13 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) attr.id = SAI_PORT_ATTR_ADMIN_STATE; attr.value.booldata = state; + // if sync between cmis module configuration and asic is supported, + // do not change host_tx_ready value in STATE DB when admin status is changed. + /* Update the host_tx_ready to false before setting admin_state, when admin state is false */ - if (!state) + if (!state && !m_cmisModuleAsicSyncSupported) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + setHostTxReady(port, "false"); SWSS_LOG_NOTICE("Set admin status DOWN host_tx_ready to false for port %s", port.m_alias.c_str()); } @@ -1414,7 +2147,11 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) SWSS_LOG_ERROR("Failed to set admin status %s for port %s." " Setting host_tx_ready as false", state ? "UP" : "DOWN", port.m_alias.c_str()); - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + + if (!m_cmisModuleAsicSyncSupported) + { + setHostTxReady(port, "false"); + } task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1423,17 +2160,17 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) } bool gbstatus = setGearboxPortsAttr(port, SAI_PORT_ATTR_ADMIN_STATE, &state); - if (gbstatus != true) + if (gbstatus != true && !m_cmisModuleAsicSyncSupported) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + setHostTxReady(port, "false"); SWSS_LOG_NOTICE("Set host_tx_ready to false as gbstatus is false " "for port %s", port.m_alias.c_str()); } /* Update the state table for host_tx_ready*/ - if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) ) + if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) && !m_cmisModuleAsicSyncSupported) { - m_portStateTable.hset(port.m_alias, "host_tx_ready", "true"); + setHostTxReady(port, "true"); SWSS_LOG_NOTICE("Set admin status UP host_tx_ready to true for port %s", port.m_alias.c_str()); } @@ -1441,6 +2178,20 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) return true; } +void PortsOrch::setHostTxReady(Port port, const std::string &status) +{ + vector tuples; + bool exist; + + /* If the port is revmoed, don't need to update StateDB*/ + exist = m_portStateTable.get(port.m_alias, tuples); + if (exist) + { + SWSS_LOG_NOTICE("Setting host_tx_ready status = %s, alias = %s, port_id = 0x%" PRIx64, status.c_str(), port.m_alias.c_str(), port.m_port_id); + m_portStateTable.hset(port.m_alias, "host_tx_ready", status); + } +} + bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) { SWSS_LOG_ENTER(); @@ -1466,31 +2217,6 @@ bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) return true; } -bool PortsOrch::getPortMtu(const Port& port, sai_uint32_t &mtu) -{ - SWSS_LOG_ENTER(); - - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_MTU; - - sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - - if (status != SAI_STATUS_SUCCESS) - { - return false; - } - - mtu = attr.value.u32 - (uint32_t)(sizeof(struct ether_header) + FCS_LEN + VLAN_TAG_LEN); - - /* Reduce the default MTU got from ASIC by MAX_MACSEC_SECTAG_SIZE */ - if (mtu > MAX_MACSEC_SECTAG_SIZE) - { - mtu -= MAX_MACSEC_SECTAG_SIZE; - } - - return true; -} - bool PortsOrch::setPortMtu(const Port& port, sai_uint32_t mtu) { SWSS_LOG_ENTER(); @@ -1728,6 +2454,10 @@ bool PortsOrch::setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set PFC mode %d to port id 0x%" PRIx64 " (rc:%d)", pfc_asym, port.m_port_id, status); + if (status == SAI_STATUS_NOT_SUPPORTED) + { + return true; + } task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -2245,7 +2975,7 @@ bool PortsOrch::setHostIntfsStripTag(Port &port, sai_hostif_vlan_tag_t strip) return false; } - for (const auto p: portv) + for (const auto &p: portv) { sai_attribute_t attr; attr.id = SAI_HOSTIF_ATTR_VLAN_TAG; @@ -2713,6 +3443,21 @@ bool PortsOrch::getPortAdvSpeeds(const Port& port, bool remote, string& adv_spee return rc; } +task_process_status PortsOrch::setPortUnreliableLOS(Port &port, bool enabled) +{ + SWSS_LOG_ENTER(); + sai_attribute_t attr; + sai_status_t status; + attr.id = SAI_PORT_ATTR_UNRELIABLE_LOS; + attr.value.booldata = enabled; + status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + return handleSaiSetStatus(SAI_API_PORT, status); + } + return task_success; +} + task_process_status PortsOrch::setPortAdvSpeeds(Port &port, std::set &speed_list) { SWSS_LOG_ENTER(); @@ -2768,7 +3513,7 @@ task_process_status PortsOrch::setPortAdvInterfaceTypes(Port &port, std::set(attr[0].value.s32); m_queueInfo[queue_id].index = attr[1].value.u8; + + if (sai_queue_type_string_map.find(m_queueInfo[queue_id].type) == sai_queue_type_string_map.end()) + { + SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIx64 " queue", attr[0].value.s32, queue_id); + throw runtime_error("Got unsupported queue type"); + } } else { @@ -2804,26 +3555,8 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin SWSS_LOG_INFO("Fetched cached information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id); } - switch (attr[0].value.s32) - { - case SAI_QUEUE_TYPE_ALL: - type = "SAI_QUEUE_TYPE_ALL"; - break; - case SAI_QUEUE_TYPE_UNICAST: - type = "SAI_QUEUE_TYPE_UNICAST"; - break; - case SAI_QUEUE_TYPE_MULTICAST: - type = "SAI_QUEUE_TYPE_MULTICAST"; - break; - case SAI_QUEUE_TYPE_UNICAST_VOQ: - type = "SAI_QUEUE_TYPE_UNICAST_VOQ"; - break; - default: - SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIx64 " queue", attr[0].value.s32, queue_id); - throw runtime_error("Got unsupported queue type"); - } - index = attr[1].value.u8; + queue_type = static_cast(attr[0].value.s32); return true; } @@ -2889,6 +3622,49 @@ task_process_status PortsOrch::setPortLinkTraining(const Port &port, bool state) return task_success; } +ReturnCode PortsOrch::setPortLinkEventDampingAlgorithm(Port &port, + sai_redis_link_event_damping_algorithm_t &link_event_damping_algorithm) +{ + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGORITHM; + attr.value.s32 = link_event_damping_algorithm; + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_port_api->set_port_attribute(port.m_port_id, &attr), + "Failed to set link event damping algorithm (" << link_event_damping_algorithm << ") for port " + << port.m_alias); + + SWSS_LOG_INFO("Set link event damping algorithm %u for port %s", link_event_damping_algorithm, port.m_alias.c_str()); + return ReturnCode(); +} + +ReturnCode PortsOrch::setPortLinkEventDampingAiedConfig(Port &port, + sai_redis_link_event_damping_algo_aied_config_t &config) { + + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGO_AIED_CONFIG; + attr.value.ptr = (void *) &config; + + std::stringstream msg; + msg << "link event damping algorithm aied config for port " << port.m_alias << " - "; + msg << "max_suppress_time: " << config.max_suppress_time << ", "; + msg << "decay_half_life: " << config.decay_half_life << ", "; + msg << "suppress_threshold: " << config.suppress_threshold << ", "; + msg << "reuse_threshold: " << config.reuse_threshold << ", "; + msg << "flap_penalty: " << config.flap_penalty; + + std::string msg_str = msg.str(); + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_port_api->set_port_attribute(port.m_port_id, &attr), "Failed to set " + msg_str); + + SWSS_LOG_INFO("Set %s", msg_str.c_str()); + + return ReturnCode(); +} + bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const { SWSS_LOG_ENTER(); @@ -2943,17 +3719,7 @@ bool PortsOrch::createVlanHostIntf(Port& vl, string hostif_name) attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); - bool set_hostif_tx_queue = false; - if (gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_HOSTIF, SAI_HOSTIF_ATTR_QUEUE)) - { - set_hostif_tx_queue = true; - } - else - { - SWSS_LOG_WARN("Hostif queue attribute not supported"); - } - - if (set_hostif_tx_queue) + if (m_supportsHostIfTxQueue) { attr.id = SAI_HOSTIF_ATTR_QUEUE; attr.value.u32 = DEFAULT_HOSTIF_TX_QUEUE; @@ -2984,6 +3750,59 @@ bool PortsOrch::removeVlanHostIntf(Port vl) return true; } +void PortsOrch::updateDbPortFlapCount(Port& port, sai_port_oper_status_t pstatus) +{ + SWSS_LOG_ENTER(); + + ++port.m_flap_count; + vector tuples; + FieldValueTuple tuple("flap_count", std::to_string(port.m_flap_count)); + tuples.push_back(tuple); + + auto now = std::chrono::system_clock::now(); + std::time_t now_c = std::chrono::system_clock::to_time_t(now); + if (pstatus == SAI_PORT_OPER_STATUS_DOWN) + { + char buffer[32]; + // Format: Www Mmm dd hh:mm:ss yyyy + std::strftime(buffer, sizeof(buffer), "%a %b %d %H:%M:%S %Y", std::gmtime(&now_c)); + FieldValueTuple tuple("last_down_time", buffer); + tuples.push_back(tuple); + } + else if (pstatus == SAI_PORT_OPER_STATUS_UP) + { + char buffer[32]; + // Format: Www Mmm dd hh:mm:ss yyyy + std::strftime(buffer, sizeof(buffer), "%a %b %d %H:%M:%S %Y", std::gmtime(&now_c)); + FieldValueTuple tuple("last_up_time", buffer); + tuples.push_back(tuple); + } + m_portTable->set(port.m_alias, tuples); +} + +void PortsOrch::updateDbPortOperError(Port& port, PortOperErrorEvent *pevent) +{ + SWSS_LOG_ENTER(); + + auto time = pevent->getEventTime(); + auto key = pevent->getDbKey(); + vector tuples; + FieldValueTuple tup1("oper_error_status", std::to_string(port.m_oper_error_status)); + tuples.push_back(tup1); + + FieldValueTuple tup2("oper_error_status_time", time); + tuples.push_back(tup2); + + size_t count = pevent->getErrorCount(); + FieldValueTuple tup3(key + "_count", std::to_string(count)); + tuples.push_back(tup3); + + FieldValueTuple tup4(key + "_time", time); + tuples.push_back(tup4); + + m_portOpErrTable.set(port.m_alias, tuples); +} + void PortsOrch::updateDbPortOperStatus(const Port& port, sai_port_oper_status_t status) const { SWSS_LOG_ENTER(); @@ -3021,6 +3840,10 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) } /* else : port is in default state or not yet created */ + /* Remove port counters */ + port_stat_manager.clearCounterIdList(port.m_port_id); + port_buffer_drop_stat_manager.clearCounterIdList(port.m_port_id); + /* * Remove port serdes (if exists) before removing port since this * reference is dependency. @@ -3061,8 +3884,18 @@ string PortsOrch::getPriorityGroupDropPacketsFlexCounterTableKey(string key) { return string(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP) + ":" + key; } +/**** +* Func Name : getWredQueueFlexCounterTableKey +* Parameters : Key as string +* Returns : Returns the Wred queue stat flexcounter table Key +* Description: Form the key and return +**/ +string PortsOrch::getWredQueueFlexCounterTableKey(string key) +{ + return string(WRED_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP) + ":" + key; +} -bool PortsOrch::initPort(const PortConfig &port) +bool PortsOrch::initExistingPort(const PortConfig& port) { SWSS_LOG_ENTER(); @@ -3071,90 +3904,180 @@ bool PortsOrch::initPort(const PortConfig &port) const auto &index = port.index.value; const auto &lane_set = port.lanes.value; + /* Determine if the port has already been initialized before */ + auto it = m_portList.find(alias); + if (it != m_portList.end()) + { + SWSS_LOG_DEBUG("Port has already been initialized before alias:%s", alias.c_str()); + return true; + } + /* Determine if the lane combination exists in switch */ - if (m_portListLaneMap.find(lane_set) != m_portListLaneMap.end()) + if (m_portListLaneMap.find(lane_set) == m_portListLaneMap.end()) + { + SWSS_LOG_ERROR("Failed to locate port lane combination alias:%s", alias.c_str()); + return false; + } + + sai_object_id_t id = m_portListLaneMap[lane_set]; + + Port p(alias, Port::PHY); + p.m_role = role; + p.m_index = index; + p.m_port_id = id; + + /* initialize port admin status */ + if (!getPortAdminStatus(p.m_port_id, p.m_admin_state_up)) + { + SWSS_LOG_ERROR("Failed to get initial port admin status %s", p.m_alias.c_str()); + return false; + } + + // Read port speed of an already existing port + if (!isAutoNegEnabled(p.m_port_id) && !getPortSpeed(p.m_port_id, p.m_speed)) + { + SWSS_LOG_ERROR("Failed to get initial port admin speed %d", p.m_speed); + return false; + } + + std::vector ports = {p}; + return initPortsBulk(ports); +} + +bool PortsOrch::initPortsBulk(std::vector& ports) +{ + SWSS_LOG_ENTER(); + + bool status = true; + + SWSS_LOG_TIMER(__FUNCTION__); + + if (!initializePorts(ports)) + { + status = false; + } + + for (auto& p: ports) { - sai_object_id_t id = m_portListLaneMap[lane_set]; + const auto& alias = p.m_alias; - /* Determine if the port has already been initialized before */ - if (m_portList.find(alias) != m_portList.end() && m_portList[alias].m_port_id == id) + registerPort(p); + + if (!m_isWarmRestoreStage) { - SWSS_LOG_DEBUG("Port has already been initialized before alias:%s", alias.c_str()); + postPortInit(m_portList[alias]); } - else - { - Port p(alias, Port::PHY); - p.m_index = index; - p.m_port_id = id; + SWSS_LOG_NOTICE("Initialized port %s", alias.c_str()); + } - /* Initialize the port and create corresponding host interface */ - if (initializePort(p)) - { - /* Create associated Gearbox lane mapping */ - initGearboxPort(p); + return status; +} - /* Add port to port list */ - m_portList[alias] = p; - saiOidToAlias[id] = alias; - m_port_ref_count[alias] = 0; - m_portOidToIndex[id] = index; +// Registers a newly created and initialized port, adds port to internal maps. +// Performs the following operations: +// - Adds port to internal port list and mapping tables +// - Initializes gearbox port configuration if applicable +// - Sets up port name mapping for counter tables +// - Installs flex counters for port statistics monitoring +// - Notifies subscribers of port state changes +void PortsOrch::registerPort(Port &p) +{ + SWSS_LOG_ENTER(); - /* Add port name map to counter table */ - FieldValueTuple tuple(p.m_alias, sai_serialize_object_id(p.m_port_id)); - vector fields; - fields.push_back(tuple); - m_counterTable->set("", fields); + const auto &alias = p.m_alias; + const auto &role = p.m_role; + const auto &index = p.m_index; + const auto id = p.m_port_id; - // Install a flex counter for this port to track stats - auto flex_counters_orch = gDirectory.get(); - /* Delay installing the counters if they are yet enabled - If they are enabled, install the counters immediately */ - if (flex_counters_orch->getPortCountersState()) - { - auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); - port_stat_manager.setCounterIdList(p.m_port_id, - CounterType::PORT, port_counter_stats); - auto gbport_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, true); - if (p.m_system_side_id) - gb_port_stat_manager.setCounterIdList(p.m_system_side_id, - CounterType::PORT, gbport_counter_stats); - if (p.m_line_side_id) - gb_port_stat_manager.setCounterIdList(p.m_line_side_id, - CounterType::PORT, gbport_counter_stats); - } - if (flex_counters_orch->getPortBufferDropCountersState()) - { - auto port_buffer_drop_stats = generateCounterStats(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP); - port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats); - } + /* Create associated Gearbox lane mapping */ + initGearboxPort(p); + updateSystemPort(p); - PortUpdate update = { p, true }; - notify(SUBJECT_TYPE_PORT_CHANGE, static_cast(&update)); + /* Add port to port list */ + m_portList[alias] = p; + saiOidToAlias[id] = alias; + m_port_ref_count[alias] = 0; + m_portOidToIndex[id] = index; - m_portList[alias].m_init = true; + /* Add port name map to counter table */ + FieldValueTuple tuple(p.m_alias, sai_serialize_object_id(p.m_port_id)); + vector fields; + fields.push_back(tuple); + m_counterNameMapUpdater->setCounterNameMap(p.m_alias, p.m_port_id); - if (role == Port::Role::Rec || role == Port::Role::Inb) - { - m_recircPortRole[alias] = role; - } + // Install a flex counter for this port to track stats + auto flex_counters_orch = gDirectory.get(); + /* Delay installing the counters if they are yet enabled + If they are enabled, install the counters immediately */ + if (flex_counters_orch->getPortCountersState()) + { + auto port_counter_stats = generateCounterStats(port_stat_ids, sai_serialize_port_stat); + port_stat_manager.setCounterIdList(p.m_port_id, + CounterType::PORT, port_counter_stats); + auto gbport_counter_stats = generateCounterStats(gbport_stat_ids, sai_serialize_port_stat); + if (p.m_system_side_id) + gb_port_stat_manager.setCounterIdList(p.m_system_side_id, + CounterType::PORT, gbport_counter_stats, p.m_switch_id); + if (p.m_line_side_id) + gb_port_stat_manager.setCounterIdList(p.m_line_side_id, + CounterType::PORT, gbport_counter_stats, p.m_switch_id); + } + if (flex_counters_orch->getPortBufferDropCountersState()) + { + auto port_buffer_drop_stats = generateCounterStats(port_buffer_drop_stat_ids, sai_serialize_port_stat); + port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats); + } + + if (flex_counters_orch->getWredPortCountersState()) + { + auto wred_port_stats = generateCounterStats(wred_port_stat_ids, sai_serialize_port_stat); + wred_port_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, wred_port_stats); + } + + // If queue-related flex counters are already enabled, generate queue maps + // for the newly added port so that usecases like dynamic port breakout works. + bool queueFcEnabled = flex_counters_orch->getQueueCountersState() || + flex_counters_orch->getQueueWatermarkCountersState() || + flex_counters_orch->getWredQueueCountersState(); + if (queueFcEnabled && !p.m_queue_ids.empty()) + { + auto queuesStateVector = flex_counters_orch->getQueueConfigurations(); + + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(p.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); - SWSS_LOG_NOTICE("Initialized port %s", alias.c_str()); + if (queuesStateVector.count(p.m_alias)) + { + flexCounterQueueState = queuesStateVector.at(p.m_alias); + } + else if (queuesStateVector.count(createAllAvailableBuffersStr)) + { + if (maxQueueNumber > 0) + { + flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); } - else + } + else + { + if (p.m_host_tx_queue_configured && p.m_host_tx_queue < maxQueueNumber) { - SWSS_LOG_ERROR("Failed to initialize port %s", alias.c_str()); - return false; + flexCounterQueueState.enableQueueCounter(p.m_host_tx_queue); } } + + generateQueueMapPerPort(p, flexCounterQueueState, false); } - else + + PortUpdate update = { p, true }; + notify(SUBJECT_TYPE_PORT_CHANGE, static_cast(&update)); + + m_portList[alias].m_init = true; + + if (role == Port::Role::Rec || role == Port::Role::Inb) { - SWSS_LOG_ERROR("Failed to locate port lane combination alias:%s", alias.c_str()); - return false; + m_recircPortRole[alias] = role; } - - return true; } void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) @@ -3169,6 +4092,14 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) return; } + if (!p.m_queue_ids.empty()) + { + auto skip_host_tx_queue = p.m_host_tx_queue_configured && (p.m_queue_ids.size() > p.m_host_tx_queue); + // Remove all queue counters and mappings for this port to avoid stale entries + std::string range = "0-" + to_string(p.m_queue_ids.size() - 1); + removePortBufferQueueCounters(p, range, skip_host_tx_queue); + } + /* remove port from flex_counter_table for updating counters */ auto flex_counters_orch = gDirectory.get(); if ((flex_counters_orch->getPortCountersState())) @@ -3180,13 +4111,20 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) { port_buffer_drop_stat_manager.clearCounterIdList(p.m_port_id); } + if (flex_counters_orch->getWredPortCountersState()) + { + wred_port_stat_manager.clearCounterIdList(p.m_port_id); + } /* remove port name map from counter table */ - m_counterTable->hdel("", alias); + m_counterNameMapUpdater->delCounterNameMap(alias); /* Remove the associated port serdes attribute */ removePortSerdesAttribute(p.m_port_id); + /* Remove the entry from buffer maximum parameter table*/ + m_stateBufferMaximumValueTable->del(alias); + m_portList[alias].m_init = false; SWSS_LOG_NOTICE("De-Initialized port %s", alias.c_str()); } @@ -3244,7 +4182,10 @@ bool PortsOrch::bake() addExistingData(APP_LAG_MEMBER_TABLE_NAME); addExistingData(APP_VLAN_TABLE_NAME); addExistingData(APP_VLAN_MEMBER_TABLE_NAME); - + if (saiHwTxSignalSupported && saiTxReadyNotifySupported) + { + addExistingData(STATE_TRANSCEIVER_INFO_TABLE_NAME); + } return true; } @@ -3425,41 +4366,67 @@ void PortsOrch::doPortTask(Consumer &consumer) if (op == SET_COMMAND) { - auto &fvMap = m_portConfigMap[key]; - - for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) + auto parsePortFvs = [&](auto& fvMap) -> bool { - auto fieldName = fvField(cit); - auto fieldValue = fvValue(cit); + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) + { + auto fieldName = fvField(cit); + auto fieldValue = fvValue(cit); - SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); + SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); - fvMap[fieldName] = fieldValue; - } + fvMap[fieldName] = fieldValue; + } - pCfg.fieldValueMap = fvMap; + pCfg.fieldValueMap = fvMap; - if (!m_portHlpr.parsePortConfig(pCfg)) + if (!m_portHlpr.parsePortConfig(pCfg)) + { + return false; + } + + return true; + }; + + const bool portExists = m_portList.count(key) > 0; + if (!portExists) { - it = taskMap.erase(it); - continue; - } + // Aggregate configuration while the port is not created. + auto &fvMap = m_portConfigMap[key]; - /* Collect information about all received ports */ - m_lanesAliasSpeedMap[pCfg.lanes.value] = pCfg; + if (!parsePortFvs(fvMap)) + { + it = taskMap.erase(it); + continue; + } - // TODO: - // Fix the issue below - // After PortConfigDone, while waiting for "PortInitDone" and the first gBufferOrch->isPortReady(alias), - // the complete m_lanesAliasSpeedMap may be populated again, so initPort() will be called more than once - // for the same port. + if (!m_portHlpr.validatePortConfig(pCfg)) + { + it = taskMap.erase(it); + continue; + } + + /* Collect information about all received ports */ + m_lanesAliasSpeedMap[pCfg.lanes.value] = pCfg; + } + else + { + // Port is already created, gather updated field-values. + std::unordered_map fvMap; + + if (!parsePortFvs(fvMap)) + { + it = taskMap.erase(it); + continue; + } + } /* Once all ports received, go through the each port and perform appropriate actions: * 1. Remove ports which don't exist anymore * 2. Create new ports * 3. Initialize all ports */ - if (getPortConfigState() != PORT_CONFIG_MISSING) + if (getPortConfigState() == PORT_CONFIG_RECEIVED) { std::vector portsToAddList; std::vector portsToRemoveList; @@ -3496,44 +4463,49 @@ void PortsOrch::doPortTask(Consumer &consumer) continue; } - if (!initPort(it->second)) + if (!initExistingPort(it->second)) { - // Failure has been recorded in initPort + // Failure has been recorded in initExistingPort it++; continue; } - initPortSupportedSpeeds(it->second.key, m_portListLaneMap[it->first]); - initPortSupportedFecModes(it->second.key, m_portListLaneMap[it->first]); - it++; } // Bulk port add if (!portsToAddList.empty()) { - if (!addPortBulk(portsToAddList)) + std::vector addedPorts; + if (!addPortBulk(portsToAddList, addedPorts)) { SWSS_LOG_THROW("PortsOrch initialization failure"); } - for (const auto &cit : portsToAddList) - { - if (!initPort(cit)) - { - // Failure has been recorded in initPort - continue; - } - - initPortSupportedSpeeds(cit.key, m_portListLaneMap[cit.lanes.value]); - initPortSupportedFecModes(cit.key, m_portListLaneMap[cit.lanes.value]); - } + initPortsBulk(addedPorts); } setPortConfigState(PORT_CONFIG_DONE); } + else if (getPortConfigState() == PORT_CONFIG_DONE) + { + // Add and initialize the port + if (!portExists) + { + std::vector portsToAddList { pCfg }; + std::vector addedPorts; - if (getPortConfigState() != PORT_CONFIG_DONE) + if (!addPortBulk(portsToAddList, addedPorts)) + { + SWSS_LOG_ERROR("Failed to add port %s", pCfg.key.c_str()); + it++; + continue; + } + + initPortsBulk(addedPorts); + } + } + else { // Not yet receive PortConfigDone. Save it for future retry it++; @@ -3562,6 +4534,9 @@ void PortsOrch::doPortTask(Consumer &consumer) PortSerdesAttrMap_t serdes_attr; getPortSerdesAttr(serdes_attr, pCfg); + // Saved configured admin status + bool admin_status = p.m_admin_state_up; + if (pCfg.autoneg.is_set) { if (!p.m_an_cfg || p.m_autoneg != pCfg.autoneg.value) @@ -3667,10 +4642,10 @@ void PortsOrch::doPortTask(Consumer &consumer) m_portList[p.m_alias] = p; updatePortStatePoll(p, PORT_STATE_POLL_LT, pCfg.link_training.value); - // Restore pre-emphasis when LT is transitioned from ON to OFF + // Restore serdes attributes when LT is transitioned from ON to OFF if (!p.m_link_training && serdes_attr.empty()) { - serdes_attr = p.m_preemphasis; + serdes_attr = p.m_serdes_attrs; } SWSS_LOG_NOTICE( @@ -3680,6 +4655,86 @@ void PortsOrch::doPortTask(Consumer &consumer) } } + if (pCfg.link_event_damping_algorithm.is_set) + { + if (p.m_link_event_damping_algorithm != pCfg.link_event_damping_algorithm.value) + { + auto status = setPortLinkEventDampingAlgorithm(p, pCfg.link_event_damping_algorithm.value); + if (!status.ok()) + { + SWSS_LOG_ERROR( + "Failed to set port %s link event damping algorithm to %s", + p.m_alias.c_str(), m_portHlpr.getDampingAlgorithm(pCfg).c_str() + ); + it = taskMap.erase(it); + continue; + } + + p.m_link_event_damping_algorithm = pCfg.link_event_damping_algorithm.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s link event damping algorithm to %s", + p.m_alias.c_str(), m_portHlpr.getDampingAlgorithm(pCfg).c_str() + ); + } + } + + sai_redis_link_event_damping_algo_aied_config_t aied_config = { + p.m_max_suppress_time, + p.m_suppress_threshold, + p.m_reuse_threshold, + p.m_decay_half_life, + p.m_flap_penalty, + }; + + if (pCfg.link_event_damping_config.max_suppress_time.is_set) + { + aied_config.max_suppress_time = pCfg.link_event_damping_config.max_suppress_time.value; + } + if (pCfg.link_event_damping_config.decay_half_life.is_set) + { + aied_config.decay_half_life = pCfg.link_event_damping_config.decay_half_life.value; + } + if (pCfg.link_event_damping_config.suppress_threshold.is_set) + { + aied_config.suppress_threshold = pCfg.link_event_damping_config.suppress_threshold.value; + } + if (pCfg.link_event_damping_config.reuse_threshold.is_set) + { + aied_config.reuse_threshold = pCfg.link_event_damping_config.reuse_threshold.value; + } + if (pCfg.link_event_damping_config.flap_penalty.is_set) + { + aied_config.flap_penalty = pCfg.link_event_damping_config.flap_penalty.value; + } + + bool config_changed = !(aied_config.max_suppress_time == p.m_max_suppress_time && + aied_config.decay_half_life == p.m_decay_half_life && + aied_config.suppress_threshold == p.m_suppress_threshold && + aied_config.reuse_threshold == p.m_reuse_threshold && + aied_config.flap_penalty == p.m_flap_penalty); + + if (config_changed) + { + auto status = setPortLinkEventDampingAiedConfig(p, aied_config); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to set port %s link event damping config", p.m_alias.c_str()); + it = taskMap.erase(it); + continue; + } + + p.m_max_suppress_time = aied_config.max_suppress_time; + p.m_decay_half_life = aied_config.decay_half_life; + p.m_suppress_threshold = aied_config.suppress_threshold; + p.m_reuse_threshold = aied_config.reuse_threshold; + p.m_flap_penalty = aied_config.flap_penalty; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE("Set port %s link event damping config successfully", p.m_alias.c_str()); + } + if (pCfg.speed.is_set) { if (p.m_speed != pCfg.speed.value) @@ -3848,6 +4903,27 @@ void PortsOrch::doPortTask(Consumer &consumer) } } + if (pCfg.serdes.unreliable_los.is_set) + { + auto status = setPortUnreliableLOS(p, pCfg.serdes.unreliable_los.value); + if (status != task_success) + { + SWSS_LOG_ERROR( + "Failed to set port %s unreliable from %d to %d", + p.m_alias.c_str(), p.m_unreliable_los, pCfg.serdes.unreliable_los.value + ); + p.m_unreliable_los = false; + } else { + + p.m_unreliable_los = pCfg.serdes.unreliable_los.value; + SWSS_LOG_INFO( + "Set port %s unreliable los to %s", + p.m_alias.c_str(), m_portHlpr.getUnreliableLosStr(pCfg).c_str() + ); + } + m_portStateTable.hset(p.m_alias, "phy_ctrl_unreliable_los", p.m_unreliable_los ? "true":"false"); + } + if (pCfg.adv_interface_types.is_set) { if (!p.m_adv_intf_cfg || p.m_adv_interface_types != pCfg.adv_interface_types.value) @@ -4010,7 +5086,6 @@ void PortsOrch::doPortTask(Consumer &consumer) p.m_fec_mode = pCfg.fec.value; p.m_override_fec = pCfg.fec.override_fec; - p.m_fec_cfg = true; m_portList[p.m_alias] = p; SWSS_LOG_NOTICE( @@ -4018,6 +5093,10 @@ void PortsOrch::doPortTask(Consumer &consumer) p.m_alias.c_str(), m_portHlpr.getFecStr(pCfg).c_str() ); } + else + { + setGearboxPortsAttr(p, SAI_PORT_ATTR_FEC_MODE, &pCfg.fec.value, pCfg.fec.override_fec); + } } if (pCfg.learn_mode.is_set) @@ -4047,7 +5126,7 @@ void PortsOrch::doPortTask(Consumer &consumer) if (pCfg.pfc_asym.is_set) { - if (!p.m_pfc_asym_cfg || p.m_pfc_asym != pCfg.pfc_asym.value) + if (p.m_pfc_asym != pCfg.pfc_asym.value) { if (m_portCap.isPortPfcAsymSupported()) { @@ -4062,7 +5141,6 @@ void PortsOrch::doPortTask(Consumer &consumer) } p.m_pfc_asym = pCfg.pfc_asym.value; - p.m_pfc_asym_cfg = true; m_portList[p.m_alias] = p; SWSS_LOG_NOTICE( @@ -4084,8 +5162,8 @@ void PortsOrch::doPortTask(Consumer &consumer) { if (p.m_link_training) { - SWSS_LOG_NOTICE("Save port %s preemphasis for LT", p.m_alias.c_str()); - p.m_preemphasis = serdes_attr; + SWSS_LOG_NOTICE("Save port %s serdes attributes for LT", p.m_alias.c_str()); + p.m_serdes_attrs = serdes_attr; m_portList[p.m_alias] = p; } else @@ -4107,7 +5185,7 @@ void PortsOrch::doPortTask(Consumer &consumer) if (setPortSerdesAttribute(p.m_port_id, gSwitchId, serdes_attr)) { SWSS_LOG_NOTICE("Set port %s SI settings is successful", p.m_alias.c_str()); - p.m_preemphasis = serdes_attr; + p.m_serdes_attrs = serdes_attr; m_portList[p.m_alias] = p; } else @@ -4122,6 +5200,15 @@ void PortsOrch::doPortTask(Consumer &consumer) /* create host_tx_ready field in state-db */ initHostTxReadyState(p); + initializePortOperErrors(p); + + // Restore admin status if the port was brought down + if (admin_status != p.m_admin_state_up && pCfg.admin_status.is_set == false) + { + pCfg.admin_status.is_set = true; + pCfg.admin_status.value = admin_status; + } + /* Last step set port admin status */ if (pCfg.admin_status.is_set) { @@ -4146,6 +5233,110 @@ void PortsOrch::doPortTask(Consumer &consumer) ); } } + + if (pCfg.pt_intf_id.is_set) + { + if (!m_isPathTracingSupported) + { + SWSS_LOG_WARN( + "Failed to set Path Tracing Interface ID: Path Tracing is not supported by the switch" + ); + it = taskMap.erase(it); + continue; + } + + if (p.m_pt_intf_id != pCfg.pt_intf_id.value) + { + /* + * First, let's check the Path Tracing Interface ID configured for the port. + * + * Path Tracing Interface ID > 0 -> Path Tracing ENABLED on the port + * Path Tracing Interface ID == 0 -> Path Tracing DISABLED on the port + */ + if (pCfg.pt_intf_id.value != 0) + { + /* Path Tracing ENABLED case */ + + /* Create and set port TAM object */ + if (!createAndSetPortPtTam(p)) + { + SWSS_LOG_ERROR( + "Failed to create and set port %s TAM object for Path Tracing", + p.m_alias.c_str() + ); + it++; + continue; + } + } + else + { + /* Path Tracing DISABLED case */ + + /* Unset port TAM object */ + if (!unsetPortPtTam(p)) + { + SWSS_LOG_ERROR( + "Failed to unset port %s TAM object for Path Tracing", + p.m_alias.c_str() + ); + it++; + continue; + } + } + + /* Set Path Tracing Interface ID */ + if (!setPortPtIntfId(p, pCfg.pt_intf_id.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s Intf ID to %u", + p.m_alias.c_str(), pCfg.pt_intf_id.value + ); + it++; + continue; + } + + p.m_pt_intf_id = pCfg.pt_intf_id.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s Intf ID to %u", + p.m_alias.c_str(), pCfg.pt_intf_id.value + ); + } + } + + if (pCfg.pt_timestamp_template.is_set) + { + if (!m_isPathTracingSupported) + { + SWSS_LOG_WARN( + "Failed to set Path Tracing Timestamp Template: Path Tracing is not supported by the switch" + ); + it = taskMap.erase(it); + continue; + } + + if (p.m_pt_timestamp_template != pCfg.pt_timestamp_template.value) + { + if (!setPortPtTimestampTemplate(p, pCfg.pt_timestamp_template.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s Timestamp Template to %s", + p.m_alias.c_str(), m_portHlpr.getPtTimestampTemplateStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_pt_timestamp_template = pCfg.pt_timestamp_template.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s Timestamp Template to %s", + p.m_alias.c_str(), m_portHlpr.getPtTimestampTemplateStr(pCfg).c_str() + ); + } + } } } else if (op == DEL_COMMAND) @@ -4202,6 +5393,18 @@ void PortsOrch::doPortTask(Consumer &consumer) } } + /* + * Unset port Path Tracing TAM object and decrease TAM object refcount before + * removing the port (if the port has a TAM object associated) + */ + if (!unsetPortPtTam(p)) + { + SWSS_LOG_ERROR( + "Failed to unset port %s TAM object for Path Tracing", + p.m_alias.c_str() + ); + } + sai_status_t status = removePort(port_id); if (SAI_STATUS_SUCCESS != status) { @@ -4469,6 +5672,74 @@ void PortsOrch::doVlanMemberTask(Consumer &consumer) } } +void PortsOrch::doTransceiverPresenceCheck(Consumer &consumer) +{ + /* + the idea is to listen to transceiver info table, and also maintain an internal list of plugged modules. + + */ + SWSS_LOG_ENTER(); + + string table_name = consumer.getTableName(); + + auto it = consumer.m_toSync.begin(); + while(it != consumer.m_toSync.end()) + { + auto t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + SWSS_LOG_DEBUG("TRANSCEIVER_INFO table has changed - SET command for port %s", alias.c_str()); + + if (m_pluggedModulesPort.find(alias) == m_pluggedModulesPort.end()) + { + m_pluggedModulesPort[alias] = m_portList[alias]; + + SWSS_LOG_DEBUG("Setting host_tx_signal allow for port %s", alias.c_str()); + setSaiHostTxSignal(m_pluggedModulesPort[alias], true); + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_DEBUG("TRANSCEIVER_INFO table has changed - DEL command for port %s", alias.c_str()); + + Port p; + if (m_pluggedModulesPort.find(alias) != m_pluggedModulesPort.end()) + { + p = m_pluggedModulesPort[alias]; + m_pluggedModulesPort.erase(alias); + SWSS_LOG_DEBUG("Setting host_tx_signal NOT allow for port %s", alias.c_str()); + setSaiHostTxSignal(p, false); + } + } + + it = consumer.m_toSync.erase(it); + } +} + +bool PortsOrch::setSaiHostTxSignal(const Port &port, bool enable) +{ + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = enable; + + if (saiOidToAlias.find(port.m_port_id) != saiOidToAlias.end()) + { + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Could not set SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE to port 0x%" PRIx64, port.m_port_id); + return false; + } + return true; + } + + SWSS_LOG_NOTICE("Could not set SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE - OID does not exist 0x%" PRIx64, port.m_port_id); + return true; +} + void PortsOrch::doLagTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -4693,6 +5964,16 @@ void PortsOrch::doLagMemberTask(Consumer &consumer) Port lag, port; if (!getPort(lag_alias, lag)) { + if (gMySwitchType == "voq") + { + size_t pos = lag_alias.find('|'); + std::string port_hostname = (pos != std::string::npos) ? lag_alias.substr(0, pos) : lag_alias; + if (gMyHostName == port_hostname) + { + it = consumer.m_toSync.erase(it); + continue; + } + } SWSS_LOG_INFO("Failed to locate LAG %s", lag_alias.c_str()); it++; continue; @@ -4761,7 +6042,7 @@ void PortsOrch::doLagMemberTask(Consumer &consumer) } } - if ((gMySwitchType == "voq") && (port.m_type != Port::SYSTEM)) + if (isChassisDbInUse() && (port.m_type != Port::SYSTEM)) { //Sync to SYSTEM_LAG_MEMBER_TABLE of CHASSIS_APP_DB voqSyncAddLagMember(lag, port, status); @@ -4833,6 +6114,46 @@ void PortsOrch::doLagMemberTask(Consumer &consumer) } } +void PortsOrch::onWarmBootEnd() +{ + SWSS_LOG_ENTER(); + + m_isWarmRestoreStage = false; + + /* Start dynamic state sync up */ + refreshPortStatus(); + + // Do post boot port initialization + for (auto& it: m_portList) + { + Port& port = it.second; + + if (port.m_type == Port::PHY) + { + postPortInit(it.second); + } + } +} + +void PortsOrch::postPortInit(Port& p) +{ + SWSS_LOG_ENTER(); + + if (gMySwitchType != "dpu") + { + initializePortBufferMaximumParameters(p); + } + + // We have to test the size of m_queue_ids here since it isn't initialized on some platforms (like DPU) + if (p.m_host_tx_queue_configured && p.m_queue_ids.size() > p.m_host_tx_queue) + { + createPortBufferQueueCounters(p, to_string(p.m_host_tx_queue), false); + } + + initPortSupportedSpeeds(p.m_alias, p.m_port_id); + initPortSupportedFecModes(p.m_alias, p.m_port_id); +} + void PortsOrch::doTask() { auto tableOrder = { @@ -4867,9 +6188,14 @@ void PortsOrch::doTask(Consumer &consumer) string table_name = consumer.getTableName(); - if (table_name == APP_PORT_TABLE_NAME) + if (table_name == STATE_TRANSCEIVER_INFO_TABLE_NAME) + { + doTransceiverPresenceCheck(consumer); + } + else if (table_name == APP_PORT_TABLE_NAME) { doPortTask(consumer); + flushCounters(); } else if (table_name == APP_SEND_TO_INGRESS_PORT_TABLE_NAME) { @@ -4947,251 +6273,473 @@ void PortsOrch::initializeVoqs(Port &port) SWSS_LOG_INFO("Get voqs for port %s", port.m_alias.c_str()); } -void PortsOrch::initializeQueues(Port &port) +bool PortsOrch::initializePorts(std::vector& ports) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES; - sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + bool status = true; + + if (gMySwitchType != "dpu") { - SWSS_LOG_ERROR("Failed to get number of queues for port %s rv:%d", port.m_alias.c_str(), status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure."); - } + initializePriorityGroupsBulk(ports); + initializeQueuesBulk(ports); + initializeSchedulerGroupsBulk(ports); } - SWSS_LOG_INFO("Get %d queues for port %s", attr.value.u32, port.m_alias.c_str()); - - port.m_queue_ids.resize(attr.value.u32); - port.m_queue_lock.resize(attr.value.u32); - if (attr.value.u32 == 0) + /* initialize port host_tx_ready value (only for supporting systems) */ + if (m_cmisModuleAsicSyncSupported) { - return; + initializePortHostTxReadyBulk(ports); } - attr.id = SAI_PORT_ATTR_QOS_QUEUE_LIST; - attr.value.objlist.count = (uint32_t)port.m_queue_ids.size(); - attr.value.objlist.list = port.m_queue_ids.data(); + initializePortMtuBulk(ports); - status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + // Create host interfaces + for (auto iter = ports.begin(); iter != ports.end();) { - SWSS_LOG_ERROR("Failed to get queue list for port %s rv:%d", port.m_alias.c_str(), status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) + Port& port = *iter; + + /* Check warm start states */ + vector tuples; + bool exist = m_portTable->get(port.m_alias, tuples); + string operStatus, flapCount = "0"; + if (exist) { - throw runtime_error("PortsOrch initialization failure."); + for (auto i : tuples) + { + if (fvField(i) == "oper_status") + { + operStatus = fvValue(i); + } + + if (fvField(i) == "flap_count") + { + flapCount = fvValue(i); + } + } + } + + SWSS_LOG_INFO("Port %s with oper %s flap_count=%s", port.m_alias.c_str(), operStatus.c_str(), flapCount.c_str()); + + /** + * Create database port oper status as DOWN if attr missing + * This status will be updated upon receiving port_oper_status_notification. + */ + if (operStatus == "up") + { + port.m_oper_status = SAI_PORT_OPER_STATUS_UP; + } + else if (operStatus.empty()) + { + port.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; + /* Fill oper_status in db with default value "down" */ + m_portTable->hset(port.m_alias, "oper_status", "down"); + } + else + { + port.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; + } + + // initalize port flap count + if (!flapCount.empty()) + { + try + { + port.m_flap_count = stoull(flapCount); + m_portTable->hset(port.m_alias, "flap_count", flapCount); + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to get port (%s) flap_count: %s", port.m_alias.c_str(), e.what()); + } } + + /* + * always initialize Port SAI_HOSTIF_ATTR_OPER_STATUS based on oper_status value in appDB. + */ + bool isUp = port.m_oper_status == SAI_PORT_OPER_STATUS_UP; + + /* Create host interface */ + if (!addHostIntfs(port, port.m_alias, port.m_hif_id, isUp)) + { + SWSS_LOG_ERROR("Failed to create host interface for port %s", port.m_alias.c_str()); + iter = ports.erase(iter); + status = false; + continue; + } + + iter++; } - SWSS_LOG_INFO("Get queues for port %s", port.m_alias.c_str()); + return status; } -void PortsOrch::initializeSchedulerGroups(Port &port) +void PortsOrch::initializePortHostTxReadyBulk(std::vector& ports) { - std::vector scheduler_group_ids; SWSS_LOG_ENTER(); - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_SCHEDULER_GROUPS; - sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + SWSS_LOG_TIMER(__FUNCTION__); + + const auto portCount = static_cast(ports.size()); + + PortBulker bulker(portCount); + + for (auto& port: ports) { - SWSS_LOG_ERROR("Failed to get number of scheduler groups for port:%s", port.m_alias.c_str()); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_HOST_TX_READY_STATUS; + bulker.add(port.m_port_id, attr); + } + + bulker.executeGet(); + + for (size_t idx = 0; idx < portCount; idx++) + { + const auto& port = ports[idx]; + const auto status = bulker.statuses[idx]; + const auto& attr = bulker.attrList[idx]; + + bool hostTxReady = false; + + if (status == SAI_STATUS_SUCCESS) { - throw runtime_error("PortsOrch initialization failure."); + hostTxReady = attr.value.booldata; } + else + { + SWSS_LOG_ERROR("Failed to get host_tx_ready value from SAI to Port %" PRIx64 , port.m_port_id); + } + + string hostTxReadyStr = hostTxReady ? "true" : "false"; + + SWSS_LOG_DEBUG("Received host_tx_ready current status: port_id: 0x%" PRIx64 " status: %s", port.m_port_id, hostTxReadyStr.c_str()); + setHostTxReady(port, hostTxReadyStr); } - SWSS_LOG_INFO("Got %d number of scheduler groups for port %s", attr.value.u32, port.m_alias.c_str()); +} - scheduler_group_ids.resize(attr.value.u32); +void PortsOrch::initializePortMtuBulk(std::vector& ports) +{ + SWSS_LOG_ENTER(); - if (attr.value.u32 == 0) + SWSS_LOG_TIMER(__FUNCTION__); + + const auto portCount = static_cast(ports.size()); + + PortBulker bulker(portCount); + + for (auto& port: ports) { - return; + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_MTU; + bulker.add(port.m_port_id, attr); } - attr.id = SAI_PORT_ATTR_QOS_SCHEDULER_GROUP_LIST; - attr.value.objlist.count = (uint32_t)scheduler_group_ids.size(); - attr.value.objlist.list = scheduler_group_ids.data(); + bulker.executeGet(); - status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + for (size_t idx = 0; idx < portCount; idx++) { - SWSS_LOG_ERROR("Failed to get scheduler group list for port %s rv:%d", port.m_alias.c_str(), status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) + auto& port = ports[idx]; + const auto status = bulker.statuses[idx]; + const auto& attr = bulker.attrList[idx]; + + if (status == SAI_STATUS_SUCCESS) { - throw runtime_error("PortsOrch initialization failure."); + auto mtu = attr.value.u32 - (uint32_t)(sizeof(struct ether_header) + FCS_LEN + VLAN_TAG_LEN); + + /* Reduce the default MTU got from ASIC by MAX_MACSEC_SECTAG_SIZE */ + if (mtu > MAX_MACSEC_SECTAG_SIZE) + { + mtu -= MAX_MACSEC_SECTAG_SIZE; + } + + port.m_mtu = mtu; + } + else + { + SWSS_LOG_ERROR("Failed to get mtu value from SAI for Port %" PRIx64 , port.m_port_id); } } - - SWSS_LOG_INFO("Got scheduler groups for port %s", port.m_alias.c_str()); } -void PortsOrch::initializePriorityGroups(Port &port) +void PortsOrch::initializePriorityGroupsBulk(std::vector& ports) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_NUMBER_OF_INGRESS_PRIORITY_GROUPS; - sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + SWSS_LOG_TIMER(__FUNCTION__); + + const auto portCount = static_cast(ports.size()); + + // Query number of ingress priority groups { - SWSS_LOG_ERROR("Failed to get number of priority groups for port %s rv:%d", port.m_alias.c_str(), status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) + PortBulker bulker(portCount); + + for (const auto& port: ports) { - throw runtime_error("PortsOrch initialization failure."); + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_NUMBER_OF_INGRESS_PRIORITY_GROUPS; + bulker.add(port.m_port_id, attr); } - } - SWSS_LOG_INFO("Get %d priority groups for port %s", attr.value.u32, port.m_alias.c_str()); - port.m_priority_group_ids.resize(attr.value.u32); + bulker.executeGet(); - if (attr.value.u32 == 0) - { - return; - } + for (size_t idx = 0; idx < portCount; idx++) + { + auto& port = ports[idx]; + const auto status = bulker.statuses[idx]; + const auto& attr = bulker.attrList[idx]; - attr.id = SAI_PORT_ATTR_INGRESS_PRIORITY_GROUP_LIST; - attr.value.objlist.count = (uint32_t)port.m_priority_group_ids.size(); - attr.value.objlist.list = port.m_priority_group_ids.data(); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get number of priority groups for port %s rv:%d", port.m_alias.c_str(), status); + handleSaiGetStatus(SAI_API_PORT, status); + throw runtime_error("PortsOrch initialization failure."); + } - status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + SWSS_LOG_INFO("Get %d priority groups for port %s", attr.value.u32, port.m_alias.c_str()); + port.m_priority_group_ids.resize(attr.value.u32); + } + } + + // Query ingress priority groups lists { - SWSS_LOG_ERROR("Fail to get priority group list for port %s rv:%d", port.m_alias.c_str(), status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) + PortBulker bulker(portCount); + + for (auto& port: ports) { - throw runtime_error("PortsOrch initialization failure."); + sai_attribute_t attr; + + if (port.m_priority_group_ids.size() == 0) + { + continue; + } + + attr.id = SAI_PORT_ATTR_INGRESS_PRIORITY_GROUP_LIST; + attr.value.objlist.list = port.m_priority_group_ids.data(); + attr.value.objlist.count = static_cast(port.m_priority_group_ids.size()); + bulker.add(port.m_port_id, attr); + } + + bulker.executeGet(); + + size_t idx = 0; + for (const auto& port: ports) + { + if (port.m_priority_group_ids.size() == 0) + { + continue; + } + + const auto status = bulker.statuses[idx]; + idx++; + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Fail to get priority group list for port %s rv:%d", port.m_alias.c_str(), status); + handleSaiGetStatus(SAI_API_PORT, status); + throw runtime_error("PortsOrch initialization failure."); + } + + SWSS_LOG_INFO("Get priority groups for port %s", port.m_alias.c_str()); } } - SWSS_LOG_INFO("Get priority groups for port %s", port.m_alias.c_str()); } -void PortsOrch::initializePortBufferMaximumParameters(Port &port) +void PortsOrch::initializeQueuesBulk(std::vector& ports) { - sai_attribute_t attr; - vector fvVector; + SWSS_LOG_ENTER(); - attr.id = SAI_PORT_ATTR_QOS_MAXIMUM_HEADROOM_SIZE; + SWSS_LOG_TIMER(__FUNCTION__); - sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + const auto portCount = static_cast(ports.size()); + + // Query number of queues { - SWSS_LOG_NOTICE("Unable to get the maximum headroom for port %s rv:%d, ignored", port.m_alias.c_str(), status); + PortBulker bulker(portCount); + + for (auto& port: ports) + { + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES; + bulker.add(port.m_port_id, attr); + } + + bulker.executeGet(); + + for (size_t idx = 0; idx < portCount; idx++) + { + auto& port = ports[idx]; + const auto status = bulker.statuses[idx]; + const auto& attr = bulker.attrList[idx]; + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get number of queues for port %s rv:%d", port.m_alias.c_str(), status); + handleSaiGetStatus(SAI_API_PORT, status); + throw runtime_error("PortsOrch initialization failure."); + } + + SWSS_LOG_INFO("Get %d queues for port %s", attr.value.u32, port.m_alias.c_str()); + port.m_queue_ids.resize(attr.value.u32); + port.m_queue_lock.resize(attr.value.u32); + } } - else + + // Query queue lists { - port.m_maximum_headroom = attr.value.u32; - fvVector.emplace_back("max_headroom_size", to_string(port.m_maximum_headroom)); - } + PortBulker bulker(portCount); - fvVector.emplace_back("max_priority_groups", to_string(port.m_priority_group_ids.size())); - fvVector.emplace_back("max_queues", to_string(port.m_queue_ids.size())); + for (auto& port: ports) + { + sai_attribute_t attr; - m_stateBufferMaximumValueTable->set(port.m_alias, fvVector); + if (port.m_queue_ids.size() == 0) + { + continue; + } + + attr.id = SAI_PORT_ATTR_QOS_QUEUE_LIST; + attr.value.objlist.list = port.m_queue_ids.data(); + attr.value.objlist.count = static_cast(port.m_queue_ids.size()); + bulker.add(port.m_port_id, attr); + } + + bulker.executeGet(); + + size_t idx = 0; + for (const auto& port: ports) + { + if (port.m_queue_ids.size() == 0) + { + continue; + } + + const auto status = bulker.statuses[idx]; + idx++; + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Fail to get queue list for port %s rv:%d", port.m_alias.c_str(), status); + handleSaiGetStatus(SAI_API_PORT, status); + throw runtime_error("PortsOrch initialization failure."); + } + + SWSS_LOG_INFO("Get queues for port %s", port.m_alias.c_str()); + } + } } -bool PortsOrch::initializePort(Port &port) +void PortsOrch::initializeSchedulerGroupsBulk(std::vector& ports) { SWSS_LOG_ENTER(); - SWSS_LOG_NOTICE("Initializing port alias:%s pid:%" PRIx64, port.m_alias.c_str(), port.m_port_id); + SWSS_LOG_TIMER(__FUNCTION__); + + std::vector> scheduler_group_ids(ports.size()); + + const auto portCount = static_cast(ports.size()); + + // Query number of scheduler groups + { + PortBulker bulker(portCount); + + for (const auto& port: ports) + { + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_SCHEDULER_GROUPS; + bulker.add(port.m_port_id, attr); + } + + bulker.executeGet(); + + for (size_t idx = 0; idx < portCount; idx++) + { + const auto& port = ports[idx]; + const auto status = bulker.statuses[idx]; + const auto& attr = bulker.attrList[idx]; + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get number of scheduler groups for port %s rv:%d", port.m_alias.c_str(), status); + handleSaiGetStatus(SAI_API_PORT, status); + throw runtime_error("PortsOrch initialization failure."); + } + + SWSS_LOG_INFO("Get %d scheduler groups for port %s", attr.value.u32, port.m_alias.c_str()); + scheduler_group_ids[idx].resize(attr.value.u32); + } + } + + // Query scheduler groups lists + { + PortBulker bulker(portCount); + + for (size_t idx = 0; idx < portCount; idx++) + { + sai_attribute_t attr; + const auto& port = ports[idx]; + + if (scheduler_group_ids[idx].size() == 0) + { + continue; + } + + attr.id = SAI_PORT_ATTR_QOS_SCHEDULER_GROUP_LIST; + attr.value.objlist.list = scheduler_group_ids[idx].data(); + attr.value.objlist.count = static_cast(scheduler_group_ids[idx].size()); + bulker.add(port.m_port_id, attr); + } + + bulker.executeGet(); - if (gMySwitchType != "dpu") - { - initializePriorityGroups(port); - initializeQueues(port); - initializeSchedulerGroups(port); - initializePortBufferMaximumParameters(port); - } + size_t bulkIdx = 0; + for (size_t idx = 0; idx < portCount; idx++) + { + const auto& port = ports[idx]; + if (scheduler_group_ids[idx].size() == 0) + { + continue; + } - /* Create host interface */ - if (!addHostIntfs(port, port.m_alias, port.m_hif_id)) - { - SWSS_LOG_ERROR("Failed to create host interface for port %s", port.m_alias.c_str()); - return false; - } + const auto status = bulker.statuses[bulkIdx]; + bulkIdx++; - /* Check warm start states */ - vector tuples; - bool exist = m_portTable->get(port.m_alias, tuples); - string operStatus; - if (exist) - { - for (auto i : tuples) - { - if (fvField(i) == "oper_status") + if (status != SAI_STATUS_SUCCESS) { - operStatus = fvValue(i); + SWSS_LOG_ERROR("Failed to get scheduler group list for port %s rv:%d", port.m_alias.c_str(), status); + handleSaiGetStatus(SAI_API_PORT, status); + throw runtime_error("PortsOrch initialization failure."); } + + SWSS_LOG_INFO("Get scheduler groups for port %s", port.m_alias.c_str()); } } - SWSS_LOG_DEBUG("initializePort %s with oper %s", port.m_alias.c_str(), operStatus.c_str()); +} - /** - * Create database port oper status as DOWN if attr missing - * This status will be updated upon receiving port_oper_status_notification. - */ - if (operStatus == "up") - { - port.m_oper_status = SAI_PORT_OPER_STATUS_UP; - } - else if (operStatus.empty()) - { - port.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; - /* Fill oper_status in db with default value "down" */ - m_portTable->hset(port.m_alias, "oper_status", "down"); - } - else - { - port.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; - } +void PortsOrch::initializePortBufferMaximumParameters(const Port &port) +{ + sai_attribute_t attr; + vector fvVector; - /* initialize port admin status */ - if (!getPortAdminStatus(port.m_port_id, port.m_admin_state_up)) - { - SWSS_LOG_ERROR("Failed to get initial port admin status %s", port.m_alias.c_str()); - return false; - } + attr.id = SAI_PORT_ATTR_QOS_MAXIMUM_HEADROOM_SIZE; - /* initialize port admin speed */ - if (!isAutoNegEnabled(port.m_port_id) && !getPortSpeed(port.m_port_id, port.m_speed)) + sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get initial port admin speed %d", port.m_speed); - return false; + SWSS_LOG_NOTICE("Unable to get the maximum headroom for port %s rv:%d, ignored", port.m_alias.c_str(), status); } - - /* initialize port mtu */ - if (!getPortMtu(port, port.m_mtu)) + else { - SWSS_LOG_ERROR("Failed to get initial port mtu %d", port.m_mtu); + auto maximum_headroom = attr.value.u32; + fvVector.emplace_back("max_headroom_size", to_string(maximum_headroom)); } - /* - * always initialize Port SAI_HOSTIF_ATTR_OPER_STATUS based on oper_status value in appDB. - */ - bool isUp = port.m_oper_status == SAI_PORT_OPER_STATUS_UP; - if (!setHostIntfsOperStatus(port, isUp)) - { - SWSS_LOG_WARN("Failed to set operation status %s to host interface %s", - operStatus.c_str(), port.m_alias.c_str()); - return false; - } + fvVector.emplace_back("max_priority_groups", to_string(port.m_priority_group_ids.size())); + fvVector.emplace_back("max_queues", to_string(port.m_queue_ids.size())); - return true; + m_stateBufferMaximumValueTable->set(port.m_alias, fvVector); } -bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id) +bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id, bool isUp) { SWSS_LOG_ENTER(); @@ -5215,23 +6763,20 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); - bool set_hostif_tx_queue = false; - if (gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_HOSTIF, SAI_HOSTIF_ATTR_QUEUE)) - { - set_hostif_tx_queue = true; - } - else - { - SWSS_LOG_WARN("Hostif queue attribute not supported"); - } - - if (set_hostif_tx_queue) + if (m_supportsHostIfTxQueue) { attr.id = SAI_HOSTIF_ATTR_QUEUE; attr.value.u32 = DEFAULT_HOSTIF_TX_QUEUE; attrs.push_back(attr); + + port.m_host_tx_queue = DEFAULT_HOSTIF_TX_QUEUE; + port.m_host_tx_queue_configured = true; } + attr.id = SAI_HOSTIF_ATTR_OPER_STATUS; + attr.value.booldata = isUp; + attrs.push_back(attr); + sai_status_t status = sai_hostif_api->create_hostif(&host_intfs_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -5243,7 +6788,10 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int } } - SWSS_LOG_NOTICE("Create host interface for port %s", alias.c_str()); + SWSS_LOG_NOTICE("Create host interface for port %s with oper status %s", alias.c_str(), isUp ? "up" : "down"); + + event_params_t params = {{"ifname", alias},{"status", isUp ? "up" : "down"}}; + event_publish(g_events_handle, "if-state", ¶ms); return true; } @@ -5459,6 +7007,9 @@ bool PortsOrch::removeBridgePort(Port &port) hostif_vlan_tag[SAI_HOSTIF_VLAN_TAG_STRIP], port.m_alias.c_str()); return false; } + + /* Remove STP ports before bridge port deletion*/ + gStpOrch->removeStpPorts(port); //Flush the FDB entires corresponding to the port gFdbOrch->flushFDBEntries(port.m_bridge_port_id, SAI_NULL_OBJECT_ID); @@ -5554,6 +7105,7 @@ bool PortsOrch::addVlan(string vlan_alias) m_portList[vlan_alias] = vlan; m_port_ref_count[vlan_alias] = 0; saiOidToAlias[vlan_oid] = vlan_alias; + m_vlanPorts.emplace(vlan_alias); return true; } @@ -5600,6 +7152,12 @@ bool PortsOrch::removeVlan(Port vlan) return false; } + /* If STP instance is associated with VLAN remove VLAN from STP before deletion */ + if(vlan.m_stp_id != -1) + { + gStpOrch->removeVlanFromStpInstance(vlan.m_alias, 0); + } + sai_status_t status = sai_vlan_api->remove_vlan(vlan.m_vlan_info.vlan_oid); if (status != SAI_STATUS_SUCCESS) { @@ -5620,6 +7178,7 @@ bool PortsOrch::removeVlan(Port vlan) saiOidToAlias.erase(vlan.m_vlan_info.vlan_oid); m_portList.erase(vlan.m_alias); m_port_ref_count.erase(vlan.m_alias); + m_vlanPorts.erase(vlan.m_alias); return true; } @@ -5697,7 +7256,8 @@ bool PortsOrch::addVlanMember(Port &vlan, Port &port, string &tagging_mode, stri port.m_alias.c_str(), vlan.m_alias.c_str(), vlan.m_vlan_info.vlan_id, port.m_port_id); /* Use untagged VLAN as pvid of the member port */ - if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED) + if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED && + port.m_type != Port::TUNNEL) { if(!setPortPvid(port, vlan.m_vlan_info.vlan_id)) { @@ -6032,7 +7592,8 @@ bool PortsOrch::removeVlanMember(Port &vlan, Port &port, string end_point_ip) port.m_alias.c_str(), vlan.m_alias.c_str(), vlan.m_vlan_info.vlan_id, vlan_member_id); /* Restore to default pvid if this port joined this VLAN in untagged mode previously */ - if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED) + if (sai_tagging_mode == SAI_VLAN_TAGGING_MODE_UNTAGGED && + port.m_type != Port::TUNNEL) { if (!setPortPvid(port, DEFAULT_PORT_VLAN_ID)) { @@ -6099,13 +7660,16 @@ bool PortsOrch::addLag(string lag_alias, uint32_t spa_id, int32_t switch_id) switch_id = gVoqMySwitchId; system_lag_alias = gMyHostName + "|" + gMyAsicName + "|" + lag_alias; - // Allocate unique lag id - spa_id = m_lagIdAllocator->lagIdAdd(system_lag_alias, 0); - - if ((int32_t)spa_id <= 0) + if (gMultiAsicVoq) { - SWSS_LOG_ERROR("Failed to allocate unique LAG id for local lag %s rv:%d", lag_alias.c_str(), spa_id); - return false; + // Allocate unique lag id + spa_id = m_lagIdAllocator->lagIdAdd(system_lag_alias, 0); + + if ((int32_t)spa_id <= 0) + { + SWSS_LOG_ERROR("Failed to allocate unique LAG id for local lag %s rv:%d", lag_alias.c_str(), spa_id); + return false; + } } } @@ -6217,7 +7781,7 @@ bool PortsOrch::removeLag(Port lag) m_counterLagTable->hdel("", lag.m_alias); - if (gMySwitchType == "voq") + if (isChassisDbInUse()) { // Free the lag id, if this is local LAG @@ -6315,7 +7879,7 @@ bool PortsOrch::addLagMember(Port &lag, Port &port, string member_status) m_portList[lag.m_alias] = lag; - if (lag.m_bridge_port_id > 0) + if ((lag.m_bridge_port_id > 0)||(!lag.m_child_ports.empty())) { if (!setHostIntfsStripTag(port, SAI_HOSTIF_VLAN_TAG_KEEP)) { @@ -6330,7 +7894,7 @@ bool PortsOrch::addLagMember(Port &lag, Port &port, string member_status) LagMemberUpdate update = { lag, port, true }; notify(SUBJECT_TYPE_LAG_MEMBER_CHANGE, static_cast(&update)); - if (gMySwitchType == "voq") + if (isChassisDbInUse()) { //Sync to SYSTEM_LAG_MEMBER_TABLE of CHASSIS_APP_DB voqSyncAddLagMember(lag, port, member_status); @@ -6363,7 +7927,7 @@ bool PortsOrch::removeLagMember(Port &lag, Port &port) lag.m_members.erase(port.m_alias); m_portList[lag.m_alias] = lag; - if (lag.m_bridge_port_id > 0) + if ((lag.m_bridge_port_id > 0)||(!lag.m_child_ports.empty())) { if (!setHostIntfsStripTag(port, SAI_HOSTIF_VLAN_TAG_STRIP)) { @@ -6378,7 +7942,7 @@ bool PortsOrch::removeLagMember(Port &lag, Port &port) LagMemberUpdate update = { lag, port, false }; notify(SUBJECT_TYPE_LAG_MEMBER_CHANGE, static_cast(&update)); - if (gMySwitchType == "voq") + if (isChassisDbInUse()) { //Sync to SYSTEM_LAG_MEMBER_TABLE of CHASSIS_APP_DB voqSyncDelLagMember(lag, port); @@ -6492,7 +8056,9 @@ bool PortsOrch::addTunnel(string tunnel_alias, sai_object_id_t tunnel_id, bool h { tunnel.m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE; } + tunnel.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; m_portList[tunnel_alias] = tunnel; + saiOidToAlias[tunnel_id] = tunnel_alias; SWSS_LOG_INFO("addTunnel:: %" PRIx64, tunnel_id); @@ -6503,6 +8069,7 @@ bool PortsOrch::removeTunnel(Port tunnel) { SWSS_LOG_ENTER(); + saiOidToAlias.erase(tunnel.m_tunnel_id); m_portList.erase(tunnel.m_alias); return true; @@ -6535,6 +8102,10 @@ void PortsOrch::generateQueueMap(map queuesState { flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); } + else if (it.second.m_host_tx_queue_configured && it.second.m_host_tx_queue <= maxQueueNumber) + { + flexCounterQueueState.enableQueueCounters(it.second.m_host_tx_queue, it.second.m_host_tx_queue); + } queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); } generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), false); @@ -6592,7 +8163,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates const auto id = sai_serialize_object_id(queue_ids[queueIndex]); - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -6602,7 +8173,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates { continue; } - queueTypeVector.emplace_back(id, queueType); + queueTypeVector.emplace_back(id, sai_queue_type_string_map[queueType]); queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } @@ -6612,7 +8183,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates // Install a flex counter for this voq to track stats. Voq counters do // not have buffer queue config. So it does not get enabled through the // flexcounter orch logic. Always enabled voq counters. - addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, true); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, true, queueType); queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_system_port_oid)); } else @@ -6625,7 +8196,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates // counter on voq systems. if (gMySwitchType == "voq") { - addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); } @@ -6637,7 +8208,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates } else { - m_queueTable->set("", queueVector); + m_queueCounterNameMapUpdater->setCounterNameMap(queueVector); CounterCheckOrch::getInstance().addPort(port); } m_queuePortTable->set("", queuePortVector); @@ -6673,6 +8244,10 @@ void PortsOrch::addQueueFlexCounters(map queuesS { flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); } + else if (it.second.m_host_tx_queue_configured && it.second.m_host_tx_queue <= maxQueueNumber) + { + flexCounterQueueState.enableQueueCounters(it.second.m_host_tx_queue, it.second.m_host_tx_queue); + } queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); } addQueueFlexCountersPerPort(it.second, queuesStateVector.at(it.second.m_alias)); @@ -6687,7 +8262,7 @@ void PortsOrch::addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueSt { for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -6696,12 +8271,12 @@ void PortsOrch::addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueSt continue; } // Install a flex counter for this queue to track stats - addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } } } -void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq) +void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType) { std::unordered_set counter_stats; std::vector queue_ids; @@ -6712,6 +8287,10 @@ void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_ } if (voq) { + for (const auto& voq_it: voq_stat_ids) + { + counter_stats.emplace(sai_serialize_queue_stat(voq_it)); + } queue_ids = m_port_voq_ids[port.m_alias]; } else @@ -6719,7 +8298,7 @@ void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_ queue_ids = port.m_queue_ids; } - queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats, queueType); } @@ -6750,6 +8329,10 @@ void PortsOrch::addQueueWatermarkFlexCounters(map fieldValues; - fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); - - m_flexCounterTable->set(key, fieldValues); + auto queue_counter_stats = generateCounterStats(queueWatermarkStatIds, sai_serialize_queue_stat); + queue_watermark_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, queue_counter_stats, queueType); } -void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues, bool skip_host_tx_queue) { SWSS_LOG_ENTER(); @@ -6819,16 +8387,21 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) { + if (queueIndex == (uint32_t)port.m_host_tx_queue && skip_host_tx_queue) + { + continue; + } + std::ostringstream name; name << port.m_alias << ":" << queueIndex; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { - queueTypeVector.emplace_back(id, queueType); + queueTypeVector.emplace_back(id, sai_queue_type_string_map[queueType]); queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } @@ -6839,16 +8412,22 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) if (flexCounterOrch->getQueueCountersState()) { // Install a flex counter for this queue to track stats - addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } if (flexCounterOrch->getQueueWatermarkCountersState()) { /* add watermark queue counters */ - addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex); + addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex, queueType); + } + + if (flexCounterOrch->getWredQueueCountersState()) + { + /* add wred queue counters */ + addWredQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } } - m_queueTable->set("", queueVector); + m_queueCounterNameMapUpdater->setCounterNameMap(queueVector); m_queuePortTable->set("", queuePortVector); m_queueIndexTable->set("", queueIndexVector); m_queueTypeTable->set("", queueTypeVector); @@ -6856,7 +8435,7 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues, bool skip_host_tx_queue) { SWSS_LOG_ENTER(); @@ -6872,15 +8451,20 @@ void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) { + if (queueIndex == (uint32_t)port.m_host_tx_queue && skip_host_tx_queue) + { + continue; + } + std::ostringstream name; name << port.m_alias << ":" << queueIndex; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); // Remove the queue counter from counters DB maps - m_queueTable->hdel("", name.str()); + m_queueCounterNameMapUpdater->delCounterNameMap(name.str()); m_queuePortTable->hdel("", id); - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -6892,14 +8476,18 @@ void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) if (flexCounterOrch->getQueueCountersState()) { // Remove the flex counter for this queue - queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex], queueType); } if (flexCounterOrch->getQueueWatermarkCountersState()) { // Remove watermark queue counters - string key = getQueueWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); + queue_watermark_manager.clearCounterIdList(port.m_queue_ids[queueIndex], queueType); + } + if (flexCounterOrch->getWredQueueCountersState()) + { + /* Remove wred queue counters */ + wred_queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex], queueType); } } @@ -6966,7 +8554,7 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgS } - m_pgTable->set("", pgVector); + m_pgCounterNameMapUpdater->setCounterNameMap(pgVector); m_pgPortTable->set("", pgPortVector); m_pgIndexTable->set("", pgIndexVector); @@ -7015,7 +8603,7 @@ void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) } } - m_pgTable->set("", pgVector); + m_pgCounterNameMapUpdater->setCounterNameMap(pgVector); m_pgPortTable->set("", pgPortVector); m_pgIndexTable->set("", pgIndexVector); @@ -7072,23 +8660,8 @@ void PortsOrch::addPriorityGroupFlexCountersPerPort(const Port& port, FlexCounte void PortsOrch::addPriorityGroupFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) { - const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - string delimiter = ""; - std::ostringstream ingress_pg_drop_packets_counters_stream; - string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* Add dropped packets counters to flex_counter */ - for (const auto& it: ingressPriorityGroupDropStatIds) - { - ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - if (delimiter.empty()) - { - delimiter = comma; - } - } - vector fieldValues; - fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); + auto pg_counter_stats = generateCounterStats(ingressPriorityGroupDropStatIds, sai_serialize_ingress_priority_group_stat); + pg_drop_stat_manager.setCounterIdList(port.m_priority_group_ids[pgIndex], CounterType::PRIORITY_GROUP, pg_counter_stats); } void PortsOrch::addPriorityGroupWatermarkFlexCounters(map pgsStateVector) @@ -7143,22 +8716,8 @@ void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPort(const Port& port, F void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) { - const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - - std::string delimiter = ""; - std::ostringstream counters_stream; - /* Add watermark counters to flex_counter */ - for (const auto& it: ingressPriorityGroupWatermarkStatIds) - { - counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - delimiter = comma; - } - - vector fieldValues; - fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); + auto pg_counter_stats = generateCounterStats(ingressPriorityGroupWatermarkStatIds, sai_serialize_ingress_priority_group_stat); + pg_watermark_manager.setCounterIdList(port.m_priority_group_ids[pgIndex], CounterType::PRIORITY_GROUP, pg_counter_stats); } void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) @@ -7182,7 +8741,7 @@ void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); // Remove the pg counter from counters DB maps - m_pgTable->hdel("", name.str()); + m_pgCounterNameMapUpdater->delCounterNameMap(name.str()); m_pgPortTable->hdel("", id); m_pgIndexTable->hdel("", id); @@ -7190,15 +8749,13 @@ void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) if (flexCounterOrch->getPgCountersState()) { // Remove dropped packets counters from flex_counter - string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - m_flexCounterTable->del(key); + pg_drop_stat_manager.clearCounterIdList(port.m_priority_group_ids[pgIndex]); } if (flexCounterOrch->getPgWatermarkCountersState()) { // Remove watermark counters from flex_counter - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); + pg_watermark_manager.clearCounterIdList(port.m_priority_group_ids[pgIndex]); } } @@ -7212,8 +8769,8 @@ void PortsOrch::generatePortCounterMap() return; } - auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); - auto gbport_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, true); + auto port_counter_stats = generateCounterStats(port_stat_ids, sai_serialize_port_stat); + auto gbport_counter_stats = generateCounterStats(gbport_stat_ids, sai_serialize_port_stat); for (const auto& it: m_portList) { // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. @@ -7225,10 +8782,10 @@ void PortsOrch::generatePortCounterMap() CounterType::PORT, port_counter_stats); if (it.second.m_system_side_id) gb_port_stat_manager.setCounterIdList(it.second.m_system_side_id, - CounterType::PORT, gbport_counter_stats); + CounterType::PORT, gbport_counter_stats, it.second.m_switch_id); if (it.second.m_line_side_id) gb_port_stat_manager.setCounterIdList(it.second.m_line_side_id, - CounterType::PORT, gbport_counter_stats); + CounterType::PORT, gbport_counter_stats, it.second.m_switch_id); } m_isPortCounterMapGenerated = true; @@ -7241,7 +8798,7 @@ void PortsOrch::generatePortBufferDropCounterMap() return; } - auto port_buffer_drop_stats = generateCounterStats(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP); + auto port_buffer_drop_stats = generateCounterStats(port_buffer_drop_stat_ids, sai_serialize_port_stat); for (const auto& it: m_portList) { // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. @@ -7255,6 +8812,139 @@ void PortsOrch::generatePortBufferDropCounterMap() m_isPortBufferDropCounterMapGenerated = true; } +/**** +* Func Name : generateWredPortCounterMap +* Parameters : None +* Returns : void +* Description: Set the list of counters to be used for syncd counter polling +**/ +void PortsOrch::generateWredPortCounterMap() +{ + if (m_isWredPortCounterMapGenerated) + { + return; + } + + auto wred_port_stats = generateCounterStats(wred_port_stat_ids, sai_serialize_port_stat); + for (const auto& it: m_portList) + { + // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. + if (it.second.m_type != Port::Type::PHY) + { + continue; + } + wred_port_stat_manager.setCounterIdList(it.second.m_port_id, CounterType::PORT, wred_port_stats); + } + + m_isWredPortCounterMapGenerated = true; +} + +/**** +* Func Name : addWredQueueFlexCounters +* Parameters : queueStateVector +* Returns : void +* Description: Top level API to Set WRED flex counters for Queues +**/ +void PortsOrch::addWredQueueFlexCounters(map queuesStateVector) +{ + if (m_isWredQueueCounterMapGenerated) + { + return; + } + + bool isCreateAllQueues = false; + + if (queuesStateVector.count(createAllAvailableBuffersStr)) + { + isCreateAllQueues = true; + queuesStateVector.clear(); + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) + { + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + if (isCreateAllQueues && maxQueueNumber) + { + flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); + } + else if (it.second.m_host_tx_queue_configured && it.second.m_host_tx_queue <= maxQueueNumber) + { + flexCounterQueueState.enableQueueCounters(it.second.m_host_tx_queue, it.second.m_host_tx_queue); + } + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + addWredQueueFlexCountersPerPort(it.second, queuesStateVector.at(it.second.m_alias)); + } + } + + m_isWredQueueCounterMapGenerated = true; +} + +/**** +* Func Name : addWredQueueFlexCountersPerPort +* Parameters : port and Queuestate +* Returns : void +* Description: Port level API to program flexcounter for queues +**/ +void PortsOrch::addWredQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState) +{ + /* Add stat counters to flex_counter */ + + for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + { + sai_queue_type_t queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } + addWredQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); + } + } +} +/**** +* Func Name : addWredQueueFlexCountersPerPortPerQueueIndex +* Parameters : port, queueIndex, is_voq +* Returns : void +* Description: Sets the Stats list to be polled by the flexcounter +**/ + +void PortsOrch::addWredQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType) +{ + std::unordered_set counter_stats; + std::vector queue_ids; + + for (const auto& it: wred_queue_stat_ids) + { + counter_stats.emplace(sai_serialize_queue_stat(it)); + } + if (voq) + { + queue_ids = m_port_voq_ids[port.m_alias]; + } + else + { + queue_ids = port.m_queue_ids; + } + + wred_queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats, queueType); +} + +void PortsOrch::flushCounters() +{ + for (auto counter_manager : counter_managers) + { + counter_manager.get().flush(); + } +} + uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port) { return static_cast(m_portList[port].m_priority_group_ids.size()); @@ -7275,18 +8965,27 @@ void PortsOrch::doTask(NotificationConsumer &consumer) return; } - std::string op; - std::string data; - std::vector values; + if (&consumer != m_portStatusNotificationConsumer && &consumer != m_portHostTxReadyNotificationConsumer) + { + return; + } - consumer.pop(op, data, values); + std::deque entries; + consumer.pops(entries); - if (&consumer != m_portStatusNotificationConsumer) + for (auto& entry : entries) { - return; + handleNotification(consumer, entry); } +} + +void PortsOrch::handleNotification(NotificationConsumer &consumer, KeyOpFieldsValuesTuple& entry) +{ + auto op = kfvOp(entry); + auto data = kfvKey(entry); + auto values = kfvFieldsValues(entry); - if (op == "port_state_change") + if (&consumer == m_portStatusNotificationConsumer && op == "port_state_change") { uint32_t count; sai_port_oper_status_notification_t *portoperstatus = nullptr; @@ -7295,12 +8994,14 @@ void PortsOrch::doTask(NotificationConsumer &consumer) for (uint32_t i = 0; i < count; i++) { + Port port; sai_object_id_t id = portoperstatus[i].port_id; sai_port_oper_status_t status = portoperstatus[i].port_state; + sai_port_error_status_t port_oper_err = portoperstatus[i].port_error_status; - SWSS_LOG_NOTICE("Get port state change notification id:%" PRIx64 " status:%d", id, status); - - Port port; + SWSS_LOG_NOTICE("Get port state change notification id:%" PRIx64 " status:%d " + "oper_error_status:0x%" PRIx32, + id, status, port_oper_err); if (!getPort(id, port)) { @@ -7328,7 +9029,7 @@ void PortsOrch::doTask(NotificationConsumer &consumer) if (!m_portHlpr.fecToStr(fec_str, fec_mode)) { SWSS_LOG_ERROR("Error unknown fec mode %d while querying port %s fec mode", - static_cast(fec_mode), port.m_alias.c_str()); + static_cast(fec_mode), port.m_alias.c_str()); fec_str = "N/A"; } updateDbPortOperFec(port,fec_str); @@ -7337,6 +9038,11 @@ void PortsOrch::doTask(NotificationConsumer &consumer) { updateDbPortOperFec(port, "N/A"); } + } else { + if (port_oper_err) + { + updatePortErrorStatus(port, port_oper_err); + } } /* update m_portList */ @@ -7345,6 +9051,70 @@ void PortsOrch::doTask(NotificationConsumer &consumer) sai_deserialize_free_port_oper_status_ntf(count, portoperstatus); } + else if (&consumer == m_portHostTxReadyNotificationConsumer && op == "port_host_tx_ready") + { + sai_object_id_t port_id; + sai_object_id_t switch_id; + sai_port_host_tx_ready_status_t host_tx_ready_status; + + sai_deserialize_port_host_tx_ready_ntf(data, switch_id, port_id, host_tx_ready_status); + SWSS_LOG_DEBUG("Recieved host_tx_ready notification for port 0x%" PRIx64, port_id); + + Port p; + if (!getPort(port_id, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, port_id); + return; + } + setHostTxReady(p, host_tx_ready_status == SAI_PORT_HOST_TX_READY_STATUS_READY ? "true" : "false"); + } +} + +void PortsOrch::updatePortErrorStatus(Port &port, sai_port_error_status_t errstatus) +{ + size_t errors = 0; + string db_port_error_name; + PortOperErrorEvent *portOperErrorEvent = nullptr; + size_t error_count = PortOperErrorEvent::db_key_errors.size(); + + SWSS_LOG_NOTICE("Port %s error state set from 0x%" PRIx32 "-> 0x%" PRIx32, + port.m_alias.c_str(), + port.m_oper_error_status, + errstatus); + + port.m_oper_error_status = errstatus; + + // Iterate through all the port oper errors + while ((errstatus >> errors) && (errors < error_count)) + { + sai_port_error_status_t error_status = static_cast(errstatus & (1 << errors)); + + if (port.m_portOperErrorToEvent.find(error_status) == port.m_portOperErrorToEvent.end()) + { + ++errors; + continue; + } + + portOperErrorEvent = &port.m_portOperErrorToEvent[error_status]; + + if (portOperErrorEvent->isErrorSet(errstatus)) + { + SWSS_LOG_NOTICE("Port %s oper error event: %s occurred", + port.m_alias.c_str(), + portOperErrorEvent->getDbKey().c_str()); + portOperErrorEvent->recordEventTime(); + portOperErrorEvent->incrementErrorCount(); + updateDbPortOperError(port, portOperErrorEvent); + } + else + { + SWSS_LOG_WARN("Port %s port oper error %s not updated in DB", + port.m_alias.c_str(), + portOperErrorEvent->getDbKey().c_str()); + } + + ++errors; + } } void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) @@ -7357,9 +9127,14 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) return; } - if (port.m_type == Port::PHY) + if (port.m_type == Port::PHY || port.m_type == Port::TUNNEL) { updateDbPortOperStatus(port, status); + } + + if (port.m_type == Port::PHY) + { + updateDbPortFlapCount(port, status); updateGearboxPortOperStatus(port); /* Refresh the port states and reschedule the poller tasks */ @@ -7390,6 +9165,8 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) isUp ? "up" : "down"); } } + SWSS_LOG_INFO("Updating the nexthop for port %s and operational status %s", port.m_alias.c_str(), isUp ? "up" : "down"); + if (!gNeighOrch->ifChangeInformNextHop(port.m_alias, isUp)) { SWSS_LOG_WARN("Inform nexthop operation failed for interface %s", port.m_alias.c_str()); @@ -7398,10 +9175,19 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) { if (!gNeighOrch->ifChangeInformNextHop(child_port, isUp)) { - SWSS_LOG_WARN("Inform nexthop operation failed for sub interface %s", child_port.c_str()); + SWSS_LOG_WARN("Inform nexthop operation failed for sub interface %s", child_port.c_str()); + } + } + + if(isChassisDbInUse()) + { + if (gIntfsOrch->isLocalSystemPortIntf(port.m_alias)) + { + gIntfsOrch->voqSyncIntfState(port.m_alias, isUp); } } + PortOperStateUpdate update = {port, status}; notify(SUBJECT_TYPE_PORT_OPER_STATE_CHANGE, static_cast(&update)); } @@ -7474,6 +9260,18 @@ void PortsOrch::refreshPortStatus() { updateDbPortOperSpeed(port, 0); } + sai_port_fec_mode_t fec_mode; + string fec_str = "N/A"; + if (oper_fec_sup && getPortOperFec(port, fec_mode)) + { + if (!m_portHlpr.fecToStr(fec_str, fec_mode)) + { + SWSS_LOG_ERROR("Error unknown fec mode %d while querying port %s fec mode", + static_cast(fec_mode), port.m_alias.c_str()); + fec_str = "N/A"; + } + } + updateDbPortOperFec(port,fec_str); } } } @@ -7668,7 +9466,7 @@ bool PortsOrch::removeAclTableGroup(const Port &p) } bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, sai_object_id_t switch_id, - map> &serdes_attr) + map &serdes_attr) { SWSS_LOG_ENTER(); @@ -7715,12 +9513,15 @@ bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, sai_object_id_t for (auto it = serdes_attr.begin(); it != serdes_attr.end(); it++) { port_serdes_attr.id = it->first; - port_serdes_attr.value.u32list.count = (uint32_t)it->second.size(); - port_serdes_attr.value.u32list.list = it->second.data(); + + // Use boost::variant visitor to handle both vector and string types + boost::apply_visitor(SerdesValueVisitor(port_serdes_attr), it->second); + attr_list.emplace_back(port_serdes_attr); } + assert(serdes_attr.size() + 1 == attr_list.size()); status = sai_port_api->create_port_serdes(&port_serdes_id, switch_id, - static_cast(serdes_attr.size()+1), + static_cast(attr_list.size()), attr_list.data()); if (status != SAI_STATUS_SUCCESS) @@ -7937,11 +9738,11 @@ bool PortsOrch::initGearboxPort(Port &port) } attr.value.s32 = sai_fec; attrs.push_back(attr); - + if (fec_override_sup) { attr.id = SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE; - + attr.value.booldata = m_portHlpr.fecIsOverrideRequired(m_gearboxPortMap[port.m_index].system_fec); attrs.push_back(attr); } @@ -7954,6 +9755,13 @@ bool PortsOrch::initGearboxPort(Port &port) attr.value.booldata = m_gearboxPortMap[port.m_index].system_training; attrs.push_back(attr); + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrs.push_back(attr); + } + status = sai_port_api->create_port(&systemPort, phyOid, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -8055,6 +9863,13 @@ bool PortsOrch::initGearboxPort(Port &port) attr.value.u32 = media_type_map[m_gearboxPortMap[port.m_index].line_adver_media_type]; attrs.push_back(attr); + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrs.push_back(attr); + } + status = sai_port_api->create_port(&linePort, phyOid, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -8106,8 +9921,8 @@ bool PortsOrch::initGearboxPort(Port &port) m_gbcounterTable->set("", fields); /* Set serdes tx taps on system and line side */ - map> serdes_attr; - typedef pair> serdes_attr_pair; + map serdes_attr; + typedef pair serdes_attr_pair; vector attr_val; for (auto pair: tx_fir_strings_system_side) { if (m_gearboxInterfaceMap[port.m_index].tx_firs.find(pair.first) != m_gearboxInterfaceMap[port.m_index].tx_firs.end() ) { @@ -8120,11 +9935,11 @@ bool PortsOrch::initGearboxPort(Port &port) { if (setPortSerdesAttribute(systemPort, phyOid, serdes_attr)) { - SWSS_LOG_NOTICE("Set port %s system side preemphasis is success", port.m_alias.c_str()); + SWSS_LOG_NOTICE("Set port %s system side serdes attributes is success", port.m_alias.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s system side pre-emphasis", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to set port %s system side serdes attributes", port.m_alias.c_str()); return false; } } @@ -8140,11 +9955,11 @@ bool PortsOrch::initGearboxPort(Port &port) { if (setPortSerdesAttribute(linePort, phyOid, serdes_attr)) { - SWSS_LOG_NOTICE("Set port %s line side preemphasis is success", port.m_alias.c_str()); + SWSS_LOG_NOTICE("Set port %s line side serdes attributes is success", port.m_alias.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s line side pre-emphasis", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to set port %s line side serdes attributes", port.m_alias.c_str()); return false; } } @@ -8284,7 +10099,9 @@ bool PortsOrch::getSystemPorts() attr.value.sysportconfig.attached_core_index, attr.value.sysportconfig.attached_core_port_index); - m_systemPortOidMap[sp_key] = system_port_list[i]; + systemPortMapInfo system_port_info; + system_port_info.system_port_id = system_port_list[i]; + m_systemPortOidMap[sp_key] = system_port_info; } } @@ -8314,7 +10131,7 @@ bool PortsOrch::addSystemPorts() vector keys; vector spFv; - DBConnector appDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); + DBConnector appDb("APPL_DB", 0); Table appSystemPortTable(&appDb, APP_SYSTEM_PORT_TABLE_NAME); //Retrieve system port configurations from APP DB @@ -8370,7 +10187,8 @@ bool PortsOrch::addSystemPorts() sai_status_t status; //Retrive system port config info and enable - system_port_oid = m_systemPortOidMap[sp_key]; + system_port_oid = m_systemPortOidMap[sp_key].system_port_id; + attr.id = SAI_SYSTEM_PORT_ATTR_TYPE; attrs.push_back(attr); @@ -8433,6 +10251,10 @@ bool PortsOrch::addSystemPorts() port.m_system_port_info.speed = attrs[1].value.sysportconfig.speed; port.m_system_port_info.num_voq = attrs[1].value.sysportconfig.num_voq; + //Update the system Port Info to the m_systemPortOidMap to be used later when the Port Speed is changed dynamically + m_systemPortOidMap[sp_key].system_port_info = port.m_system_port_info; + m_systemPortOidMap[sp_key].info_valid = true; + initializeVoqs( port ); setPort(port.m_alias, port); /* Add system port name map to counter table */ @@ -8461,6 +10283,73 @@ bool PortsOrch::addSystemPorts() return true; } +void PortsOrch::updateSystemPort(Port &port) +{ + if (!m_initDone) + { + //addSystemPorts will update the system port + return; + } + + if ((gMySwitchType == "voq") && (port.m_type == Port::PHY)) + { + auto system_port_alias = gMyHostName + "|" + gMyAsicName + "|" + port.m_alias; + vector spFv; + + m_systemPortTable->get(system_port_alias, spFv); + + //Retrieve system port configurations from APP DB + int32_t switch_id = -1; + int32_t core_index = -1; + int32_t core_port_index = -1; + + for ( auto &fv : spFv ) + { + if(fv.first == "switch_id") + { + switch_id = stoi(fv.second); + continue; + } + if(fv.first == "core_index") + { + core_index = stoi(fv.second); + continue; + } + if(fv.first == "core_port_index") + { + core_port_index = stoi(fv.second); + continue; + } + if(switch_id < 0 || core_index < 0 || core_port_index < 0) + { + continue; + } + tuple sp_key(switch_id, core_index, core_port_index); + + if(m_systemPortOidMap.find(sp_key) != m_systemPortOidMap.end()) + { + auto system_port = m_systemPortOidMap[sp_key]; + // Check if the system_port_info is already populated in m_systemPortOidMap. + if(system_port.info_valid) + { + port.m_system_port_oid = system_port.system_port_id; + port.m_system_port_info = system_port.system_port_info; + port.m_system_port_info.local_port_oid = port.m_port_id; + //initializeVoqs(port); + SWSS_LOG_NOTICE("Updated system port for %s with system_port_alias:%s switch_id:%d, core_index:%d, core_port_index:%d", + port.m_alias.c_str(), system_port.system_port_info.alias.c_str(), system_port.system_port_info.switch_id, + system_port.system_port_info.core_index, system_port.system_port_info.core_port_index); + } + } + } + if(port.m_system_port_info.alias.empty()) + { + SWSS_LOG_ERROR("SYSTEM PORT Information is not updated for %s", port.m_alias.c_str()); + } + } +} + + bool PortsOrch::getInbandPort(Port &port) { if (m_portList.find(m_inbandPortName) == m_portList.end()) @@ -8514,7 +10403,8 @@ void PortsOrch::voqSyncAddLag (Port &lag) // Sync only local lag add to CHASSIS_APP_DB - if (switch_id != gVoqMySwitchId) + if (switch_id != gVoqMySwitchId || + !gMultiAsicVoq) { return; } @@ -8575,24 +10465,16 @@ void PortsOrch::voqSyncDelLagMember(Port &lag, Port &port) m_tableVoqSystemLagMemberTable->del(key); } -std::unordered_set PortsOrch::generateCounterStats(const string& type, bool gearbox) +template +std::unordered_set PortsOrch::generateCounterStats(const vector &counterIds, std::string (*serializer)(const T)) { std::unordered_set counter_stats; - if (type == PORT_STAT_COUNTER_FLEX_COUNTER_GROUP) - { - auto& stat_ids = gearbox ? gbport_stat_ids : port_stat_ids; - for (const auto& it: stat_ids) - { - counter_stats.emplace(sai_serialize_port_stat(it)); - } - } - else if (type == PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP) + + for (const auto& it:counterIds) { - for (const auto& it: port_buffer_drop_stat_ids) - { - counter_stats.emplace(sai_serialize_port_stat(it)); - } + counter_stats.emplace(serializer(it)); } + return counter_stats; } @@ -8777,6 +10659,311 @@ void PortsOrch::updatePortStatePoll(const Port &port, port_state_poll_t type, bo } } +bool PortsOrch::createAndSetPortPtTam(const Port &p) +{ + /* + * First, let's check if a TAM object is already assigned to the port. + */ + + /* If the port has already a TAM object, nothing to do */ + if (m_portPtTam.find(p.m_alias) != m_portPtTam.end()) + { + SWSS_LOG_DEBUG( + "Port %s has already a TAM object", p.m_alias.c_str() + ); + return true; + } + + /* + * The port does not have a TAM object assigned to it. + * + * Let's create a new TAM object (if we don't already have one) + * and assign it to the port. + */ + if (m_ptTam == SAI_NULL_OBJECT_ID) + { + if (!createPtTam()) + { + SWSS_LOG_ERROR( + "Failed to create TAM object for Path Tracing" + ); + return false; + } + } + + if (!setPortPtTam(p, m_ptTam)) + { + SWSS_LOG_ERROR( + "Failed to set port %s TAM object for Path Tracing", + p.m_alias.c_str() + ); + return false; + } + + m_ptTamRefCount++; + m_portPtTam[p.m_alias] = m_ptTam; + + return true; +} + +bool PortsOrch::unsetPortPtTam(const Port &p) +{ + /* + * Let's unassign the TAM object from the port and decrease ref counter + */ + if (m_portPtTam.find(p.m_alias) != m_portPtTam.end()) + { + if (!setPortPtTam(p, SAI_NULL_OBJECT_ID)) + { + SWSS_LOG_ERROR( + "Failed to unset port %s TAM object for Path Tracing", + p.m_alias.c_str() + ); + return false; + } + m_ptTamRefCount--; + m_portPtTam.erase(p.m_alias); + + /* + * If the TAM object is no longer used, we can safely remove it. + */ + if (m_ptTamRefCount == 0) + { + if (!removePtTam(m_ptTam)) + { + SWSS_LOG_ERROR( + "Failed to remove TAM object for Path Tracing" + ); + return false; + } + } + } + + return true; +} + +bool PortsOrch::setPortPtIntfId(const Port& port, sai_uint16_t intf_id) +{ + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_PATH_TRACING_INTF; + attr.value.u16 = intf_id; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +bool PortsOrch::setPortPtTimestampTemplate(const Port& port, sai_port_path_tracing_timestamp_type_t ts_type) +{ + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_PATH_TRACING_TIMESTAMP_TYPE; + attr.value.s32 = ts_type; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +bool PortsOrch::setPortPtTam(const Port& port, sai_object_id_t tam_id) +{ + sai_attribute_t attr; + + attr.id = SAI_PORT_ATTR_TAM_OBJECT; + + if (tam_id != SAI_NULL_OBJECT_ID) + { + attr.value.objlist.count = 1; + attr.value.objlist.list = &tam_id; + } + else + { + attr.value.objlist.count = 0; + } + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +bool PortsOrch::createPtTam() +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + vector attrs; + sai_status_t status; + + /* First, create a TAM report */ + if (m_ptTamReport == SAI_NULL_OBJECT_ID) + { + sai_object_id_t tam_report_id; + + attr.id = SAI_TAM_REPORT_ATTR_TYPE; + attr.value.s32 = SAI_TAM_REPORT_TYPE_VENDOR_EXTN; + attrs.push_back(attr); + + status = sai_tam_api->create_tam_report(&tam_report_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create TAM Report object for Path Tracing, rv:%d", status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_TAM, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + m_ptTamReport = tam_report_id; + SWSS_LOG_NOTICE("Created TAM Report object %" PRIx64 " for Path Tracing", tam_report_id); + } + + /* Second, create a TAM INT object */ + if (m_ptTamInt == SAI_NULL_OBJECT_ID) + { + sai_object_id_t tam_int_id; + + attrs.clear(); + + attr.id = SAI_TAM_INT_ATTR_TYPE; + attr.value.s32 = SAI_TAM_INT_TYPE_PATH_TRACING; + attrs.push_back(attr); + + attr.id = SAI_TAM_INT_ATTR_DEVICE_ID; + attr.value.u32 = 0; + attrs.push_back(attr); + + attr.id = SAI_TAM_INT_ATTR_INT_PRESENCE_TYPE; + attr.value.u32 = SAI_TAM_INT_PRESENCE_TYPE_UNDEFINED; + attrs.push_back(attr); + + attr.id = SAI_TAM_INT_ATTR_INLINE; + attr.value.u32 = false; + attrs.push_back(attr); + + attr.id = SAI_TAM_INT_ATTR_REPORT_ID; + attr.value.oid = m_ptTamReport; + attrs.push_back(attr); + + status = sai_tam_api->create_tam_int(&tam_int_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create TAM INT object for Path Tracing, rv:%d", status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_TAM, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + m_ptTamInt = tam_int_id; + SWSS_LOG_NOTICE("Created TAM INT object %" PRIx64 " for Path Tracing", tam_int_id); + } + + /* Finally, create a TAM object */ + if (m_ptTam == SAI_NULL_OBJECT_ID) + { + sai_object_id_t tam_id; + + attrs.clear(); + + attr.id = SAI_TAM_ATTR_INT_OBJECTS_LIST; + attr.value.objlist.count = 1; + attr.value.objlist.list = &m_ptTamInt; + attrs.push_back(attr); + + status = sai_tam_api->create_tam(&tam_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create TAM object for Path Tracing, rv:%d", status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_TAM, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + m_ptTam = tam_id; + SWSS_LOG_NOTICE("Created TAM object %" PRIx64 " for Path Tracing", tam_id); + } + + return true; +} + +bool PortsOrch::removePtTam(sai_object_id_t tam_id) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + + if (m_ptTam != SAI_NULL_OBJECT_ID) + { + status = sai_tam_api->remove_tam(m_ptTam); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove TAM object for Path Tracing, rv:%d", status); + return false; + } + + SWSS_LOG_NOTICE("Removed TAM %" PRIx64, m_ptTam); + m_ptTam = SAI_NULL_OBJECT_ID; + } + + if (m_ptTamInt != SAI_NULL_OBJECT_ID) + { + status = sai_tam_api->remove_tam_int(m_ptTamInt); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove TAM INT object for Path Tracing, rv:%d", status); + return false; + } + + SWSS_LOG_NOTICE("Removed TAM INT %" PRIx64, m_ptTamInt); + m_ptTamInt = SAI_NULL_OBJECT_ID; + } + + if (m_ptTamReport != SAI_NULL_OBJECT_ID) + { + status = sai_tam_api->remove_tam_report(m_ptTamReport); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove TAM Report for Path Tracing, rv:%d", status); + return false; + } + + SWSS_LOG_NOTICE("Removed TAM Report %" PRIx64, m_ptTamReport); + m_ptTamReport = SAI_NULL_OBJECT_ID; + } + + return true; +} + void PortsOrch::doTask(swss::SelectableTimer &timer) { Port port; @@ -8808,4 +10995,3 @@ void PortsOrch::doTask(swss::SelectableTimer &timer) m_port_state_poller->stop(); } } - diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h old mode 100755 new mode 100644 index 24d7c575f8d..954c42ed6dc --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -2,6 +2,7 @@ #define SWSS_PORTSORCH_H #include +#include #include "acltable.h" #include "orch.h" @@ -20,6 +21,8 @@ #include "port/porthlpr.h" #include "port/portschema.h" +#include "high_frequency_telemetry/counternameupdater.h" + #define FCS_LEN 4 #define VLAN_TAG_LEN 4 #define MAX_MACSEC_SECTAG_SIZE 32 @@ -30,6 +33,12 @@ #define QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP "QUEUE_WATERMARK_STAT_COUNTER" #define PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP "PG_WATERMARK_STAT_COUNTER" #define PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP "PG_DROP_STAT_COUNTER" +#define QUEUE_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" +#define PG_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" +#define PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS "10000" +#define PORT_RATE_FLEX_COUNTER_POLLING_INTERVAL_MS "1000" +#define WRED_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP "WRED_ECN_QUEUE_STAT_COUNTER" +#define WRED_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP "WRED_ECN_PORT_STAT_COUNTER" typedef std::vector PortSupportedSpeeds; typedef std::set PortSupportedFecModes; @@ -108,6 +117,13 @@ struct queueInfo sai_uint8_t index; }; +struct systemPortMapInfo +{ + sai_object_id_t system_port_id; + SystemPortInfo system_port_info; + bool info_valid = false; +}; + template struct PortCapability { @@ -141,15 +157,20 @@ class PortsOrch : public Orch, public Subject void setPort(string alias, Port port); void getCpuPort(Port &port); void initHostTxReadyState(Port &port); + void initializePortOperErrors(Port &port); bool getInbandPort(Port &port); bool getVlanByVlanId(sai_vlan_id_t vlan_id, Port &vlan); bool setHostIntfsOperStatus(const Port& port, bool up) const; void updateDbPortOperStatus(const Port& port, sai_port_oper_status_t status) const; + void updateDbPortFlapCount(Port& port, sai_port_oper_status_t pstatus); + void updateDbPortOperError(Port& port, PortOperErrorEvent *pevent); bool createVlanHostIntf(Port& vl, string hostif_name); bool removeVlanHostIntf(Port vl); + unordered_set& getAllVlans(); + bool createBindAclTableGroup(sai_object_id_t port_oid, sai_object_id_t acl_table_oid, sai_object_id_t &group_oid, @@ -176,10 +197,11 @@ class PortsOrch : public Orch, public Subject void generateQueueMap(map queuesStateVector); uint32_t getNumberOfPortSupportedQueueCounters(string port); - void createPortBufferQueueCounters(const Port &port, string queues); - void removePortBufferQueueCounters(const Port &port, string queues); + void createPortBufferQueueCounters(const Port &port, string queues, bool skip_host_tx_queue=true); + void removePortBufferQueueCounters(const Port &port, string queues, bool skip_host_tx_queue=true); void addQueueFlexCounters(map queuesStateVector); void addQueueWatermarkFlexCounters(map queuesStateVector); + void addWredQueueFlexCounters(map queuesStateVector); void generatePriorityGroupMap(map pgsStateVector); uint32_t getNumberOfPortSupportedPgCounters(string port); @@ -191,6 +213,11 @@ class PortsOrch : public Orch, public Subject void generatePortCounterMap(); void generatePortBufferDropCounterMap(); + void generateWredPortCounterMap(); + void generateWredQueueCounterMap(); + + void flushCounters(); + void refreshPortStatus(); bool removeAclTableGroup(const Port &p); @@ -235,38 +262,49 @@ class PortsOrch : public Orch, public Subject bool isMACsecPort(sai_object_id_t port_id) const; vector getPortVoQIds(Port& port); + bool setPortPtIntfId(const Port& port, sai_uint16_t intf_id); + bool setPortPtTimestampTemplate(const Port& port, sai_port_path_tracing_timestamp_type_t ts_type); + private: - unique_ptr
m_counterTable; + unique_ptr m_counterNameMapUpdater; unique_ptr
m_counterSysPortTable; unique_ptr
m_counterLagTable; unique_ptr
m_portTable; unique_ptr
m_sendToIngressPortTable; + unique_ptr
m_systemPortTable; unique_ptr
m_gearboxTable; - unique_ptr
m_queueTable; + unique_ptr m_queueCounterNameMapUpdater; unique_ptr
m_voqTable; unique_ptr
m_queuePortTable; unique_ptr
m_queueIndexTable; unique_ptr
m_queueTypeTable; - unique_ptr
m_pgTable; + unique_ptr m_pgCounterNameMapUpdater; unique_ptr
m_pgPortTable; unique_ptr
m_pgIndexTable; unique_ptr
m_stateBufferMaximumValueTable; - unique_ptr m_flexCounterTable; - unique_ptr m_flexCounterGroupTable; Table m_portStateTable; + Table m_portOpErrTable; std::string getQueueWatermarkFlexCounterTableKey(std::string s); std::string getPriorityGroupWatermarkFlexCounterTableKey(std::string s); std::string getPriorityGroupDropPacketsFlexCounterTableKey(std::string s); std::string getPortRateFlexCounterTableKey(std::string s); + std::string getWredQueueFlexCounterTableKey(std::string s); shared_ptr m_counter_db; - shared_ptr m_flex_db; shared_ptr m_state_db; + shared_ptr m_notificationsDb; - FlexCounterManager port_stat_manager; - FlexCounterManager port_buffer_drop_stat_manager; - FlexCounterManager queue_stat_manager; + FlexCounterTaggedCachedManager port_stat_manager; + FlexCounterTaggedCachedManager port_buffer_drop_stat_manager; + FlexCounterTaggedCachedManager queue_stat_manager; + FlexCounterTaggedCachedManager queue_watermark_manager; + FlexCounterTaggedCachedManager pg_watermark_manager; + FlexCounterTaggedCachedManager pg_drop_stat_manager; + FlexCounterTaggedCachedManager wred_port_stat_manager; + FlexCounterTaggedCachedManager wred_queue_stat_manager; + + std::vector> counter_managers; FlexCounterManager gb_port_stat_manager; shared_ptr m_gb_counter_db; @@ -305,11 +343,13 @@ class PortsOrch : public Orch, public Subject map m_gearboxPortMap; map> m_gearboxPortListLaneMap; + unordered_set m_vlanPorts; port_config_state_t m_portConfigState = PORT_CONFIG_MISSING; sai_uint32_t m_portCount; map, sai_object_id_t> m_portListLaneMap; map, PortConfig> m_lanesAliasSpeedMap; map m_portList; + map m_pluggedModulesPort; map m_portVlanMember; map> m_port_voq_ids; /* mapping from SAI object ID to Name for faster @@ -326,12 +366,20 @@ class PortsOrch : public Orch, public Subject map m_bridge_port_ref_count; NotificationConsumer* m_portStatusNotificationConsumer; + NotificationConsumer* m_portHostTxReadyNotificationConsumer; + bool fec_override_sup = false; bool oper_fec_sup = false; + bool saiHwTxSignalSupported = false; + bool saiTxReadyNotifySupported = false; + bool m_supportsHostIfTxQueue = false; swss::SelectableTimer *m_port_state_poller = nullptr; + bool m_cmisModuleAsicSyncSupported = false; + void doTask() override; + void onWarmBootEnd() override; void doTask(Consumer &consumer); void doPortTask(Consumer &consumer); void doSendToIngressPortTask(Consumer &consumer); @@ -339,8 +387,10 @@ class PortsOrch : public Orch, public Subject void doVlanMemberTask(Consumer &consumer); void doLagTask(Consumer &consumer); void doLagMemberTask(Consumer &consumer); + void doTransceiverPresenceCheck(Consumer &consumer); void doTask(NotificationConsumer &consumer); + void handleNotification(NotificationConsumer &consumer, KeyOpFieldsValuesTuple& entry); void doTask(swss::SelectableTimer &timer); void removePortFromLanesMap(string alias); @@ -348,14 +398,17 @@ class PortsOrch : public Orch, public Subject void removeDefaultVlanMembers(); void removeDefaultBridgePorts(); - bool initializePort(Port &port); - void initializePriorityGroups(Port &port); - void initializePortBufferMaximumParameters(Port &port); - void initializeQueues(Port &port); - void initializeSchedulerGroups(Port &port); + bool initializePorts(std::vector& ports); + void initializePriorityGroupsBulk(std::vector& ports); + void initializeQueuesBulk(std::vector& ports); + void initializeSchedulerGroupsBulk(std::vector& ports); + void initializePortHostTxReadyBulk(std::vector& ports); + void initializePortMtuBulk(std::vector& ports); + + void initializePortBufferMaximumParameters(const Port &port); void initializeVoqs(Port &port); - bool addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id); + bool addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id, bool isUp); bool setHostIntfsStripTag(Port &port, sai_hostif_vlan_tag_t strip); bool setBridgePortLearnMode(Port &port, sai_bridge_port_fdb_learning_mode_t learn_mode); @@ -372,15 +425,21 @@ class PortsOrch : public Orch, public Subject bool setDistributionOnLagMember(Port &lagMember, bool enableDistribution); sai_status_t removePort(sai_object_id_t port_id); - bool initPort(const PortConfig &port); + bool initExistingPort(const PortConfig &port); + bool initPortsBulk(std::vector& ports); + void registerPort(Port &p); + void deInitPort(string alias, sai_object_id_t port_id); void initPortCapAutoNeg(Port &port); void initPortCapLinkTraining(Port &port); + void postPortInit(Port &p); + bool setPortAdminStatus(Port &port, bool up); bool getPortAdminStatus(sai_object_id_t id, bool& up); bool getPortMtu(const Port& port, sai_uint32_t &mtu); + bool getPortHostTxReady(const Port& port, bool &hostTxReadyVal); bool setPortMtu(const Port& port, sai_uint32_t mtu); bool setPortTpid(Port &port, sai_uint16_t tpid); bool setPortPvid (Port &port, sai_uint32_t pvid); @@ -392,6 +451,9 @@ class PortsOrch : public Orch, public Subject bool setBridgePortAdminStatus(sai_object_id_t id, bool up); + bool setSaiHostTxSignal(const Port &port, bool enable); + + void setHostTxReady(Port port, const std::string &status); // Get supported speeds on system side bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); @@ -409,17 +471,21 @@ class PortsOrch : public Orch, public Subject bool getPortAdvSpeeds(const Port& port, bool remote, string& adv_speeds); task_process_status setPortAdvSpeeds(Port &port, std::set &speed_list); - bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); + bool getQueueTypeAndIndex(sai_object_id_t queue_id, sai_queue_type_t &type, uint8_t &index); bool m_isQueueMapGenerated = false; void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq); bool m_isQueueFlexCountersAdded = false; void addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); - void addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq); + void addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType); bool m_isQueueWatermarkFlexCountersAdded = false; void addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); - void addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex); + void addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, sai_queue_type_t queueType); + + bool m_isWredQueueCounterMapGenerated = false; + void addWredQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); + void addWredQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType); bool m_isPriorityGroupMapGenerated = false; void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); @@ -436,10 +502,16 @@ class PortsOrch : public Orch, public Subject bool isAutoNegEnabled(sai_object_id_t id); task_process_status setPortAutoNeg(Port &port, bool autoneg); + task_process_status setPortUnreliableLOS(Port &port, bool enabled); task_process_status setPortInterfaceType(Port &port, sai_port_interface_type_t interface_type); task_process_status setPortAdvInterfaceTypes(Port &port, std::set &interface_types); task_process_status setPortLinkTraining(const Port& port, bool state); + ReturnCode setPortLinkEventDampingAlgorithm(Port &port, + sai_redis_link_event_damping_algorithm_t &link_event_damping_algorithm); + ReturnCode setPortLinkEventDampingAiedConfig(Port &port, + sai_redis_link_event_damping_algo_aied_config_t &config); + void updatePortOperStatus(Port &port, sai_port_oper_status_t status); bool getPortOperSpeed(const Port& port, sai_uint32_t& speed) const; @@ -461,8 +533,7 @@ class PortsOrch : public Orch, public Subject void getPortSerdesVal(const std::string& s, std::vector &lane_values, int base = 16); bool setPortSerdesAttribute(sai_object_id_t port_id, sai_object_id_t switch_id, - std::map> &serdes_attr); - + std::map &serdes_attr); void removePortSerdesAttribute(sai_object_id_t port_id); @@ -475,14 +546,16 @@ class PortsOrch : public Orch, public Subject bool initGearboxPort(Port &port); bool getPortOperFec(const Port& port, sai_port_fec_mode_t &fec_mode) const; void updateDbPortOperFec(Port &port, string fec_str); + void updatePortErrorStatus(Port &port, sai_port_error_status_t port_oper_eror); map m_recircPortRole; //map key is tuple of - map, sai_object_id_t> m_systemPortOidMap; + map, systemPortMapInfo> m_systemPortOidMap; sai_uint32_t m_systemPortCount; bool getSystemPorts(); bool addSystemPorts(); + void updateSystemPort(Port &port); unique_ptr
m_tableVoqSystemLagTable; unique_ptr
m_tableVoqSystemLagMemberTable; void voqSyncAddLag(Port &lag); @@ -492,9 +565,14 @@ class PortsOrch : public Orch, public Subject unique_ptr m_lagIdAllocator; set m_macsecEnabledPorts; - std::unordered_set generateCounterStats(const string& type, bool gearbox = false); + template + std::unordered_set generateCounterStats(const vector &counterIds, std::string (*serializer)(const T)); + map m_queueInfo; + /* Protoypes for Path tracing */ + bool setPortPtTam(const Port& port, sai_object_id_t tam_id); + private: void initializeCpuPort(); void initializePorts(); @@ -502,9 +580,27 @@ class PortsOrch : public Orch, public Subject auto getPortConfigState() const -> port_config_state_t; void setPortConfigState(port_config_state_t value); - bool addPortBulk(const std::vector &portList); + bool addPortBulk(const std::vector &portList, std::vector& addedPorts); bool removePortBulk(const std::vector &portList); + /* Prototypes for Path Tracing */ + bool checkPathTracingCapability(); + bool createPtTam(); + bool removePtTam(sai_object_id_t tam_id); + bool createAndSetPortPtTam(const Port &p); + bool unsetPortPtTam(const Port &p); + sai_object_id_t m_ptTamReport = SAI_NULL_OBJECT_ID; + sai_object_id_t m_ptTamInt = SAI_NULL_OBJECT_ID; + sai_object_id_t m_ptTam = SAI_NULL_OBJECT_ID; + uint32_t m_ptTamRefCount = 0; + map m_portPtTam; + // Define whether the switch supports or not Path Tracing + bool m_isPathTracingSupported = false; + void initCounterCapabilities(sai_object_id_t switchId); + bool m_isWredPortCounterMapGenerated = false; + std::unique_ptr m_queueCounterCapabilitiesTable = nullptr; + std::unique_ptr m_portCounterCapabilitiesTable = nullptr; + private: // Port config aggregator std::unordered_map> m_portConfigMap; @@ -514,5 +610,6 @@ class PortsOrch : public Orch, public Subject // Port OA helper PortHelper m_portHlpr; + bool m_isWarmRestoreStage = false; }; #endif /* SWSS_PORTSORCH_H */ diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 90fc6fc7669..12c2a3e2326 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -11,6 +11,7 @@ #include #include #include +#include using namespace std; @@ -1783,7 +1784,12 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsVal return task_process_status::task_invalid_entry; } - if((tokens[0] == gMyHostName) && (tokens[1] == gMyAsicName)) + string tmp_token_1 = tokens[1]; + string tmp_gMyAsicName = gMyAsicName; + boost::algorithm::to_lower(tmp_token_1); + boost::algorithm::to_lower(tmp_gMyAsicName); + // Check if the port is local to this ASIC + if((tokens[0] == gMyHostName) && (tmp_token_1 == tmp_gMyAsicName)) { local_port = true; local_port_name = tokens[2]; @@ -2018,6 +2024,7 @@ task_process_status QosOrch::handleGlobalQosMap(const string &OP, KeyOpFieldsVal { SWSS_LOG_INFO("Global QoS map %s is not yet created", map_name.c_str()); task_status = task_process_status::task_need_retry; + continue; } if (applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, id)) diff --git a/orchagent/request_parser.cpp b/orchagent/request_parser.cpp index 70b4351119e..e2a63c6a736 100644 --- a/orchagent/request_parser.cpp +++ b/orchagent/request_parser.cpp @@ -41,6 +41,7 @@ void Request::clear() attr_item_bools_.clear(); attr_item_mac_addresses_.clear(); attr_item_packet_actions_.clear(); + attr_item_string_list_.clear(); is_parsed_ = false; } @@ -71,10 +72,10 @@ void Request::parseKey(const KeyOpFieldsValuesTuple& request) key_items.push_back(full_key_.substr(key_item_start, full_key_.length())); /* - * Attempt to parse an IPv6 address only if the following conditions are met: + * Attempt to parse an IPv6/MAC address only if the following conditions are met: * - The key separator is ":" * - The above logic will already correctly parse IPv6 addresses using other key separators - * - Special consideration is only needed for ":" key separators since IPv6 addresses also use ":" as the field separator + * - Special consideration is only needed for ":" key separators since IPv6/MAC addresses also use ":" as the field separator * - The number of parsed key items exceeds the number of expected key items * - If we have too many key items and the last key item is supposed to be an IP or prefix, there is a chance that it was an * IPv6 address that got segmented during parsing @@ -85,7 +86,8 @@ void Request::parseKey(const KeyOpFieldsValuesTuple& request) */ if (key_separator_ == ':' and key_items.size() > number_of_key_items_ and - (request_description_.key_item_types.back() == REQ_T_IP or request_description_.key_item_types.back() == REQ_T_IP_PREFIX)) + (request_description_.key_item_types.back() == REQ_T_IP or request_description_.key_item_types.back() == REQ_T_IP_PREFIX + or request_description_.key_item_types.back() == REQ_T_MAC_ADDRESS)) { // Remove key_items so that key_items.size() is correct, then assemble the removed items into an IPv6 address std::vector ip_addr_groups(--key_items.begin() + number_of_key_items_, key_items.end()); @@ -155,8 +157,16 @@ void Request::parseAttrs(const KeyOpFieldsValuesTuple& request) const auto item = request_description_.attr_item_types.find(fvField(*i)); if (item == not_found) { - throw std::invalid_argument(std::string("Unknown attribute name: ") + fvField(*i)); + if (!relaxed_attr_parsing_) + { + throw std::invalid_argument(std::string("Unknown attribute name: ") + fvField(*i)); + } + else + { + continue; + } } + attr_names_.insert(fvField(*i)); switch(item->second) { @@ -196,6 +206,12 @@ void Request::parseAttrs(const KeyOpFieldsValuesTuple& request) case REQ_T_UINT_LIST: attr_item_uint_list_[fvField(*i)] = parseUintList(fvValue(*i)); break; + case REQ_T_BOOL_LIST: + attr_item_bool_list_[fvField(*i)] = parseBoolList(fvValue(*i)); + break; + case REQ_T_STRING_LIST: + attr_item_string_list_[fvField(*i)] = parseStringList(fvValue(*i)); + break; default: throw std::logic_error(std::string("Not implemented attribute type parser for attribute:") + fvField(*i)); } @@ -362,6 +378,25 @@ sai_packet_action_t Request::parsePacketAction(const std::string& str) return found->second; } +vector Request::parseBoolList(const std::string& str) +{ + try + { + vector res; + string substr; + std::istringstream iss(str); + while (getline(iss, substr, ',')) + { + res.emplace_back(parseBool(substr)); + } + return res; + } + catch (std::invalid_argument& _) + { + throw std::invalid_argument(std::string("Invalid boolean list: ") + str); + } +} + vector Request::parseIpAddressList(const std::string& str) { try @@ -428,3 +463,22 @@ vector Request::parseUintList(const std::string& str) throw std::invalid_argument(std::string("Out of range unsigned integer: ") + str); } } + +vector Request::parseStringList(const std::string& str) +{ + std::vector res; + try + { + std::string token; + std::istringstream iss(str); + while (getline(iss, token, ',')) + { + res.emplace_back(token); + } + } + catch (std::invalid_argument& _) + { + throw std::invalid_argument(std::string("Invalid string list: ") + str); + } + return res; +} diff --git a/orchagent/request_parser.h b/orchagent/request_parser.h index 1fd110977c4..f63c3fd316f 100644 --- a/orchagent/request_parser.h +++ b/orchagent/request_parser.h @@ -22,6 +22,8 @@ typedef enum _request_types_t REQ_T_MAC_ADDRESS_LIST, REQ_T_IP_LIST, REQ_T_UINT_LIST, + REQ_T_BOOL_LIST, + REQ_T_STRING_LIST, } request_types_t; typedef struct _request_description @@ -168,11 +170,24 @@ class Request return attr_item_uint_list_.at(attr_name); } + const std::vector getAttrBoolList(const std::string& attr_name) const + { + assert(is_parsed_); + return attr_item_bool_list_.at(attr_name); + } + + const std::vector& getAttrStringList(const std::string& attr_name) const + { + assert(is_parsed_); + return attr_item_string_list_.at(attr_name); + } + protected: - Request(const request_description_t& request_description, const char key_separator) + Request(const request_description_t& request_description, const char key_separator, bool relaxed_attr_parsing = false) : request_description_(request_description), key_separator_(key_separator), is_parsed_(false), + relaxed_attr_parsing_(relaxed_attr_parsing), number_of_key_items_(request_description.key_item_types.size()) { } @@ -192,6 +207,8 @@ class Request std::vector parseIpAddressList(const std::string& str); std::vector parseMacAddressList(const std::string& str); std::vector parseUintList(const std::string& str); + std::vector parseBoolList(const std::string& str); + std::vector parseStringList(const std::string& str); sai_packet_action_t parsePacketAction(const std::string& str); @@ -199,6 +216,8 @@ class Request char key_separator_; bool is_parsed_; size_t number_of_key_items_; + // Enable if only interested in only a subset of attributes + bool relaxed_attr_parsing_; std::string table_name_; std::string operation_; @@ -212,6 +231,7 @@ class Request // FIXME: Make one union with all the values, except string std::unordered_map attr_item_strings_; std::unordered_map attr_item_bools_; + std::unordered_map> attr_item_bool_list_; std::unordered_map attr_item_mac_addresses_; std::unordered_map attr_item_packet_actions_; std::unordered_map attr_item_vlan_; @@ -222,6 +242,7 @@ class Request std::unordered_map> attr_item_ip_list_; std::unordered_map> attr_item_mac_addresses_list_; std::unordered_map> attr_item_uint_list_; + std::unordered_map> attr_item_string_list_; }; #endif // __REQUEST_PARSER_H diff --git a/orchagent/response_publisher.cpp b/orchagent/response_publisher.cpp index 031f1aefefc..f630ddf5899 100644 --- a/orchagent/response_publisher.cpp +++ b/orchagent/response_publisher.cpp @@ -50,7 +50,7 @@ void RecordResponse(const std::string &response_channel, const std::string &key, { if (!swss::Recorder::Instance().respub.isRecord()) { - return; + return; } std::string s = response_channel + ":" + key + "|" + status; @@ -64,27 +64,45 @@ void RecordResponse(const std::string &response_channel, const std::string &key, } // namespace -ResponsePublisher::ResponsePublisher(bool buffered) : - m_db(std::make_unique("APPL_STATE_DB", 0)), - m_pipe(std::make_unique(m_db.get())), - m_buffered(buffered) +ResponsePublisher::ResponsePublisher(const std::string &dbName, bool buffered, bool db_write_thread) + : m_db(std::make_unique(dbName, 0)), m_buffered(buffered) { + if (m_buffered) + { + m_ntf_pipe = std::make_unique(m_db.get()); + m_db_pipe = std::make_unique(m_db.get()); + } + else + { + m_ntf_pipe = std::make_unique(m_db.get(), 1); + m_db_pipe = std::make_unique(m_db.get(), 1); + } + if (db_write_thread) + { + m_update_thread = std::unique_ptr(new std::thread(&ResponsePublisher::dbUpdateThread, this)); + } } -void ResponsePublisher::publish(const std::string &table, const std::string &key, - const std::vector &intent_attrs, const ReturnCode &status, - const std::vector &state_attrs, bool replace) +ResponsePublisher::~ResponsePublisher() { - // Write to the DB only if: - // 1) A write operation is being performed and state attributes are specified. - // 2) A successful delete operation. - if ((intent_attrs.size() && state_attrs.size()) || (status.ok() && !intent_attrs.size())) + if (m_update_thread != nullptr) { - writeToDB(table, key, state_attrs, intent_attrs.size() ? SET_COMMAND : DEL_COMMAND, replace); + { + std::lock_guard lock(m_lock); + m_queue.emplace(/*table=*/"", /*key=*/"", /*values =*/std::vector{}, /*op=*/"", + /*replace=*/false, /*flush=*/false, /*shutdown=*/true); + } + m_signal.notify_one(); + m_update_thread->join(); } +} +void ResponsePublisher::publish(const std::string &table, const std::string &key, + const std::vector &intent_attrs, const ReturnCode &status, + const std::vector &state_attrs, bool replace) +{ std::string response_channel = "APPL_DB_" + table + "_RESPONSE_CHANNEL"; - swss::NotificationProducer notificationProducer{m_pipe.get(), response_channel, m_buffered}; + swss::NotificationProducer notificationProducer{m_ntf_pipe.get(), response_channel, m_buffered}; auto intent_attrs_copy = intent_attrs; // Add error message as the first field-value-pair. @@ -93,6 +111,14 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key // Sends the response to the notification channel. notificationProducer.send(status.codeStr(), key, intent_attrs_copy); RecordResponse(response_channel, key, intent_attrs_copy, status.codeStr()); + + // Write to the DB only if: + // 1) A write operation is being performed and state attributes are specified. + // 2) A successful delete operation. + if ((intent_attrs.size() && state_attrs.size()) || (status.ok() && !intent_attrs.size())) + { + writeToDB(table, key, state_attrs, intent_attrs.size() ? SET_COMMAND : DEL_COMMAND, replace); + } } void ResponsePublisher::publish(const std::string &table, const std::string &key, @@ -114,7 +140,26 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key void ResponsePublisher::writeToDB(const std::string &table, const std::string &key, const std::vector &values, const std::string &op, bool replace) { - swss::Table applStateTable{m_pipe.get(), table, m_buffered}; + if (m_update_thread != nullptr) + { + { + std::lock_guard lock(m_lock); + m_queue.emplace(table, key, values, op, replace, /*flush=*/false, /*shutdown=*/false); + } + m_signal.notify_one(); + } + else + { + writeToDBInternal(table, key, values, op, replace); + } + RecordDBWrite(table, key, values, op); +} + +void ResponsePublisher::writeToDBInternal(const std::string &table, const std::string &key, + const std::vector &values, const std::string &op, + bool replace) +{ + swss::Table applStateTable{m_db_pipe.get(), table, m_buffered}; auto attrs = values; if (op == SET_COMMAND) @@ -134,7 +179,6 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k if (!applStateTable.get(key, fv)) { applStateTable.set(key, attrs); - RecordDBWrite(table, key, attrs, op); return; } for (auto it = attrs.cbegin(); it != attrs.cend();) @@ -151,22 +195,63 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k if (attrs.size()) { applStateTable.set(key, attrs); - RecordDBWrite(table, key, attrs, op); } } else if (op == DEL_COMMAND) { applStateTable.del(key); - RecordDBWrite(table, key, {}, op); } } void ResponsePublisher::flush() { - m_pipe->flush(); + m_ntf_pipe->flush(); + if (m_update_thread != nullptr) + { + { + std::lock_guard lock(m_lock); + m_queue.emplace(/*table=*/"", /*key=*/"", /*values =*/std::vector{}, /*op=*/"", + /*replace=*/false, /*flush=*/true, /*shutdown=*/false); + } + m_signal.notify_one(); + } + else + { + m_db_pipe->flush(); + } } void ResponsePublisher::setBuffered(bool buffered) { m_buffered = buffered; } + +void ResponsePublisher::dbUpdateThread() +{ + while (true) + { + entry e; + { + std::unique_lock lock(m_lock); + while (m_queue.empty()) + { + m_signal.wait(lock); + } + + e = m_queue.front(); + m_queue.pop(); + } + if (e.shutdown) + { + break; + } + if (e.flush) + { + m_db_pipe->flush(); + } + else + { + writeToDBInternal(e.table, e.key, e.values, e.op, e.replace); + } + } +} diff --git a/orchagent/response_publisher.h b/orchagent/response_publisher.h index ff7bd291e42..e859852e3ff 100644 --- a/orchagent/response_publisher.h +++ b/orchagent/response_publisher.h @@ -1,15 +1,19 @@ #pragma once +#include #include +#include +#include #include +#include #include #include #include "dbconnector.h" #include "notificationproducer.h" +#include "recorder.h" #include "response_publisher_interface.h" #include "table.h" -#include "recorder.h" // This class performs two tasks when publish is called: // 1. Sends a notification into the redis channel. @@ -17,9 +21,9 @@ class ResponsePublisher : public ResponsePublisherInterface { public: - explicit ResponsePublisher(bool buffered = false); + explicit ResponsePublisher(const std::string &dbName, bool buffered = false, bool db_write_thread = false); - virtual ~ResponsePublisher() = default; + virtual ~ResponsePublisher(); // Intent attributes are the attributes sent in the notification into the // redis channel. @@ -46,19 +50,50 @@ class ResponsePublisher : public ResponsePublisherInterface /** * @brief Flush pending responses - */ + */ void flush(); /** * @brief Set buffering mode * * @param buffered Flag whether responses are buffered - */ + */ void setBuffered(bool buffered); private: + struct entry + { + std::string table; + std::string key; + std::vector values; + std::string op; + bool replace; + bool flush; + bool shutdown; + + entry() + { + } + + entry(const std::string &table, const std::string &key, const std::vector &values, + const std::string &op, bool replace, bool flush, bool shutdown) + : table(table), key(key), values(values), op(op), replace(replace), flush(flush), shutdown(shutdown) + { + } + }; + + void dbUpdateThread(); + void writeToDBInternal(const std::string &table, const std::string &key, + const std::vector &values, const std::string &op, bool replace); + std::unique_ptr m_db; - std::unique_ptr m_pipe; + std::unique_ptr m_ntf_pipe; + std::unique_ptr m_db_pipe; bool m_buffered{false}; + // Thread to write to DB. + std::unique_ptr m_update_thread; + std::queue m_queue; + mutable std::mutex m_lock; + std::condition_variable m_signal; }; diff --git a/orchagent/return_code.h b/orchagent/return_code.h index ed154784b75..c9b404f5d7f 100644 --- a/orchagent/return_code.h +++ b/orchagent/return_code.h @@ -177,7 +177,24 @@ class ReturnCode ReturnCode(const sai_status_t &status, const std::string &message = "") : stream_(std::ios_base::out | std::ios_base::ate), is_sai_(true) { - if (m_saiStatusCodeLookup.find(status) == m_saiStatusCodeLookup.end()) + // Non-ranged SAI codes that are not included in this lookup map will map to + // SWSS_RC_UNKNOWN. This includes the general SAI failure: + // SAI_STATUS_FAILURE. + static const auto *const saiStatusCodeLookup = new std::unordered_map({ + {SAI_STATUS_SUCCESS, StatusCode::SWSS_RC_SUCCESS}, + {SAI_STATUS_NOT_SUPPORTED, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_NO_MEMORY, StatusCode::SWSS_RC_NO_MEMORY}, + {SAI_STATUS_INSUFFICIENT_RESOURCES, StatusCode::SWSS_RC_FULL}, + {SAI_STATUS_INVALID_PARAMETER, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_ITEM_ALREADY_EXISTS, StatusCode::SWSS_RC_EXISTS}, + {SAI_STATUS_ITEM_NOT_FOUND, StatusCode::SWSS_RC_NOT_FOUND}, + {SAI_STATUS_TABLE_FULL, StatusCode::SWSS_RC_FULL}, + {SAI_STATUS_NOT_IMPLEMENTED, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_OBJECT_IN_USE, StatusCode::SWSS_RC_IN_USE}, + {SAI_STATUS_NOT_EXECUTED, StatusCode::SWSS_RC_NOT_EXECUTED}, + }); + + if (saiStatusCodeLookup->find(status) == saiStatusCodeLookup->end()) { // Check for ranged SAI codes. if (SAI_RANGED_STATUS_IS_INVALID_ATTRIBUTE(status)) @@ -207,7 +224,7 @@ class ReturnCode } else { - status_ = m_saiStatusCodeLookup[status]; + status_ = saiStatusCodeLookup->at(status); } stream_ << message; } @@ -298,21 +315,6 @@ class ReturnCode } private: - // Non-ranged SAI codes that are not included in this lookup map will map to - // SWSS_RC_UNKNOWN. This includes the general SAI failure: SAI_STATUS_FAILURE. - std::unordered_map m_saiStatusCodeLookup = { - {SAI_STATUS_SUCCESS, StatusCode::SWSS_RC_SUCCESS}, - {SAI_STATUS_NOT_SUPPORTED, StatusCode::SWSS_RC_UNIMPLEMENTED}, - {SAI_STATUS_NO_MEMORY, StatusCode::SWSS_RC_NO_MEMORY}, - {SAI_STATUS_INSUFFICIENT_RESOURCES, StatusCode::SWSS_RC_FULL}, - {SAI_STATUS_INVALID_PARAMETER, StatusCode::SWSS_RC_INVALID_PARAM}, - {SAI_STATUS_ITEM_ALREADY_EXISTS, StatusCode::SWSS_RC_EXISTS}, - {SAI_STATUS_ITEM_NOT_FOUND, StatusCode::SWSS_RC_NOT_FOUND}, - {SAI_STATUS_TABLE_FULL, StatusCode::SWSS_RC_FULL}, - {SAI_STATUS_NOT_IMPLEMENTED, StatusCode::SWSS_RC_UNIMPLEMENTED}, - {SAI_STATUS_OBJECT_IN_USE, StatusCode::SWSS_RC_IN_USE}, - }; - StatusCode status_; std::stringstream stream_; // Whether the ReturnCode is generated from a SAI status code or not. diff --git a/orchagent/rif_rates.lua b/orchagent/rif_rates.lua index ebce14442a1..8355acbe2b9 100644 --- a/orchagent/rif_rates.lua +++ b/orchagent/rif_rates.lua @@ -37,6 +37,10 @@ for i = 1, n do local out_octets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_OCTETS') local out_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS') + if not in_octets or not in_pkts or not out_octets or not out_pkts then + return logtable + end + if initialized == "DONE" or initialized == "COUNTERS_LAST" then -- Get old COUNTERS values local in_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_IN_OCTETS_last') @@ -44,6 +48,10 @@ for i = 1, n do local out_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_OCTETS_last') local out_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS_last') + if not in_octets_last or not in_pkts_last or not out_octets_last or not out_pkts_last then + return logtable + end + -- Calculate new rates values local rx_bps_new = (in_octets - in_octets_last) / delta * 1000 local tx_bps_new = (out_octets - out_octets_last) / delta * 1000 diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index b8b9056439c..58b8eb995e4 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -1,8 +1,11 @@ #include +#include +#include #include #include #include "routeorch.h" #include "nhgorch.h" +#include "tunneldecaporch.h" #include "cbf/cbfnhgorch.h" #include "logger.h" #include "flowcounterrouteorch.h" @@ -25,18 +28,20 @@ extern Directory gDirectory; extern NhgOrch *gNhgOrch; extern CbfNhgOrch *gCbfNhgOrch; extern FlowCounterRouteOrch *gFlowCounterRouteOrch; +extern TunnelDecapOrch *gTunneldecapOrch; extern size_t gMaxBulkSize; +extern string gMySwitchType; /* Default maximum number of next hop groups */ #define DEFAULT_NUMBER_OF_ECMP_GROUPS 128 #define DEFAULT_MAX_ECMP_GROUP_SIZE 32 -RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, SwitchOrch *switchOrch, NeighOrch *neighOrch, IntfsOrch *intfsOrch, VRFOrch *vrfOrch, FgNhgOrch *fgNhgOrch, Srv6Orch *srv6Orch) : +RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, SwitchOrch *switchOrch, NeighOrch *neighOrch, IntfsOrch *intfsOrch, VRFOrch *vrfOrch, FgNhgOrch *fgNhgOrch, Srv6Orch *srv6Orch, swss::ZmqServer *zmqServer) : gRouteBulker(sai_route_api, gMaxBulkSize), gLabelRouteBulker(sai_mpls_api, gMaxBulkSize), gNextHopGroupMemberBulker(sai_next_hop_group_api, gSwitchId, gMaxBulkSize), - Orch(db, tableNames), + ZmqOrch(db, tableNames, zmqServer), m_switchOrch(switchOrch), m_neighOrch(neighOrch), m_intfsOrch(intfsOrch), @@ -44,7 +49,8 @@ RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, m_fgNhgOrch(fgNhgOrch), m_nextHopGroupCount(0), m_srv6Orch(srv6Orch), - m_resync(false) + m_resync(false), + m_appTunnelDecapTermProducer(db, APP_TUNNEL_DECAP_TERM_TABLE_NAME) { SWSS_LOG_ENTER(); @@ -85,6 +91,37 @@ RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, SWSS_LOG_NOTICE("Maximum number of ECMP groups supported is %d", m_maxNextHopGroupCount); + /* fetch the MAX_ECMP_MEMBER_COUNT and for voq platform, set it to 128 */ + attr.id = SAI_SWITCH_ATTR_MAX_ECMP_MEMBER_COUNT; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to get switch attribute max ECMP Group size. rv:%d", status); + } + else + { + uint32_t maxEcmpGroupSize = attr.value.u32; + SWSS_LOG_NOTICE("Switch Type: %s, Max ECMP Group Size supported: %d", gMySwitchType.c_str(), attr.value.u32); + + /*If the switch type is voq, and max Ecmp group size supported is greater or equal to 128, set it to 128 */ + if (gMySwitchType == "voq" && maxEcmpGroupSize >= 128) + { + maxEcmpGroupSize = 128; + attr.id = SAI_SWITCH_ATTR_ECMP_MEMBER_COUNT; + attr.value.s32 = maxEcmpGroupSize; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set switch attribute ECMP member count to 128. rv:%d", status); + } + else + { + SWSS_LOG_NOTICE("Set switch attribute ECMP member count to 128"); + } + } + } + m_stateDb = shared_ptr(new DBConnector("STATE_DB", 0)); m_stateDefaultRouteTb = unique_ptr(new Table(m_stateDb.get(), STATE_ROUTE_TABLE_NAME)); @@ -346,6 +383,71 @@ void RouteOrch::detach(Observer *observer, const IpAddress& dstAddr, sai_object_ } } +void RouteOrch::updateDefaultRouteSwapSet(const NextHopGroupKey default_nhg_key, std::set& active_default_route_nhops) +{ + std::set current_default_route_nhops; + current_default_route_nhops.clear(); + + if (default_nhg_key.getSize() == 1) + { + current_default_route_nhops.insert(*default_nhg_key.getNextHops().begin()); + } + else + { + auto nhgm = m_syncdNextHopGroups[default_nhg_key].nhopgroup_members; + for (auto nhop = nhgm.begin(); nhop != nhgm.end(); ++nhop) + { + current_default_route_nhops.insert(nhop->first); + } + } + + active_default_route_nhops.clear(); + std::copy(current_default_route_nhops.begin(), current_default_route_nhops.end(), std::inserter(active_default_route_nhops, active_default_route_nhops.begin())); +} + +bool RouteOrch::addDefaultRouteNexthopsInNextHopGroup(NextHopGroupEntry& original_next_hop_group, std::set& default_route_next_hop_set) +{ + /* In the function we update the member of existing NexthopGroup to the Default Route Nexthop's */ + SWSS_LOG_ENTER(); + sai_object_id_t nexthop_group_member_id; + sai_status_t status; + + for (auto it : default_route_next_hop_set) + { + vector nhgm_attrs; + sai_attribute_t nhgm_attr; + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + nhgm_attr.value.oid = original_next_hop_group.next_hop_group_id; + nhgm_attrs.push_back(nhgm_attr); + + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + nhgm_attr.value.oid = m_neighOrch->getNextHopId(it); + nhgm_attrs.push_back(nhgm_attr); + + status = sai_next_hop_group_api->create_next_hop_group_member(&nexthop_group_member_id, gSwitchId, + (uint32_t)nhgm_attrs.size(), + nhgm_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Default Route Swap Failed to add next hop member to group %" PRIx64 ": %d\n", + original_next_hop_group.next_hop_group_id, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + // Increment the Default Route Active NH Reference Count + m_neighOrch->increaseNextHopRefCount(it); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + original_next_hop_group.default_route_nhopgroup_members[it].next_hop_id = nexthop_group_member_id; + original_next_hop_group.default_route_nhopgroup_members[it].seq_id = 0; + original_next_hop_group.is_default_route_nh_swap = true; + } + return true; +} + bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& count) { SWSS_LOG_ENTER(); @@ -363,6 +465,13 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& continue; } + // Route NHOP Group is swapped by default route nh memeber . do not add Nexthop again. + // Wait for Nexthop Group Cleanup + if (nhopgroup->second.is_default_route_nh_swap) + { + continue; + } + vector nhgm_attrs; sai_attribute_t nhgm_attr; @@ -409,6 +518,9 @@ bool RouteOrch::validnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t& ++count; gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); nhopgroup->second.nhopgroup_members[nexthop].next_hop_id = nexthop_id; + /* Keep the count of number of nexthop members are present in Nexthop Group + * when the links became active again*/ + nhopgroup->second.nh_member_install_count++; } if (!m_fgNhgOrch->validNextHopInNextHopGroup(nexthop)) @@ -436,6 +548,14 @@ bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t continue; } + // Route NHOP Group is already swapped by default route nh memeber . do not delete actual nexthop again. + + if (nhopgroup->second.is_default_route_nh_swap) + { + continue; + } + + nexthop_id = nhopgroup->second.nhopgroup_members[nexthop].next_hop_id; status = sai_next_hop_group_api->remove_next_hop_group_member(nexthop_id); @@ -449,7 +569,24 @@ bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t return parseHandleSaiStatusFailure(handle_status); } } - + // Reduce the member install count when links down + if (nhopgroup->second.nh_member_install_count) + { + nhopgroup->second.nh_member_install_count--; + } + // Nexthop Group member count has become zero so swap it's memebers with default route + // nexthop's if this route is eligible for such a swap + if (nhopgroup->second.nh_member_install_count == 0 && nhopgroup->second.eligible_for_default_route_nh_swap && !nhopgroup->second.is_default_route_nh_swap) + { + if(nexthop.ip_address.isV4()) + { + addDefaultRouteNexthopsInNextHopGroup(nhopgroup->second, v4_active_default_route_nhops); + } + else + { + addDefaultRouteNexthopsInNextHopGroup(nhopgroup->second, v6_active_default_route_nhops); + } + } ++count; gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); } @@ -462,7 +599,7 @@ bool RouteOrch::invalidnexthopinNextHopGroup(const NextHopKey &nexthop, uint32_t return true; } -void RouteOrch::doTask(Consumer& consumer) +void RouteOrch::doTask(ConsumerBase& consumer) { SWSS_LOG_ENTER(); @@ -591,53 +728,76 @@ void RouteOrch::doTask(Consumer& consumer) string remote_macs; string weights; string nhg_index; + string context_index; bool& excp_intfs_flag = ctx.excp_intfs_flag; bool overlay_nh = false; bool blackhole = false; string srv6_segments; string srv6_source; + string srv6_vpn_sids; + bool srv6_seg = false; + bool srv6_vpn = false; bool srv6_nh = false; + bool fallback_to_default_route = false; for (auto i : kfvFieldsValues(t)) { - if (fvField(i) == "nexthop") + if (fvField(i) == "nexthop" && fvValue(i) != "") ips = fvValue(i); - if (fvField(i) == "ifname") + if (fvField(i) == "ifname" && fvValue(i) != "") aliases = fvValue(i); - if (fvField(i) == "mpls_nh") + if (fvField(i) == "mpls_nh" && fvValue(i) != "") mpls_nhs = fvValue(i); - if (fvField(i) == "vni_label") { + if (fvField(i) == "vni_label" && fvValue(i) != "") { vni_labels = fvValue(i); overlay_nh = true; } - if (fvField(i) == "router_mac") + if (fvField(i) == "router_mac" && fvValue(i) != "") remote_macs = fvValue(i); if (fvField(i) == "blackhole") blackhole = fvValue(i) == "true"; - if (fvField(i) == "weight") + if (fvField(i) == "weight" && fvValue(i) != "") weights = fvValue(i); - if (fvField(i) == "nexthop_group") + if (fvField(i) == "nexthop_group" && fvValue(i) != "") nhg_index = fvValue(i); - if (fvField(i) == "segment") { + if (fvField(i) == "segment" && fvValue(i) != "") { srv6_segments = fvValue(i); + srv6_seg = true; srv6_nh = true; } - if (fvField(i) == "seg_src") + if (fvField(i) == "seg_src" && fvValue(i) != "") { srv6_source = fvValue(i); + srv6_nh = true; + } - if (fvField(i) == "protocol") + if (fvField(i) == "protocol" && fvValue(i) != "") { ctx.protocol = fvValue(i); } + + if (fvField(i) == "fallback_to_default_route") + fallback_to_default_route = fvValue(i) == "true"; + + if (fvField(i) == "vpn_sid" && fvValue(i) != "") { + srv6_vpn_sids = fvValue(i); + srv6_nh = true; + srv6_vpn = true; + } + + if (fvField(i) == "pic_context_id" && fvValue(i) != "") + { + context_index = fvValue(i); + srv6_vpn = true; + } } /* @@ -651,7 +811,9 @@ void RouteOrch::doTask(Consumer& consumer) continue; } + ctx.fallback_to_default_route = fallback_to_default_route; ctx.nhg_index = nhg_index; + ctx.context_index = context_index; /* * If the nexthop_group is empty, create the next hop group key @@ -666,6 +828,7 @@ void RouteOrch::doTask(Consumer& consumer) NextHopGroupKey& nhg = ctx.nhg; vector srv6_segv; vector srv6_src; + vector srv6_vpn_sidv; bool l3Vni = true; uint32_t vni = 0; @@ -679,6 +842,7 @@ void RouteOrch::doTask(Consumer& consumer) rmacv = tokenize(remote_macs, ','); srv6_segv = tokenize(srv6_segments, ','); srv6_src = tokenize(srv6_source, ','); + srv6_vpn_sidv = tokenize(srv6_vpn_sids, ','); /* * For backward compatibility, adjust ip string from old format to @@ -751,6 +915,9 @@ void RouteOrch::doTask(Consumer& consumer) it = consumer.m_toSync.erase(it); else it++; + + /* Publish route state to advertise routes to Loopback interface */ + publishRouteState(ctx); continue; } @@ -762,25 +929,29 @@ void RouteOrch::doTask(Consumer& consumer) } else if (srv6_nh == true) { - string ip; - if (ipv.empty()) + if (srv6_vpn && (srv6_vpn_sidv.size() != srv6_src.size())) { - ip = "0.0.0.0"; + SWSS_LOG_ERROR("inconsistent number of endpoints and srv6 vpn sids."); + it = consumer.m_toSync.erase(it); + continue; } - else + + if (srv6_seg && (srv6_segv.size() != srv6_src.size())) { - SWSS_LOG_ERROR("For SRV6 nexthop ipv should be empty"); + SWSS_LOG_ERROR("inconsistent number of srv6_segv and srv6_srcs."); it = consumer.m_toSync.erase(it); continue; } - nhg_str = ip + NH_DELIMITER + srv6_segv[0] + NH_DELIMITER + srv6_src[0]; - for (uint32_t i = 1; i < srv6_segv.size(); i++) + for (uint32_t i = 0; i < srv6_src.size(); i++) { - nhg_str += NHG_DELIMITER + ip; - nhg_str += NH_DELIMITER + srv6_segv[i]; - nhg_str += NH_DELIMITER + srv6_src[i]; + if (i) nhg_str += NHG_DELIMITER; + nhg_str += (ipv.size() > i ? ipv[i] : "0.0.0.0") + NH_DELIMITER; // ip address + nhg_str += (srv6_seg ? srv6_segv[i] : "") + NH_DELIMITER; // srv6 segment + nhg_str += srv6_src[i] + NH_DELIMITER; // srv6 source + nhg_str += (srv6_vpn ? srv6_vpn_sidv[i] : "") + NH_DELIMITER; // srv6 vpn sid } + nhg = NextHopGroupKey(nhg_str, overlay_nh, srv6_nh); SWSS_LOG_INFO("SRV6 route with nhg %s", nhg.to_string().c_str()); } @@ -804,6 +975,18 @@ void RouteOrch::doTask(Consumer& consumer) } else { + if(ipv.size() != rmacv.size()){ + SWSS_LOG_ERROR("Skip route %s, it has an invalid router mac field %s", key.c_str(), remote_macs.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if(ipv.size() != vni_labelv.size()){ + SWSS_LOG_ERROR("Skip route %s, it has an invalid vni label field %s", key.c_str(), vni_labels.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + for (uint32_t i = 0; i < ipv.size(); i++) { if (i) nhg_str += NHG_DELIMITER; @@ -877,11 +1060,15 @@ void RouteOrch::doTask(Consumer& consumer) * Check if the route does not exist or needs to be updated or * if the route is using a temporary next hop group owned by * NhgOrch. + * With default routes, there may be a setting_entries present in the + * bulker due to a previous DEL event, where we automatically add a + * DROP action. So one of the check below (bulk_entry_pending_removal_or_set) + * checks for both removal and set entries. */ else if (m_syncdRoutes.find(vrf_id) == m_syncdRoutes.end() || m_syncdRoutes.at(vrf_id).find(ip_prefix) == m_syncdRoutes.at(vrf_id).end() || - m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index) || - gRouteBulker.bulk_entry_pending_removal(route_entry) || + m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index, ctx.context_index) || + gRouteBulker.bulk_entry_pending_removal_or_set(route_entry) || ctx.using_temp_nhg) { if (addRoute(ctx, nhg)) @@ -925,6 +1112,10 @@ void RouteOrch::doTask(Consumer& consumer) // Go through the bulker results auto it_prev = consumer.m_toSync.begin(); m_bulkNhgReducedRefCnt.clear(); + NextHopGroupKey v4_default_nhg_key; + NextHopGroupKey v6_default_nhg_key; + m_bulkSrv6NhgReducedVec.clear(); + while (it_prev != it) { KeyOpFieldsValuesTuple t = it_prev->second; @@ -949,6 +1140,11 @@ void RouteOrch::doTask(Consumer& consumer) const sai_object_id_t& vrf_id = ctx.vrf_id; const IpPrefix& ip_prefix = ctx.ip_prefix; + sai_route_entry_t route_entry; + route_entry.vr_id = vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, ip_prefix); + if (op == SET_COMMAND) { const bool& excp_intfs_flag = ctx.excp_intfs_flag; @@ -975,13 +1171,28 @@ void RouteOrch::doTask(Consumer& consumer) } else if (m_syncdRoutes.find(vrf_id) == m_syncdRoutes.end() || m_syncdRoutes.at(vrf_id).find(ip_prefix) == m_syncdRoutes.at(vrf_id).end() || - m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index) || + m_syncdRoutes.at(vrf_id).at(ip_prefix) != RouteNhg(nhg, ctx.nhg_index, ctx.context_index) || + gRouteBulker.bulk_entry_pending_removal(route_entry) || ctx.using_temp_nhg) { if (addRoutePost(ctx, nhg)) it_prev = consumer.m_toSync.erase(it_prev); else it_prev++; + + // Save the Default Route of Default VRF to be used for + // enabling fallback to it as needed + if (ip_prefix.isDefaultRoute() && vrf_id == gVirtualRouterId) + { + if (ip_prefix.isV4()) + { + v4_default_nhg_key = getSyncdRouteNhgKey(gVirtualRouterId, ip_prefix); + } + else + { + v6_default_nhg_key = getSyncdRouteNhgKey(gVirtualRouterId, ip_prefix); + } + } } } else if (op == DEL_COMMAND) @@ -1001,25 +1212,33 @@ void RouteOrch::doTask(Consumer& consumer) { removeOverlayNextHops(it_nhg.second, it_nhg.first); } - else if (it_nhg.first.is_srv6_nexthop()) - { - if(it_nhg.first.getSize() > 1) - { - if(m_syncdNextHopGroups[it_nhg.first].ref_count == 0) - { - removeNextHopGroup(it_nhg.first); - } - else - { - SWSS_LOG_ERROR("SRV6 ECMP %s REF count is not zero", it_nhg.first.to_string().c_str()); - } - } - } else if (m_syncdNextHopGroups[it_nhg.first].ref_count == 0) { - removeNextHopGroup(it_nhg.first); + // Pass the flag to indicate if the NextHop Group as Default Route NH Members as swapped. + removeNextHopGroup(it_nhg.first, m_syncdNextHopGroups[it_nhg.first].is_default_route_nh_swap); } } + /* Reduce reference for srv6 next hop group */ + /* Later delete for increase refcnt early */ + if (!m_bulkSrv6NhgReducedVec.empty()) + { + m_srv6Orch->removeSrv6Nexthops(m_bulkSrv6NhgReducedVec); + } + /* No Update to Default Route so we can return */ + if (!(v4_default_nhg_key.getSize()) && !(v6_default_nhg_key.getSize())) + { + return; + } + /* Update to v4 Default Route so update the data structure */ + if (v4_default_nhg_key.getSize()) + { + updateDefaultRouteSwapSet(v4_default_nhg_key, v4_active_default_route_nhops); + } + /* Update to v6 Default Route so update the data structure */ + if (v6_default_nhg_key.getSize()) + { + updateDefaultRouteSwapSet(v6_default_nhg_key, v6_active_default_route_nhops); + } } } @@ -1122,6 +1341,7 @@ void RouteOrch::increaseNextHopRefCount(const NextHopGroupKey &nexthops) else { m_syncdNextHopGroups[nexthops].ref_count ++; + SWSS_LOG_INFO("Routeorch inc Ref count %u for next_hops: %s", m_syncdNextHopGroups[nexthops].ref_count, nexthops.to_string().c_str()); } } @@ -1143,6 +1363,7 @@ void RouteOrch::decreaseNextHopRefCount(const NextHopGroupKey &nexthops) else { m_syncdNextHopGroups[nexthops].ref_count --; + SWSS_LOG_INFO("Routeorch dec Ref count %u for next_hops: %s", m_syncdNextHopGroups[nexthops].ref_count, nexthops.to_string().c_str()); } } @@ -1239,8 +1460,11 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) vector next_hop_ids; set next_hop_set = nexthops.getNextHops(); + set valid_next_hops_for_refcount; // Track valid next hops for reference counting std::map nhopgroup_members_set; std::map> nhopgroup_shared_set; + MuxOrch* mux_orch = gDirectory.get(); + sai_object_id_t mux_tunnel_nh_id = mux_orch->getTunnelNextHopId(); /* Assert each IP address exists in m_syncdNextHops table, * and add the corresponding next_hop_id to next_hop_ids. */ @@ -1249,13 +1473,15 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) sai_object_id_t next_hop_id; if (m_neighOrch->hasNextHop(it)) { + // this can be tunnel nh id when mux neighbor is disabled next_hop_id = m_neighOrch->getNextHopId(it); } /* See if there is an IP neighbor NH for MPLS NH*/ else if (it.isMplsNextHop() && m_neighOrch->hasNextHop(NextHopKey(it.ip_address, it.alias))) { - m_neighOrch->addNextHop(it); + NeighborContext ctx = NeighborContext(it); + m_neighOrch->addNextHop(ctx); next_hop_id = m_neighOrch->getNextHopId(it); } else @@ -1264,6 +1490,13 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) it.to_string().c_str(), nexthops.to_string().c_str()); return false; } + + // Skip tunnel_nh for reference counting + if (next_hop_id != mux_tunnel_nh_id) + { + valid_next_hops_for_refcount.insert(it); + } + // skip next hop group member create for neighbor from down port if (m_neighOrch->isNextHopFlagSet(it, NHFLAGS_IFDOWN)) { @@ -1281,7 +1514,11 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) nhopgroup_shared_set[next_hop_id].insert(it); } } - + if (!next_hop_ids.size()) + { + SWSS_LOG_INFO("Skipping creation of nexthop group as none of nexthop are active"); + return false; + } sai_attribute_t nhg_attr; vector nhg_attrs; @@ -1313,6 +1550,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) NextHopGroupEntry next_hop_group_entry; next_hop_group_entry.next_hop_group_id = next_hop_group_id; + next_hop_group_entry.nh_member_install_count = 0; size_t npid_count = next_hop_ids.size(); vector nhgm_ids(npid_count); @@ -1383,12 +1621,16 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) { next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].next_hop_id = nhgm_id; next_hop_group_entry.nhopgroup_members[nhopgroup_members_set.find(nhid)->second].seq_id = ((uint32_t)i) + 1; + /* Keep the count of number of nexthop members are present in Nexthop Group*/ + next_hop_group_entry.nh_member_install_count++; } } - /* Increment the ref_count for the next hops used by the next hop group. */ - for (auto it : next_hop_set) + /* Increment the ref_count for the valid next hops used by the next hop group. */ + for (auto it : valid_next_hops_for_refcount) + { m_neighOrch->increaseNextHopRefCount(it); + } /* * Initialize the next hop group structure with ref_count as 0. This @@ -1400,7 +1642,7 @@ bool RouteOrch::addNextHopGroup(const NextHopGroupKey &nexthops) return true; } -bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) +bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops, const bool is_default_route_nh_swap) { SWSS_LOG_ENTER(); @@ -1421,10 +1663,15 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) SWSS_LOG_NOTICE("Delete next hop group %s", nexthops.to_string().c_str()); vector next_hop_ids; - auto& nhgm = next_hop_group_entry->second.nhopgroup_members; + /* If the NexthopGroup is the one that has been swapped with default route members + * than when deleting such Nexthop Group we have to remove default route nexthop group members */ + auto& nhgm = is_default_route_nh_swap ? next_hop_group_entry->second.default_route_nhopgroup_members : next_hop_group_entry->second.nhopgroup_members; for (auto nhop = nhgm.begin(); nhop != nhgm.end();) { - if (m_neighOrch->isNextHopFlagSet(nhop->first, NHFLAGS_IFDOWN)) + /* This check we skip for Nexthop Group that has been swapped + * as Nexthop Group Members are not original member which are already removed + * as part of API invalidnexthopinNextHopGroup */ + if (m_neighOrch->isNextHopFlagSet(nhop->first, NHFLAGS_IFDOWN) && (!is_default_route_nh_swap)) { SWSS_LOG_WARN("NHFLAGS_IFDOWN set for next hop group member %s with next_hop_id %" PRIx64, nhop->first.to_string().c_str(), nhop->second.next_hop_id); @@ -1472,11 +1719,25 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) m_nextHopGroupCount--; gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP); + MuxOrch* mux_orch = gDirectory.get(); + sai_object_id_t mux_tunnel_nh_id = mux_orch->getTunnelNextHopId(); + // Filter valid next hops for reference counting (consistent with addNextHopGroup) set next_hop_set = nexthops.getNextHops(); for (auto it : next_hop_set) { - m_neighOrch->decreaseNextHopRefCount(it); + // Skip mux tunnel next hops (consistent with addNextHopGroup) + auto nh_id = m_neighOrch->getNextHopId(it); + if (nh_id != mux_tunnel_nh_id) + { + m_neighOrch->decreaseNextHopRefCount(it); + } + } + + // Process all next hops for overlay/SRv6/MPLS cleanup + for (auto it : next_hop_set) + { + if (overlay_nh && !srv6_nh && !m_neighOrch->getNextHopRefCount(it)) { if(!m_neighOrch->removeTunnelNextHop(it)) @@ -1503,18 +1764,16 @@ bool RouteOrch::removeNextHopGroup(const NextHopGroupKey &nexthops) } } - if (srv6_nh) + // Decrement Nexthop Reference Count for Default Route NH Member used as swapped + if (is_default_route_nh_swap) { - if (!m_srv6Orch->removeSrv6Nexthops(nexthops)) + auto& nhgm = next_hop_group_entry->second.default_route_nhopgroup_members; + for (auto nhop = nhgm.begin(); nhop != nhgm.end(); ++nhop) { - SWSS_LOG_ERROR("Failed to remove Srv6 Nexthop %s", nexthops.to_string().c_str()); - } - else - { - SWSS_LOG_INFO("Remove ECMP Srv6 nexthops %s", nexthops.to_string().c_str()); + m_neighOrch->decreaseNextHopRefCount(nhop->first); } } - + m_syncdNextHopGroups.erase(nexthops); return true; @@ -1585,13 +1844,9 @@ bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRout auto rt = it->second.begin(); while(rt != it->second.end()) { - /* Check if route is mux multi-nexthop route - * we define this as a route present in - * mux_multi_active_nh_table - * These routes originally point to NHG and should be handled by updateRoute() - */ - MuxOrch* mux_orch = gDirectory.get(); - if (mux_orch->isMultiNexthopRoute((*rt).prefix)) + /* Check if route points to nexthop group and skip */ + NextHopGroupKey nhg_key = gRouteOrch->getSyncdRouteNhgKey(gVirtualRouterId, (*rt).prefix); + if (nhg_key.getSize() > 1) { /* multiple mux nexthop case: * skip for now, muxOrch::updateRoute() will handle route @@ -1603,8 +1858,8 @@ bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRout continue; } - SWSS_LOG_INFO("Updating route %s", (*rt).prefix.to_string().c_str()); next_hop_id = m_neighOrch->getNextHopId(nextHop); + SWSS_LOG_INFO("Updating route %s with nexthop %" PRIu64, (*rt).prefix.to_string().c_str(), (uint64_t)next_hop_id); route_entry.vr_id = (*rt).vrf_id; route_entry.switch_id = gSwitchId; @@ -1670,9 +1925,15 @@ void RouteOrch::addTempRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextH SWSS_LOG_INFO("Failed to get next hop %s for %s", (*it).to_string().c_str(), ipPrefix.to_string().c_str()); it = next_hop_set.erase(it); + continue; } - else - it++; + if(m_neighOrch->isNextHopFlagSet(*it, NHFLAGS_IFDOWN)) + { + SWSS_LOG_INFO("Interface down for NH %s, skip this NH", (*it).to_string().c_str()); + it = next_hop_set.erase(it); + continue; + } + it++; } /* Return if next_hop_set is empty */ @@ -1787,13 +2048,28 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) if (m_neighOrch->hasNextHop(nexthop)) { next_hop_id = m_neighOrch->getNextHopId(nexthop); + if (srv6_nh) + { + SWSS_LOG_INFO("Single NH: create srv6 vpn %s", nextHops.to_string().c_str()); + if (!m_srv6Orch->srv6Nexthops(nextHops, next_hop_id)) + { + SWSS_LOG_ERROR("Failed to create SRV6 vpn %s", nextHops.to_string().c_str()); + return false; + } + } + else if (m_neighOrch->isNextHopFlagSet(nexthop, NHFLAGS_IFDOWN)) + { + SWSS_LOG_INFO("Interface down for NH %s, skip this Route for programming", nexthop.to_string().c_str()); + return false; + } } /* For non-existent MPLS NH, check if IP neighbor NH exists */ else if (nexthop.isMplsNextHop() && m_neighOrch->isNeighborResolved(nexthop)) { /* since IP neighbor NH exists, neighbor is resolved, add MPLS NH */ - m_neighOrch->addNextHop(nexthop); + NeighborContext ctx = NeighborContext(nexthop); + m_neighOrch->addNextHop(ctx); next_hop_id = m_neighOrch->getNextHopId(nexthop); } /* IP neighbor is not yet resolved */ @@ -1837,22 +2113,38 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) /* The route is pointing to a next hop group */ else { + /* Need to call srv6nexthops() always for srv6 route, */ + /* regardless of whether there is already an existing next hop group */ + /* because vpn refcount need to be add if need */ + if (srv6_nh) + { + sai_object_id_t temp_nh_id; + SWSS_LOG_INFO("ECMP SRV6 NH: handle srv6 nexthops %s", nextHops.to_string().c_str()); + if(!m_srv6Orch->srv6Nexthops(nextHops, temp_nh_id)) + { + SWSS_LOG_ERROR("Failed to handle SRV6 nexthops for %s", nextHops.to_string().c_str()); + return false; + } + } + /* Check if there is already an existing next hop group */ if (!hasNextHopGroup(nextHops)) { - if(srv6_nh) + /* Try to create a new next hop group */ + if (!addNextHopGroup(nextHops)) { - sai_object_id_t temp_nh_id; - SWSS_LOG_INFO("ECMP SRV6 NH: create srv6 nexthops %s", nextHops.to_string().c_str()); - if(!m_srv6Orch->srv6Nexthops(nextHops, temp_nh_id)) + /* If the nexthop is a srv6 nexthop, not create tempRoute + * retry to add route */ + if (nextHops.is_srv6_nexthop()) { - SWSS_LOG_ERROR("Failed to create SRV6 nexthops for %s", nextHops.to_string().c_str()); return false; } - } - /* Try to create a new next hop group */ - if (!addNextHopGroup(nextHops)) - { + + if (it_route != m_syncdRoutes.at(vrf_id).end() && it_route->second.nhg_key.is_srv6_nexthop()) + { + return false; + } + for(auto it = nextHops.getNextHops().begin(); it != nextHops.getNextHops().end(); ++it) { const NextHopKey& nextHop = *it; @@ -1903,6 +2195,13 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) /* Return false since the original route is not successfully added */ return false; } + else + { + /* Nexthop Creation Successful. So the save the state if eligible to fallback to default route + * based on APP_DB value for the route. Also initialize the present to False as swap did not happen */ + m_syncdNextHopGroups[nextHops].eligible_for_default_route_nh_swap = ctx.fallback_to_default_route; + m_syncdNextHopGroups[nextHops].is_default_route_nh_swap = false; + } } next_hop_id = m_syncdNextHopGroups[nextHops].next_hop_group_id; @@ -1915,6 +2214,8 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) copy(route_entry.destination, ipPrefix); sai_attribute_t route_attr; + vector attrs; + vector<_sai_attribute_t> route_attrs; auto& object_statuses = ctx.object_statuses; /* If the prefix is not in m_syncdRoutes, then we need to create the route @@ -1933,16 +2234,30 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) { route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; route_attr.value.s32 = SAI_PACKET_ACTION_DROP; + route_attrs.push_back(route_attr); } else { route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; route_attr.value.oid = next_hop_id; + route_attrs.push_back(route_attr); + } + + if (!ctx.context_index.empty() || nextHops.is_srv6_vpn()) + { + if (!ctx.context_index.empty() && !m_srv6Orch->contextIdExists(ctx.context_index)) + { + SWSS_LOG_INFO("Context id %s does not exist", ctx.context_index.c_str()); + return false; + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID; + route_attr.value.u32 = ctx.nhg_index.empty() ? m_srv6Orch->getAggId(nextHops) : m_srv6Orch->getAggId(ctx.context_index); + route_attrs.push_back(route_attr); } /* Default SAI_ROUTE_ATTR_PACKET_ACTION is SAI_PACKET_ACTION_FORWARD */ object_statuses.emplace_back(); - sai_status_t status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, 1, &route_attr); + sai_status_t status = gRouteBulker.create_entry(&object_statuses.back(), &route_entry, (uint32_t)route_attrs.size(), route_attrs.data()); if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { SWSS_LOG_ERROR("Failed to create route %s with next hop(s) %s: already exists in bulker", @@ -1989,6 +2304,21 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); } + // Set update preifx agg id if need + if (nextHops.is_srv6_vpn() || + (it_route->second.context_index != ctx.context_index && !ctx.context_index.empty())) + { + if (!ctx.context_index.empty() && !m_srv6Orch->contextIdExists(ctx.context_index)) + { + SWSS_LOG_INFO("Context id %s does not exist", ctx.context_index.c_str()); + return false; + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID; + route_attr.value.u32 = ctx.nhg_index.empty() ? m_srv6Orch->getAggId(nextHops) : m_srv6Orch->getAggId(ctx.context_index); + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); + } + route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; route_attr.value.oid = next_hop_id; @@ -2026,6 +2356,15 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey return false; } + // Ensure VRF exists in m_syncdRoutes + auto routeTableIter = m_syncdRoutes.find(vrf_id); + if (routeTableIter == m_syncdRoutes.end()) + { + SWSS_LOG_INFO("VRF 0x%" PRIx64 " doesn't exist in syncd routes for route %s, will retry later", + vrf_id, ipPrefix.to_string().c_str()); + return false; + } + if (m_fgNhgOrch->isRouteFineGrained(vrf_id, ipPrefix, nextHops)) { /* Route is pointing to Fine Grained ECMP nexthop group */ @@ -2078,16 +2417,19 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey { // Previous added an temporary route auto& tmp_next_hop = ctx.tmp_next_hop; - addRoutePost(ctx, tmp_next_hop); + if (tmp_next_hop.getSize() > 0) { + addRoutePost(ctx, tmp_next_hop); + } return false; } } auto it_status = object_statuses.begin(); - auto it_route = m_syncdRoutes.at(vrf_id).find(ipPrefix); + auto it_route = routeTableIter->second.find(ipPrefix); + MuxOrch* mux_orch = gDirectory.get(); if (isFineGrained) { - if (it_route == m_syncdRoutes.at(vrf_id).end()) + if (it_route == routeTableIter->second.end()) { /* First time route addition pointing to FG nhg */ if (*it_status++ != SAI_STATUS_SUCCESS) @@ -2129,7 +2471,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey ipPrefix.to_string().c_str(), nextHops.to_string().c_str()); } } - else if (it_route == m_syncdRoutes.at(vrf_id).end()) + else if (it_route == routeTableIter->second.end()) { sai_status_t status = *it_status++; if (status != SAI_STATUS_SUCCESS) @@ -2166,7 +2508,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey } else { - incNhgRefCount(ctx.nhg_index); + incNhgRefCount(ctx.nhg_index, ctx.context_index); } SWSS_LOG_INFO("Post create route %s with next hop(s) %s", @@ -2195,6 +2537,14 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey status = *it_status++; if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + // Routeorch internal cache has an entry, but it has already been removed in sai. + // This can happen in dualtor when a tunnel route is removed that matches a learned route + // remove the entry from the cache and retry route creation + m_syncdRoutes.at(vrf_id).erase(ipPrefix); + return false; + } SWSS_LOG_ERROR("Failed to set route %s with next hop(s) %s", ipPrefix.to_string().c_str(), nextHops.to_string().c_str()); task_process_status handle_status = handleSaiSetStatus(SAI_API_ROUTE, status); @@ -2214,6 +2564,10 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey { decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; + if (ol_nextHops.is_srv6_nexthop()) + { + m_bulkSrv6NhgReducedVec.emplace_back(ol_nextHops); + } if (ol_nextHops.getSize() > 1) { if (m_syncdNextHopGroups[ol_nextHops].ref_count == 0) @@ -2221,6 +2575,19 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey SWSS_LOG_NOTICE("Update Nexthop Group %s", ol_nextHops.to_string().c_str()); m_bulkNhgReducedRefCnt.emplace(ol_nextHops, 0); } + if (mux_orch->isMuxNexthops(ol_nextHops)) + { + SWSS_LOG_NOTICE("Remove mux Nexthop %s", ol_nextHops.to_string().c_str()); + RouteKey routekey = { vrf_id, ipPrefix }; + auto nexthop_list = ol_nextHops.getNextHops(); + for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) + { + if (!nh->ip_address.isZero()) + { + removeNextHopRoute(*nh, routekey); + } + } + } } else if (ol_nextHops.is_overlay_nexthop()) { @@ -2231,11 +2598,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); } } - else if (ol_nextHops.is_srv6_nexthop()) - { - m_srv6Orch->removeSrv6Nexthops(ol_nextHops); - } - else if (ol_nextHops.getSize() == 1) + else if (ol_nextHops.getSize() == 1 && !ol_nextHops.is_srv6_nexthop()) { RouteKey r_key = { vrf_id, ipPrefix }; auto nexthop = NextHopKey(ol_nextHops.to_string()); @@ -2245,7 +2608,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey /* The next hop group is owned by (Cbf)NhgOrch. */ else { - decNhgRefCount(it_route->second.nhg_index); + decNhgRefCount(it_route->second.nhg_index, it_route->second.context_index); } if (blackhole) @@ -2271,14 +2634,13 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey } else { - incNhgRefCount(ctx.nhg_index); + incNhgRefCount(ctx.nhg_index, ctx.context_index); } SWSS_LOG_INFO("Post set route %s with next hop(s) %s", ipPrefix.to_string().c_str(), nextHops.to_string().c_str()); } - MuxOrch* mux_orch = gDirectory.get(); if (ctx.nhg_index.empty() && nextHops.getSize() == 1 && !nextHops.is_overlay_nexthop() && !nextHops.is_srv6_nexthop()) { RouteKey r_key = { vrf_id, ipPrefix }; @@ -2288,7 +2650,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey addNextHopRoute(nexthop, r_key); } } - else if (mux_orch->isMuxNexthops(nextHops)) + else if (nextHops.getSize() > 1 && mux_orch->isMuxNexthops(nextHops) && !nextHops.is_overlay_nexthop() && !nextHops.is_srv6_nexthop()) { RouteKey routekey = { vrf_id, ipPrefix }; auto nexthop_list = nextHops.getNextHops(); @@ -2306,17 +2668,24 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey updateDefRouteState(ipPrefix.to_string(), true); } - if (it_route == m_syncdRoutes.at(vrf_id).end()) + if (it_route == routeTableIter->second.end()) { gFlowCounterRouteOrch->handleRouteAdd(vrf_id, ipPrefix); } - m_syncdRoutes[vrf_id][ipPrefix] = RouteNhg(nextHops, ctx.nhg_index); + m_syncdRoutes[vrf_id][ipPrefix] = RouteNhg(nextHops, ctx.nhg_index, ctx.context_index); + + /* add subnet decap term for VIP route */ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + if (config.enable && isVipRoute(ipPrefix, nextHops)) + { + createVipRouteSubnetDecapTerm(ipPrefix); + } // update routes to reflect mux state if (mux_orch->isMuxNexthops(nextHops)) { - mux_orch->updateRoute(ipPrefix, true); + mux_orch->updateRoute(ipPrefix); } notifyNextHopChangeObservers(vrf_id, ipPrefix, nextHops, true); @@ -2355,8 +2724,22 @@ bool RouteOrch::removeRoute(RouteBulkContext& ctx) size_t creating = gRouteBulker.creating_entries_count(route_entry); if (it_route == it_route_table->second.end() && creating == 0) { + /* + * Clean up the VRF routing table if + * 1. there is no routing entry in the VRF routing table and + * 2. there is no pending bulk creation routing entry in gRouteBulker + * The ideal way of the 2nd condition is to check pending bulk creation entries of a certain VRF. + * However, we can not do that unless going over all entries in gRouteBulker. + * So, we use above strict conditions here + */ + if (it_route_table->second.size() == 0 && gRouteBulker.creating_entries_count() == 0) + { + m_syncdRoutes.erase(vrf_id); + m_vrfOrch->decreaseVrfRefCount(vrf_id); + } SWSS_LOG_INFO("Failed to find route entry, vrf_id 0x%" PRIx64 ", prefix %s\n", vrf_id, - ipPrefix.to_string().c_str()); + ipPrefix.to_string().c_str()); + return true; } @@ -2438,6 +2821,15 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) updateDefRouteState(ipPrefix.to_string()); SWSS_LOG_INFO("Set route %s next hop ID to NULL", ipPrefix.to_string().c_str()); + + if (ipPrefix.isV4()) + { + v4_active_default_route_nhops.clear(); + } + else + { + v6_active_default_route_nhops.clear(); + } } else { @@ -2470,7 +2862,7 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) /* Check if the next hop group is not owned by NhgOrch. */ else if (!it_route->second.nhg_index.empty()) { - decNhgRefCount(it_route->second.nhg_index); + decNhgRefCount(it_route->second.nhg_index, it_route->second.context_index); } /* The NHG is owned by RouteOrch */ else @@ -2481,6 +2873,12 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; + + if (ol_nextHops.is_srv6_nexthop()) + { + m_bulkSrv6NhgReducedVec.emplace_back(ol_nextHops); + } + MuxOrch* mux_orch = gDirectory.get(); if (it_route->second.nhg_key.getSize() > 1) { @@ -2488,19 +2886,18 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) { SWSS_LOG_NOTICE("Remove Nexthop Group %s", ol_nextHops.to_string().c_str()); m_bulkNhgReducedRefCnt.emplace(it_route->second.nhg_key, 0); - if (mux_orch->isMuxNexthops(ol_nextHops)) + } + if (mux_orch->isMuxNexthops(ol_nextHops)) + { + SWSS_LOG_NOTICE("Remove mux Nexthop %s", ol_nextHops.to_string().c_str()); + RouteKey routekey = { vrf_id, ipPrefix }; + auto nexthop_list = ol_nextHops.getNextHops(); + for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) { - SWSS_LOG_NOTICE("Remove mux Nexthop %s", ol_nextHops.to_string().c_str()); - RouteKey routekey = { vrf_id, ipPrefix }; - auto nexthop_list = ol_nextHops.getNextHops(); - for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) + if (!nh->ip_address.isZero()) { - if (!nh->ip_address.isZero()) - { - removeNextHopRoute(*nh, routekey); - } + removeNextHopRoute(*nh, routekey); } - mux_orch->updateRoute(ipPrefix, false); } } } @@ -2525,11 +2922,6 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) { m_neighOrch->removeMplsNextHop(nexthop); } - else if (nexthop.isSrv6NextHop() && - (m_neighOrch->getNextHopRefCount(nexthop) == 0)) - { - m_srv6Orch->removeSrv6Nexthops(it_route->second.nhg_key); - } RouteKey r_key = { vrf_id, ipPrefix }; removeNextHopRoute(nexthop, r_key); @@ -2538,10 +2930,14 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) SWSS_LOG_INFO("Remove route %s with next hop(s) %s", ipPrefix.to_string().c_str(), it_route->second.nhg_key.to_string().c_str()); - + /* Publish removal status, removes route entry from APPL STATE DB */ publishRouteState(ctx); + /* Remove the VIP route subnet decap term */ + removeVipRouteSubnetDecapTerm(ipPrefix); + + if (ipPrefix.isDefaultRoute() && vrf_id == gVirtualRouterId) { it_route_table->second[ipPrefix] = RouteNhg(); @@ -2568,6 +2964,51 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) return true; } +bool RouteOrch::isRouteExists(const IpPrefix& prefix) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t& vrf_id = gVirtualRouterId; + + sai_route_entry_t route_entry; + route_entry.vr_id = vrf_id; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, prefix); + auto it_route_table = m_syncdRoutes.find(vrf_id); + if (it_route_table == m_syncdRoutes.end()) + { + SWSS_LOG_INFO("Failed to find route table, vrf_id 0x%" PRIx64 "\n", vrf_id); + return true; + } + auto it_route = it_route_table->second.find(prefix); + size_t creating = gRouteBulker.creating_entries_count(route_entry); + if (it_route == it_route_table->second.end() && creating == 0) + { + SWSS_LOG_INFO("No Route exists for vrf_id 0x%" PRIx64 ", prefix %s\n", vrf_id, + prefix.to_string().c_str()); + return false; + } + return true; +} + +bool RouteOrch::removeRoutePrefix(const IpPrefix& prefix) +{ + // This function removes the route if it exists. + + string key = prefix.to_string(); + RouteBulkContext context(key, false); + context.ip_prefix = prefix; + context.vrf_id = gVirtualRouterId; + if (removeRoute(context)) + { + SWSS_LOG_INFO("Could not find the route with prefix %s", prefix.to_string().c_str()); + return true; + } + gRouteBulker.flush(); + return removeRoutePost(context); + +} + bool RouteOrch::createRemoteVtep(sai_object_id_t vrf_id, const NextHopKey &nextHop) { SWSS_LOG_ENTER(); @@ -2670,7 +3111,7 @@ const NhgBase &RouteOrch::getNhg(const std::string &nhg_index) } } -void RouteOrch::incNhgRefCount(const std::string &nhg_index) +void RouteOrch::incNhgRefCount(const std::string &nhg_index, const std::string &context_index) { SWSS_LOG_ENTER(); @@ -2682,9 +3123,14 @@ void RouteOrch::incNhgRefCount(const std::string &nhg_index) { gCbfNhgOrch->incNhgRefCount(nhg_index); } + + if (!context_index.empty()) + { + m_srv6Orch->increasePicContextIdRefCount(context_index); + } } -void RouteOrch::decNhgRefCount(const std::string &nhg_index) +void RouteOrch::decNhgRefCount(const std::string &nhg_index, const std::string &context_index) { SWSS_LOG_ENTER(); @@ -2696,6 +3142,11 @@ void RouteOrch::decNhgRefCount(const std::string &nhg_index) { gCbfNhgOrch->decNhgRefCount(nhg_index); } + + if (!context_index.empty()) + { + m_srv6Orch->decreasePicContextIdRefCount(context_index); + } } void RouteOrch::publishRouteState(const RouteBulkContext& ctx, const ReturnCode& status) @@ -2716,3 +3167,52 @@ void RouteOrch::publishRouteState(const RouteBulkContext& ctx, const ReturnCode& m_publisher.publish(APP_ROUTE_TABLE_NAME, ctx.key, fvs, status, replace); } + +inline bool RouteOrch::isVipRoute(const IpPrefix &ipPrefix, const NextHopGroupKey &nextHops) +{ + bool res = true; + /* Ensure all next hops are vlan devices */ + for (const auto &nextHop : nextHops.getNextHops()) + { + res &= (!nextHop.alias.compare(0, strlen(VLAN_PREFIX), VLAN_PREFIX)); + } + /* Ensure the prefix is non-local */ + if (nextHops.getSize() == 1) + { + res &= (!m_intfsOrch->isPrefixSubnet(ipPrefix, nextHops.getNextHops().begin()->alias)); + } + return res; +} + +inline void RouteOrch::createVipRouteSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + if (!config.enable || m_SubnetDecapTermsCreated.find(ipPrefix) != m_SubnetDecapTermsCreated.end()) + { + return; + } + SWSS_LOG_NOTICE("Add subnet decap term for %s", ipPrefix.to_string().c_str()); + static const vector data = { + {"term_type", "MP2MP"}, + {"subnet_type", "vip"} + }; + string tunnel_name = ipPrefix.isV4() ? config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + m_appTunnelDecapTermProducer.set(key, data); + m_SubnetDecapTermsCreated.insert(ipPrefix); +} + +inline void RouteOrch::removeVipRouteSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + auto it = m_SubnetDecapTermsCreated.find(ipPrefix); + if (it == m_SubnetDecapTermsCreated.end()) + { + return; + } + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + SWSS_LOG_NOTICE("Remove subnet decap term for %s", ipPrefix.to_string().c_str()); + string tunnel_name = ipPrefix.isV4() ? config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + m_appTunnelDecapTermProducer.del(key); + m_SubnetDecapTermsCreated.erase(it); +} diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index b2321377668..a903dc42e0f 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -16,6 +16,9 @@ #include "bulker.h" #include "fgnhgorch.h" #include +#include "zmqorch.h" +#include "zmqserver.h" +#include /* Maximum next hop group number */ #define NHGRP_MAX_SIZE 128 @@ -23,6 +26,7 @@ #define EUI64_INTF_ID_LEN 8 #define LOOPBACK_PREFIX "Loopback" +#define VLAN_PREFIX "Vlan" struct NextHopGroupMemberEntry { @@ -36,9 +40,22 @@ struct NhgBase; struct NextHopGroupEntry { + NextHopGroupEntry() : + next_hop_group_id(SAI_NULL_OBJECT_ID), + ref_count(0), + nh_member_install_count(0), + eligible_for_default_route_nh_swap(false), + is_default_route_nh_swap(false) + { + } + sai_object_id_t next_hop_group_id; // next hop group id int ref_count; // reference count NextHopGroupMembers nhopgroup_members; // ids of members indexed by + NextHopGroupMembers default_route_nhopgroup_members; // ids of members indexed by + uint32_t nh_member_install_count; + bool eligible_for_default_route_nh_swap; + bool is_default_route_nh_swap; }; struct NextHopUpdate @@ -64,12 +81,14 @@ struct RouteNhg */ std::string nhg_index; + std::string context_index; + RouteNhg() = default; - RouteNhg(const NextHopGroupKey& key, const std::string& index) : - nhg_key(key), nhg_index(index) {} + RouteNhg(const NextHopGroupKey& key, const std::string& index, const std::string &context_index = "") : + nhg_key(key), nhg_index(index), context_index(context_index) {} bool operator==(const RouteNhg& rnhg) - { return ((nhg_key == rnhg.nhg_key) && (nhg_index == rnhg.nhg_index)); } + { return ((nhg_key == rnhg.nhg_key) && (nhg_index == rnhg.nhg_index) && (context_index == rnhg.context_index)); } bool operator!=(const RouteNhg& rnhg) { return !(*this == rnhg); } }; @@ -88,7 +107,7 @@ struct RouteKey }; /* NextHopGroupTable: NextHopGroupKey, NextHopGroupEntry */ -typedef std::map NextHopGroupTable; +typedef std::unordered_map NextHopGroupTable; /* RouteTable: destination network, NextHopGroupKey */ typedef std::map RouteTable; /* RouteTables: vrf_id, RouteTable */ @@ -116,18 +135,26 @@ struct RouteBulkContext NextHopGroupKey tmp_next_hop; // Temporary next hop NextHopGroupKey nhg; std::string nhg_index; + std::string context_index; sai_object_id_t vrf_id; IpPrefix ip_prefix; bool excp_intfs_flag; // using_temp_nhg will track if the NhgOrch's owned NHG is temporary or not bool using_temp_nhg; + bool fallback_to_default_route; + std::vector ipv; + std::vector alsv; + std::vector vni_labelv; + std::vector rmacv; + bool vrf_group_flag; std::string key; // Key in database table std::string protocol; // Protocol string bool is_set; // True if set operation RouteBulkContext(const std::string& key, bool is_set) - : key(key), excp_intfs_flag(false), using_temp_nhg(false), is_set(is_set) + : key(key), excp_intfs_flag(false), using_temp_nhg(false), is_set(is_set), + fallback_to_default_route(false) { } @@ -140,11 +167,13 @@ struct RouteBulkContext object_statuses.clear(); tmp_next_hop.clear(); nhg.clear(); - excp_intfs_flag = false; + ipv.clear(); vrf_id = SAI_NULL_OBJECT_ID; + excp_intfs_flag = false; using_temp_nhg = false; key.clear(); protocol.clear(); + fallback_to_default_route = false; } }; @@ -180,10 +209,10 @@ struct LabelRouteBulkContext } }; -class RouteOrch : public Orch, public Subject +class RouteOrch : public ZmqOrch, public Subject { public: - RouteOrch(DBConnector *db, vector &tableNames, SwitchOrch *switchOrch, NeighOrch *neighOrch, IntfsOrch *intfsOrch, VRFOrch *vrfOrch, FgNhgOrch *fgNhgOrch, Srv6Orch *srv6Orch); + RouteOrch(DBConnector *db, vector &tableNames, SwitchOrch *switchOrch, NeighOrch *neighOrch, IntfsOrch *intfsOrch, VRFOrch *vrfOrch, FgNhgOrch *fgNhgOrch, Srv6Orch *srv6Orch, swss::ZmqServer *zmqServer = nullptr); bool hasNextHopGroup(const NextHopGroupKey&) const; sai_object_id_t getNextHopGroupId(const NextHopGroupKey&); @@ -195,13 +224,23 @@ class RouteOrch : public Orch, public Subject void decreaseNextHopRefCount(const NextHopGroupKey&); bool isRefCounterZero(const NextHopGroupKey&) const; + void flushRouteBulker() { gRouteBulker.flush(); } + int getNextHopGroupRefCount(const NextHopGroupKey& key) { return m_syncdNextHopGroups[key].ref_count; } + std::set> &getBulkNhgReducedRefCnt() { return m_bulkNhgReducedRefCnt; } + bool addNextHopGroup(const NextHopGroupKey&); - bool removeNextHopGroup(const NextHopGroupKey&); + bool removeNextHopGroup(const NextHopGroupKey&, const bool is_default_route_nh_swap=false); + + bool addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops); + bool removeRoute(RouteBulkContext& ctx); + bool addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey &nextHops); + bool removeRoutePost(const RouteBulkContext& ctx); void addNextHopRoute(const NextHopKey&, const RouteKey&); void removeNextHopRoute(const NextHopKey&, const RouteKey&); bool updateNextHopRoutes(const NextHopKey&, uint32_t&); bool getRoutesForNexthop(std::set&, const NextHopKey&); + bool swapnexthopinNextHopGroup(sai_object_id_t next_hop_group_id, sai_object_id_t default_next_hop_id); bool validnexthopinNextHopGroup(const NextHopKey&, uint32_t&); bool invalidnexthopinNextHopGroup(const NextHopKey&, uint32_t&); @@ -214,6 +253,8 @@ class RouteOrch : public Orch, public Subject const NextHopGroupKey getSyncdRouteNhgKey(sai_object_id_t vrf_id, const IpPrefix& ipPrefix); bool createFineGrainedNextHopGroup(sai_object_id_t &next_hop_group_id, vector &nhg_attrs); bool removeFineGrainedNextHopGroup(sai_object_id_t &next_hop_group_id); + bool isRouteExists(const IpPrefix& prefix); + bool removeRoutePrefix(const IpPrefix& prefix); void addLinkLocalRouteToMe(sai_object_id_t vrf_id, IpPrefix linklocal_prefix); void delLinkLocalRouteToMe(sai_object_id_t vrf_id, IpPrefix linklocal_prefix); @@ -239,6 +280,8 @@ class RouteOrch : public Orch, public Subject unsigned int m_maxNextHopGroupCount; bool m_resync; + std::set v4_active_default_route_nhops; + std::set v6_active_default_route_nhops; shared_ptr m_stateDb; unique_ptr m_stateDefaultRouteTb; @@ -250,6 +293,10 @@ class RouteOrch : public Orch, public Subject std::set> m_bulkNhgReducedRefCnt; /* m_bulkNhgReducedRefCnt: nexthop, vrf_id */ + std::set m_SubnetDecapTermsCreated; + ProducerStateTable m_appTunnelDecapTermProducer; + std::vector m_bulkSrv6NhgReducedVec; + NextHopObserverTable m_nextHopObservers; EntityBulker gRouteBulker; @@ -257,10 +304,6 @@ class RouteOrch : public Orch, public Subject ObjectBulker gNextHopGroupMemberBulker; void addTempRoute(RouteBulkContext& ctx, const NextHopGroupKey&); - bool addRoute(RouteBulkContext& ctx, const NextHopGroupKey&); - bool removeRoute(RouteBulkContext& ctx); - bool addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey &nextHops); - bool removeRoutePost(const RouteBulkContext& ctx); void addTempLabelRoute(LabelRouteBulkContext& ctx, const NextHopGroupKey&); bool addLabelRoute(LabelRouteBulkContext& ctx, const NextHopGroupKey&); @@ -270,14 +313,20 @@ class RouteOrch : public Orch, public Subject void updateDefRouteState(string ip, bool add=false); - void doTask(Consumer& consumer); - void doLabelTask(Consumer& consumer); + void doTask(ConsumerBase& consumer); + void doLabelTask(ConsumerBase& consumer); const NhgBase &getNhg(const std::string& nhg_index); - void incNhgRefCount(const std::string& nhg_index); - void decNhgRefCount(const std::string& nhg_index); void publishRouteState(const RouteBulkContext& ctx, const ReturnCode& status = ReturnCode(SAI_STATUS_SUCCESS)); + + bool isVipRoute(const IpPrefix &ipPrefix, const NextHopGroupKey &nextHops); + void createVipRouteSubnetDecapTerm(const IpPrefix &ipPrefix); + void removeVipRouteSubnetDecapTerm(const IpPrefix &ipPrefix); + bool addDefaultRouteNexthopsInNextHopGroup(NextHopGroupEntry& original_next_hop_group, std::set& default_route_next_hop_set); + void updateDefaultRouteSwapSet(const NextHopGroupKey default_nhg_key, std::set& active_default_route_nhops); + void incNhgRefCount(const std::string& nhg_index, const std::string &context_index = ""); + void decNhgRefCount(const std::string& nhg_index, const std::string &context_index = ""); }; #endif /* SWSS_ROUTEORCH_H */ diff --git a/orchagent/routeresync.cpp b/orchagent/routeresync.cpp index 477226374b7..2eee189702c 100644 --- a/orchagent/routeresync.cpp +++ b/orchagent/routeresync.cpp @@ -4,6 +4,7 @@ #include "dbconnector.h" #include "producerstatetable.h" #include "logger.h" +#include "orch_zmq_config.h" using namespace std; using namespace swss; @@ -18,9 +19,11 @@ int main(int argc, char **argv) swss::Logger::getInstance().setMinPrio(swss::Logger::SWSS_INFO); SWSS_LOG_ENTER(); - DBConnector db("APPL_DB", 0); - ProducerStateTable r(&db, APP_ROUTE_TABLE_NAME); + + // When the feature ORCH_NORTHBOND_ROUTE_ZMQ_ENABLED is enabled, route events must be sent to orchagent via the ZMQ channel. + std::shared_ptr zmqClient = create_local_zmq_client(ORCH_NORTHBOND_ROUTE_ZMQ_ENABLED, false); + std::shared_ptr r = createProducerStateTable(&db, APP_ROUTE_TABLE_NAME, zmqClient); if (argc != 2) { @@ -31,13 +34,13 @@ int main(int argc, char **argv) std::string op = std::string(argv[1]); if (op == "stop") { - r.del("resync"); + r->del("resync"); } else if (op == "start") { FieldValueTuple fv("nexthop", "0.0.0.0"); std::vector fvVector = { fv }; - r.set("resync", fvVector); + r->set("resync", fvVector); } else { diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index 6fcf4c5014a..47fd7d4a677 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -3,6 +3,7 @@ extern "C" { #include "sai.h" #include "saistatus.h" #include "saiextensions.h" +#include "sairedis.h" } #include @@ -71,8 +72,10 @@ sai_srv6_api_t** sai_srv6_api;; sai_l2mc_group_api_t* sai_l2mc_group_api; sai_counter_api_t* sai_counter_api; sai_bfd_api_t* sai_bfd_api; +sai_icmp_echo_api_t* sai_icmp_echo_api; sai_my_mac_api_t* sai_my_mac_api; sai_generic_programmable_api_t* sai_generic_programmable_api; +sai_dash_appliance_api_t* sai_dash_appliance_api; sai_dash_acl_api_t* sai_dash_acl_api; sai_dash_vnet_api_t sai_dash_vnet_api; sai_dash_outbound_ca_to_pa_api_t* sai_dash_outbound_ca_to_pa_api; @@ -82,8 +85,29 @@ sai_dash_inbound_routing_api_t* sai_dash_inbound_routing_api; sai_dash_eni_api_t* sai_dash_eni_api; sai_dash_vip_api_t* sai_dash_vip_api; sai_dash_direction_lookup_api_t* sai_dash_direction_lookup_api; +sai_dash_tunnel_api_t* sai_dash_tunnel_api; +sai_dash_ha_api_t* sai_dash_ha_api; +sai_twamp_api_t* sai_twamp_api; +sai_tam_api_t* sai_tam_api; +sai_stp_api_t* sai_stp_api; +sai_dash_meter_api_t* sai_dash_meter_api; +sai_dash_outbound_port_map_api_t* sai_dash_outbound_port_map_api; +sai_dash_trusted_vni_api_t* sai_dash_trusted_vni_api; extern sai_object_id_t gSwitchId; +extern bool gTraditionalFlexCounter; +extern bool gSyncMode; +extern sai_redis_communication_mode_t gRedisCommunicationMode; +extern event_handle_t g_events_handle; + +vector gGearboxOids; + +unique_ptr gFlexCounterDb; +unique_ptr gFlexCounterGroupTable; +unique_ptr gFlexCounterTable; +unique_ptr gGearBoxFlexCounterDb; +unique_ptr gGearBoxFlexCounterGroupTable; +unique_ptr gGearBoxFlexCounterTable; static map hardware_access_map = { @@ -206,17 +230,27 @@ void initSaiApi() sai_api_query(SAI_API_L2MC_GROUP, (void **)&sai_l2mc_group_api); sai_api_query(SAI_API_COUNTER, (void **)&sai_counter_api); sai_api_query(SAI_API_BFD, (void **)&sai_bfd_api); + sai_api_query(SAI_API_ICMP_ECHO, (void **)&sai_icmp_echo_api); sai_api_query(SAI_API_MY_MAC, (void **)&sai_my_mac_api); sai_api_query(SAI_API_GENERIC_PROGRAMMABLE, (void **)&sai_generic_programmable_api); + sai_api_query((sai_api_t)SAI_API_DASH_APPLIANCE, (void**)&sai_dash_appliance_api); sai_api_query((sai_api_t)SAI_API_DASH_ACL, (void**)&sai_dash_acl_api); sai_api_query((sai_api_t)SAI_API_DASH_VNET, (void**)&sai_dash_vnet_api); sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_CA_TO_PA, (void**)&sai_dash_outbound_ca_to_pa_api); sai_api_query((sai_api_t)SAI_API_DASH_PA_VALIDATION, (void**)&sai_dash_pa_validation_api); sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_ROUTING, (void**)&sai_dash_outbound_routing_api); sai_api_query((sai_api_t)SAI_API_DASH_INBOUND_ROUTING, (void**)&sai_dash_inbound_routing_api); + sai_api_query((sai_api_t)SAI_API_DASH_METER, (void**)&sai_dash_meter_api); sai_api_query((sai_api_t)SAI_API_DASH_ENI, (void**)&sai_dash_eni_api); sai_api_query((sai_api_t)SAI_API_DASH_VIP, (void**)&sai_dash_vip_api); sai_api_query((sai_api_t)SAI_API_DASH_DIRECTION_LOOKUP, (void**)&sai_dash_direction_lookup_api); + sai_api_query((sai_api_t)SAI_API_DASH_TUNNEL, (void**)&sai_dash_tunnel_api); + sai_api_query((sai_api_t)SAI_API_DASH_HA, (void**)&sai_dash_ha_api); + sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_PORT_MAP, (void**)&sai_dash_outbound_port_map_api); + sai_api_query((sai_api_t)SAI_API_DASH_TRUSTED_VNI, (void**)&sai_dash_trusted_vni_api); + sai_api_query(SAI_API_TWAMP, (void **)&sai_twamp_api); + sai_api_query(SAI_API_TAM, (void **)&sai_tam_api); + sai_api_query(SAI_API_STP, (void **)&sai_stp_api); sai_log_set(SAI_API_SWITCH, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_BRIDGE, SAI_LOG_LEVEL_NOTICE); @@ -254,12 +288,40 @@ void initSaiApi() sai_log_set(SAI_API_L2MC_GROUP, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_COUNTER, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_BFD, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_ICMP_ECHO, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_MY_MAC, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_GENERIC_PROGRAMMABLE, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_TWAMP, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_TAM, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_STP, SAI_LOG_LEVEL_NOTICE); +} + +void initFlexCounterTables() +{ + if (gTraditionalFlexCounter) + { + gFlexCounterDb = std::make_unique("FLEX_COUNTER_DB", 0); + gFlexCounterTable = std::make_unique(gFlexCounterDb.get(), FLEX_COUNTER_TABLE); + gFlexCounterGroupTable = std::make_unique(gFlexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE); + + gGearBoxFlexCounterDb = std::make_unique("GB_FLEX_COUNTER_DB", 0); + gGearBoxFlexCounterTable = std::make_unique(gGearBoxFlexCounterDb.get(), FLEX_COUNTER_TABLE); + gGearBoxFlexCounterGroupTable = std::make_unique(gGearBoxFlexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE); + } } void initSaiRedis() { + // SAI_REDIS_SWITCH_ATTR_SYNC_MODE attribute only setBuffer and g_syncMode to true + // since it is not using ASIC_DB, we can execute it before create_switch + // when g_syncMode is set to true here, create_switch will wait the response from syncd + if (gSyncMode) + { + SWSS_LOG_WARN("sync mode is depreacated, use -z param"); + + gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_REDIS_SYNC; + } + /** * NOTE: Notice that all Redis attributes here are using SAI_NULL_OBJECT_ID * as the switch ID, because those operations don't require actual switch @@ -269,6 +331,16 @@ void initSaiRedis() sai_attribute_t attr; sai_status_t status; + attr.id = SAI_REDIS_SWITCH_ATTR_REDIS_COMMUNICATION_MODE; + attr.value.s32 = gRedisCommunicationMode; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set communication mode, rv:%d", status); + return handleSaiFailure(SAI_API_SWITCH, "set", status); + } + auto record_filename = Recorder::Instance().sairedis.getFile(); auto record_location = Recorder::Instance().sairedis.getLoc(); @@ -284,7 +356,7 @@ void initSaiRedis() { SWSS_LOG_ERROR("Failed to set SAI Redis recording output folder to %s, rv:%d", record_location.c_str(), status); - exit(EXIT_FAILURE); + return handleSaiFailure(SAI_API_SWITCH, "set", status); } attr.id = SAI_REDIS_SWITCH_ATTR_RECORDING_FILENAME; @@ -296,7 +368,7 @@ void initSaiRedis() { SWSS_LOG_ERROR("Failed to set SAI Redis recording logfile to %s, rv:%d", record_filename.c_str(), status); - exit(EXIT_FAILURE); + return handleSaiFailure(SAI_API_SWITCH, "set", status); } } @@ -310,22 +382,25 @@ void initSaiRedis() { SWSS_LOG_ERROR("Failed to %s SAI Redis recording, rv:%d", Recorder::Instance().sairedis.isRecord() ? "enable" : "disable", status); - exit(EXIT_FAILURE); + return handleSaiFailure(SAI_API_SWITCH, "set", status); } - attr.id = SAI_REDIS_SWITCH_ATTR_USE_PIPELINE; - attr.value.booldata = true; - - status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); - if (status != SAI_STATUS_SUCCESS) + if (gRedisCommunicationMode == SAI_REDIS_COMMUNICATION_MODE_REDIS_ASYNC) { - SWSS_LOG_ERROR("Failed to enable redis pipeline, rv:%d", status); - exit(EXIT_FAILURE); + SWSS_LOG_NOTICE("Enable redis pipeline"); + attr.id = SAI_REDIS_SWITCH_ATTR_USE_PIPELINE; + attr.value.booldata = true; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to enable redis pipeline, rv:%d", status); + return handleSaiFailure(SAI_API_SWITCH, "set", status); + } } - SWSS_LOG_NOTICE("Enable redis pipeline"); char *platform = getenv("platform"); - if (platform && (strstr(platform, MLNX_PLATFORM_SUBSTRING) || strstr(platform, XS_PLATFORM_SUBSTRING))) + if (platform && (strstr(platform, MLNX_PLATFORM_SUBSTRING) || strstr(platform, XS_PLATFORM_SUBSTRING) || strstr(platform, MRVL_PRST_PLATFORM_SUBSTRING))) { /* We set this long timeout in order for Orchagent to wait enough time for * response from syncd. It is needed since in init, systemd syncd startup @@ -338,7 +413,7 @@ void initSaiRedis() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set SAI REDIS response timeout"); - exit(EXIT_FAILURE); + return handleSaiFailure(SAI_API_SWITCH, "set", status); } SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to %" PRIu64 " ", attr.value.u64); @@ -351,7 +426,7 @@ void initSaiRedis() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to notify syncd INIT_VIEW, rv:%d gSwitchId %" PRIx64, status, gSwitchId); - exit(EXIT_FAILURE); + return handleSaiFailure(SAI_API_SWITCH, "set", status); } SWSS_LOG_NOTICE("Notify syncd INIT_VIEW"); @@ -365,7 +440,7 @@ void initSaiRedis() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set SAI REDIS response timeout"); - exit(EXIT_FAILURE); + return handleSaiFailure(SAI_API_SWITCH, "set", status); } SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to %" PRIu64 " ", attr.value.u64); @@ -477,6 +552,9 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) phy->firmware_major_version = string(attr.value.chardata); } } + + gGearboxOids.push_back(phyOid); + return status; } @@ -493,100 +571,33 @@ task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, vo * in each orch. * 3. Take the type of sai api into consideration. */ - switch (api) - { - case SAI_API_FDB: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - case SAI_STATUS_ITEM_ALREADY_EXISTS: - /* - * In FDB creation, there are scenarios where the hardware learns an FDB entry before orchagent. - * In such cases, the FDB SAI creation would report the status of SAI_STATUS_ITEM_ALREADY_EXISTS, - * and orchagent should ignore the error and treat it as entry was explicitly created. - */ - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; - case SAI_API_HOSTIF: - switch (status) - { - case SAI_STATUS_SUCCESS: - return task_success; - case SAI_STATUS_FAILURE: - /* - * Host interface maybe failed due to lane not available. - * In some scenarios, like SONiC virtual machine, the invalid lane may be not enabled by VM configuration, - * So just ignore the failure and report an error log. - */ - return task_ignore; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; - case SAI_API_ROUTE: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - case SAI_STATUS_ITEM_ALREADY_EXISTS: - case SAI_STATUS_NOT_EXECUTED: - /* With VNET routes, the same route can be learned via multiple - sources, like via BGP. Handle this gracefully */ - return task_success; - case SAI_STATUS_TABLE_FULL: - return task_need_retry; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; - case SAI_API_NEIGHBOR: - case SAI_API_NEXT_HOP: - case SAI_API_NEXT_HOP_GROUP: - switch(status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - case SAI_STATUS_ITEM_ALREADY_EXISTS: - return task_success; - case SAI_STATUS_TABLE_FULL: - return task_need_retry; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; + string s_api = sai_serialize_api(api); + string s_status = sai_serialize_status(status); + + switch (status) + { + case SAI_STATUS_SUCCESS: + return task_success; + case SAI_STATUS_ITEM_NOT_FOUND: + case SAI_STATUS_ADDR_NOT_FOUND: + case SAI_STATUS_OBJECT_IN_USE: + SWSS_LOG_WARN("Status %s is not expected for create operation, SAI API: %s", + s_status.c_str(), s_api.c_str()); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + SWSS_LOG_NOTICE("Returning success for create operation, SAI API: %s, status: %s", + s_api.c_str(), s_status.c_str()); + return task_success; + case SAI_STATUS_INSUFFICIENT_RESOURCES: + case SAI_STATUS_TABLE_FULL: + case SAI_STATUS_NO_MEMORY: + case SAI_STATUS_NV_STORAGE_FULL: + return task_need_retry; default: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } + handleSaiFailure(api, "create", status); + break; } - return task_need_retry; + return task_failed; } task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) @@ -602,68 +613,38 @@ task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void * in each orch. * 3. Take the type of sai api into consideration. */ - if (status == SAI_STATUS_SUCCESS) - { - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); - return task_success; - } - - switch (api) - { - case SAI_API_PORT: - switch (status) - { - case SAI_STATUS_INVALID_ATTR_VALUE_0: - /* - * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task - * and let user correct the configuration. - */ - SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - return task_failed; - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; - case SAI_API_TUNNEL: - switch (status) - { - case SAI_STATUS_ATTR_NOT_SUPPORTED_0: - SWSS_LOG_ERROR("Encountered SAI_STATUS_ATTR_NOT_SUPPORTED_0 in set operation, task failed, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - return task_failed; - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; - case SAI_API_BUFFER: - switch (status) - { - case SAI_STATUS_INSUFFICIENT_RESOURCES: - SWSS_LOG_ERROR("Encountered SAI_STATUS_INSUFFICIENT_RESOURCES in set operation, task failed, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - return task_failed; - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; + string s_api = sai_serialize_api(api); + string s_status = sai_serialize_status(status); + + switch (status) + { + case SAI_STATUS_SUCCESS: + return task_success; + case SAI_STATUS_OBJECT_IN_USE: + SWSS_LOG_WARN("Status %s is not expected for set operation, SAI API: %s", + s_status.c_str(), s_api.c_str()); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + case SAI_STATUS_ITEM_NOT_FOUND: + case SAI_STATUS_ADDR_NOT_FOUND: + /* There are specific cases especially with dual-TORs where tunnel + * routes and non-tunnel routes could be create for the same prefix + * which can potentially lead to conditions where ITEM_NOT_FOUND can + * be returned. This needs special handling in muxorch/routeorch. + */ + SWSS_LOG_NOTICE("Returning success for set operation, SAI API: %s, status: %s", + s_api.c_str(), s_status.c_str()); + return task_success; + case SAI_STATUS_INSUFFICIENT_RESOURCES: + case SAI_STATUS_TABLE_FULL: + case SAI_STATUS_NO_MEMORY: + case SAI_STATUS_NV_STORAGE_FULL: + return task_need_retry; default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); + handleSaiFailure(api, "set", status); break; } - - return task_need_retry; + return task_failed; } task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) @@ -680,57 +661,33 @@ task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, vo * in each orch. * 3. Take the type of sai api into consideration. */ - switch (api) - { - case SAI_API_ROUTE: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); - return task_success; - case SAI_STATUS_ITEM_NOT_FOUND: - case SAI_STATUS_NOT_EXECUTED: - /* When the same route is learned via multiple sources, - there can be a duplicate remove operation. Handle this gracefully */ - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; - case SAI_API_NEIGHBOR: - case SAI_API_NEXT_HOP: - case SAI_API_NEXT_HOP_GROUP: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); - return task_success; - case SAI_STATUS_ITEM_NOT_FOUND: - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } - break; + string s_api = sai_serialize_api(api); + string s_status = sai_serialize_status(status); + + switch (status) + { + case SAI_STATUS_SUCCESS: + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + case SAI_STATUS_INSUFFICIENT_RESOURCES: + case SAI_STATUS_TABLE_FULL: + case SAI_STATUS_NO_MEMORY: + case SAI_STATUS_NV_STORAGE_FULL: + SWSS_LOG_WARN("Status %s is not expected for remove operation, SAI API: %s", + s_status.c_str(), s_api.c_str()); + return task_success; + case SAI_STATUS_ITEM_NOT_FOUND: + case SAI_STATUS_ADDR_NOT_FOUND: + SWSS_LOG_NOTICE("Returning success for remove operation, SAI API: %s, status: %s", + s_api.c_str(), s_status.c_str()); + return task_success; + case SAI_STATUS_OBJECT_IN_USE: + return task_need_retry; default: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - handleSaiFailure(true); - break; - } + handleSaiFailure(api, "remove", status); + break; } - return task_need_retry; + return task_failed; } task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) @@ -746,18 +703,21 @@ task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void * in each orch. * 3. Take the type of sai api into consideration. */ + string s_api = sai_serialize_api(api); + string s_status = sai_serialize_status(status); + switch (status) { case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiGetStatus"); return task_success; - case SAI_STATUS_NOT_IMPLEMENTED: - SWSS_LOG_ERROR("Encountered failure in get operation due to the function is not implemented, exiting orchagent, SAI API: %s", - sai_serialize_api(api).c_str()); - throw std::logic_error("SAI get function not implemented"); default: - SWSS_LOG_ERROR("Encountered failure in get operation, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + /* + * handleSaiFailure() is not called for GET failures as it might + * overwhelm the system if there are too many such calls + */ + SWSS_LOG_NOTICE("Encountered failure in GET operation, SAI API: %s, status: %s", + s_api.c_str(), s_status.c_str()); + break; } return task_failed; } @@ -781,22 +741,402 @@ bool parseHandleSaiStatusFailure(task_process_status status) return true; } -/* Handling SAI failure. Request redis to invoke SAI failure dump and abort if set*/ -void handleSaiFailure(bool abort_on_failure) +/* Handling SAI failure. Request redis to invoke SAI failure dump */ +void handleSaiFailure(sai_api_t api, string oper, sai_status_t status) { SWSS_LOG_ENTER(); + string s_api = sai_serialize_api(api); + string s_status = sai_serialize_status(status); + SWSS_LOG_ERROR("Encountered failure in %s operation, SAI API: %s, status: %s", + oper.c_str(), s_api.c_str(), s_status.c_str()); + + // Publish a structured syslog event + event_params_t params = { + { "operation", oper }, + { "api", s_api }, + { "status", s_status }}; + event_publish(g_events_handle, "sai-operation-failure", ¶ms); + sai_attribute_t attr; attr.id = SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD; attr.value.s32 = SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP; - sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to take sai failure dump %d", status); } - if (abort_on_failure) +} + +static inline void initSaiRedisCounterEmptyParameter(sai_s8_list_t &sai_s8_list) +{ + sai_s8_list.list = nullptr; + sai_s8_list.count = 0; +} + +static inline void initSaiRedisCounterEmptyParameter(sai_redis_flex_counter_group_parameter_t &flex_counter_group_param) +{ + initSaiRedisCounterEmptyParameter(flex_counter_group_param.poll_interval); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.operation); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.stats_mode); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.plugin_name); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.plugins); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size_per_prefix); +} + +static inline void initSaiRedisCounterParameterFromString(sai_s8_list_t &sai_s8_list, const std::string &str) +{ + if (str.length() > 0) + { + sai_s8_list.list = (int8_t*)const_cast(str.c_str()); + sai_s8_list.count = (uint32_t)str.length(); + } + else + { + initSaiRedisCounterEmptyParameter(sai_s8_list); + } +} + +static inline void notifySyncdCounterOperation(bool is_gearbox, const sai_attribute_t &attr) +{ + if (sai_switch_api == nullptr) + { + // This can happen during destruction of the orchagent daemon. + SWSS_LOG_ERROR("sai_switch_api is NULL"); + return; + } + + if (!is_gearbox) + { + sai_switch_api->set_switch_attribute(gSwitchId, &attr); + } + else + { + for (auto gearbox_oid : gGearboxOids) + { + sai_switch_api->set_switch_attribute(gearbox_oid, &attr); + } + } +} + +static inline void operateFlexCounterDbSingleField(std::vector &fvTuples, + const string &field, const string &value) +{ + if (!field.empty() && !value.empty()) + { + fvTuples.emplace_back(field, value); + } +} + +static inline void operateFlexCounterGroupDatabase(const string &group, + const string &poll_interval, + const string &stats_mode, + const string &plugin_name, + const string &plugins, + const string &operation, + bool is_gearbox) +{ + std::vector fvTuples; + auto &flexCounterGroupTable = is_gearbox ? gGearBoxFlexCounterGroupTable : gFlexCounterGroupTable; + + operateFlexCounterDbSingleField(fvTuples, POLL_INTERVAL_FIELD, poll_interval); + operateFlexCounterDbSingleField(fvTuples, STATS_MODE_FIELD, stats_mode); + operateFlexCounterDbSingleField(fvTuples, plugin_name, plugins); + operateFlexCounterDbSingleField(fvTuples, FLEX_COUNTER_STATUS_FIELD, operation); + + flexCounterGroupTable->set(group, fvTuples); +} +void setFlexCounterGroupParameter(const string &group, + const string &poll_interval, + const string &stats_mode, + const string &plugin_name, + const string &plugins, + const string &operation, + bool is_gearbox) +{ + if (gTraditionalFlexCounter) + { + operateFlexCounterGroupDatabase(group, poll_interval, stats_mode, plugin_name, plugins, operation, is_gearbox); + return; + } + + sai_attribute_t attr; + sai_redis_flex_counter_group_parameter_t flex_counter_group_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; + attr.value.ptr = &flex_counter_group_param; + + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size); + initSaiRedisCounterEmptyParameter(flex_counter_group_param.bulk_chunk_size_per_prefix); + initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); + initSaiRedisCounterParameterFromString(flex_counter_group_param.poll_interval, poll_interval); + initSaiRedisCounterParameterFromString(flex_counter_group_param.operation, operation); + initSaiRedisCounterParameterFromString(flex_counter_group_param.stats_mode, stats_mode); + initSaiRedisCounterParameterFromString(flex_counter_group_param.plugin_name, plugin_name); + initSaiRedisCounterParameterFromString(flex_counter_group_param.plugins, plugins); + + notifySyncdCounterOperation(is_gearbox, attr); +} + +void setFlexCounterGroupOperation(const string &group, + const string &operation, + bool is_gearbox) +{ + if (gTraditionalFlexCounter) + { + operateFlexCounterGroupDatabase(group, "", "", "", "", operation, is_gearbox); + return; + } + + sai_attribute_t attr; + sai_redis_flex_counter_group_parameter_t flex_counter_group_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; + attr.value.ptr = &flex_counter_group_param; + + initSaiRedisCounterEmptyParameter(flex_counter_group_param); + initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); + initSaiRedisCounterParameterFromString(flex_counter_group_param.operation, operation); + + notifySyncdCounterOperation(is_gearbox, attr); +} + +void setFlexCounterGroupPollInterval(const string &group, + const string &poll_interval, + bool is_gearbox) +{ + if (gTraditionalFlexCounter) + { + operateFlexCounterGroupDatabase(group, poll_interval, "", "", "", "", is_gearbox); + return; + } + + sai_attribute_t attr; + sai_redis_flex_counter_group_parameter_t flex_counter_group_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; + attr.value.ptr = &flex_counter_group_param; + + initSaiRedisCounterEmptyParameter(flex_counter_group_param); + initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); + initSaiRedisCounterParameterFromString(flex_counter_group_param.poll_interval, poll_interval); + + notifySyncdCounterOperation(is_gearbox, attr); +} + +void setFlexCounterGroupStatsMode(const std::string &group, + const std::string &stats_mode, + bool is_gearbox) +{ + if (gTraditionalFlexCounter) + { + operateFlexCounterGroupDatabase(group, "", stats_mode, "", "", "", is_gearbox); + return; + } + + sai_attribute_t attr; + sai_redis_flex_counter_group_parameter_t flex_counter_group_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; + attr.value.ptr = &flex_counter_group_param; + + initSaiRedisCounterEmptyParameter(flex_counter_group_param); + initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); + initSaiRedisCounterParameterFromString(flex_counter_group_param.stats_mode, stats_mode); + + notifySyncdCounterOperation(is_gearbox, attr); +} + +void setFlexCounterGroupBulkChunkSize(const std::string &group, + const std::string &bulk_chunk_size, + const std::string &bulk_chunk_size_per_prefix, + bool is_gearbox) +{ + sai_attribute_t attr; + sai_redis_flex_counter_group_parameter_t flex_counter_group_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; + attr.value.ptr = &flex_counter_group_param; + + initSaiRedisCounterEmptyParameter(flex_counter_group_param); + initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); + initSaiRedisCounterParameterFromString(flex_counter_group_param.bulk_chunk_size, bulk_chunk_size); + initSaiRedisCounterParameterFromString(flex_counter_group_param.bulk_chunk_size_per_prefix, bulk_chunk_size_per_prefix); + + notifySyncdCounterOperation(is_gearbox, attr); +} + +void delFlexCounterGroup(const std::string &group, + bool is_gearbox) +{ + if (gTraditionalFlexCounter) + { + auto &flexCounterGroupTable = is_gearbox ? gGearBoxFlexCounterGroupTable : gFlexCounterGroupTable; + + if (flexCounterGroupTable != nullptr) + { + flexCounterGroupTable->del(group); + } + + return; + } + + sai_attribute_t attr; + sai_redis_flex_counter_group_parameter_t flex_counter_group_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP; + attr.value.ptr = &flex_counter_group_param; + + initSaiRedisCounterEmptyParameter(flex_counter_group_param); + initSaiRedisCounterParameterFromString(flex_counter_group_param.counter_group_name, group); + + notifySyncdCounterOperation(is_gearbox, attr); +} + +void startFlexCounterPolling(sai_object_id_t switch_oid, + const std::string &key, + const std::string &counter_ids, + const std::string &counter_field_name, + const std::string &stats_mode) +{ + if (gTraditionalFlexCounter) + { + std::vector fvTuples; + auto &flexCounterTable = switch_oid == gSwitchId ? gFlexCounterTable : gGearBoxFlexCounterTable; + + operateFlexCounterDbSingleField(fvTuples, counter_field_name, counter_ids); + operateFlexCounterDbSingleField(fvTuples, STATS_MODE_FIELD, stats_mode); + + flexCounterTable->set(key, fvTuples); + + return; + } + + sai_attribute_t attr; + sai_redis_flex_counter_parameter_t flex_counter_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER; + attr.value.ptr = &flex_counter_param; + + initSaiRedisCounterParameterFromString(flex_counter_param.counter_key, key); + initSaiRedisCounterParameterFromString(flex_counter_param.counter_ids, counter_ids); + initSaiRedisCounterParameterFromString(flex_counter_param.counter_field_name, counter_field_name); + initSaiRedisCounterParameterFromString(flex_counter_param.stats_mode, stats_mode); + + sai_switch_api->set_switch_attribute(switch_oid, &attr); +} + +void stopFlexCounterPolling(sai_object_id_t switch_oid, + const std::string &key) +{ + if (gTraditionalFlexCounter) + { + auto &flexCounterTable = switch_oid == gSwitchId ? gFlexCounterTable : gGearBoxFlexCounterTable; + + if (flexCounterTable != nullptr) + { + flexCounterTable->del(key); + } + + return; + } + + sai_attribute_t attr; + sai_redis_flex_counter_parameter_t flex_counter_param; + + attr.id = SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER; + attr.value.ptr = &flex_counter_param; + + initSaiRedisCounterParameterFromString(flex_counter_param.counter_key, key); + initSaiRedisCounterEmptyParameter(flex_counter_param.counter_ids); + initSaiRedisCounterEmptyParameter(flex_counter_param.counter_field_name); + initSaiRedisCounterEmptyParameter(flex_counter_param.stats_mode); + + sai_switch_api->set_switch_attribute(switch_oid, &attr); +} + +/* + Use metadata info of the SAI object to infer all the available stats + Syncd already has logic to filter out the supported stats +*/ +std::vector queryAvailableCounterStats(const sai_object_type_t object_type) +{ + std::vector stat_list; + auto info = sai_metadata_get_object_type_info(object_type); + + if (!info) + { + SWSS_LOG_ERROR("Metadata info query failed, invalid object: %d", object_type); + return stat_list; + } + + SWSS_LOG_NOTICE("SAI object %s supports stat type %s", + sai_serialize_object_type(object_type).c_str(), + info->statenum->name); + + auto statenumlist = info->statenum->values; + auto statnumcount = (uint32_t)info->statenum->valuescount; + stat_list.reserve(statnumcount); + + for (uint32_t i = 0; i < statnumcount; i++) + { + stat_list.push_back(static_cast(statenumlist[i])); + } + return stat_list; +} + +void writeResultToDB(const std::unique_ptr& table, const string& key, + uint32_t res, const string& version) +{ + SWSS_LOG_ENTER(); + + if (!table) + { + SWSS_LOG_WARN("Table passed in is NULL"); + return; + } + + std::vector fvVector; + + fvVector.emplace_back("result", std::to_string(res)); + + if (!version.empty()) + { + fvVector.emplace_back("version", version); + } + + try + { + table->set(key, fvVector); + } + catch (const exception &e) + { + SWSS_LOG_ERROR("Exception caught while writing to DB: %s", e.what()); + return; + } + SWSS_LOG_INFO("Wrote result to DB for key %s", key.c_str()); +} + +void removeResultFromDB(const std::unique_ptr& table, const string& key) +{ + SWSS_LOG_ENTER(); + + if (!table) + { + SWSS_LOG_WARN("Table passed in is NULL"); + return; + } + + try + { + table->del(key); + } + catch (const exception &e) { - abort(); + SWSS_LOG_ERROR("Exception caught while removing from DB: %s", e.what()); + return; } + SWSS_LOG_INFO("Removed result from DB for key %s", key.c_str()); } diff --git a/orchagent/saihelper.h b/orchagent/saihelper.h index b83f894c2e1..54a52971fdf 100644 --- a/orchagent/saihelper.h +++ b/orchagent/saihelper.h @@ -4,10 +4,13 @@ #include #include "orch.h" +#include "producertable.h" +#include "events.h" #define IS_ATTR_ID_IN_RANGE(attrId, objectType, attrPrefix) \ ((attrId) >= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _START && (attrId) <= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _END) +void initFlexCounterTables(); void initSaiApi(); void initSaiRedis(); sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy); @@ -18,4 +21,42 @@ task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context = nullptr); task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); bool parseHandleSaiStatusFailure(task_process_status status); -void handleSaiFailure(bool abort_on_failure); +void handleSaiFailure(sai_api_t api, std::string oper, sai_status_t status); + +void setFlexCounterGroupParameter(const std::string &group, + const std::string &poll_interval, + const std::string &stats_mode, + const std::string &plugin_name="", + const std::string &plugins="", + const std::string &operation="", + bool is_gearbox=false); +void setFlexCounterGroupPollInterval(const std::string &group, + const std::string &poll_interval, + bool is_gearbox=false); +void setFlexCounterGroupOperation(const std::string &group, + const std::string &operation, + bool is_gearbox=false); +void setFlexCounterGroupStatsMode(const std::string &group, + const std::string &stats_mode, + bool is_gearbox=false); + +void setFlexCounterGroupBulkChunkSize(const std::string &group, + const std::string &bulk_size, + const std::string &bulk_chunk_size_per_prefix, + bool is_gearbox=false); + +void delFlexCounterGroup(const std::string &group, + bool is_gearbox=false); + +void startFlexCounterPolling(sai_object_id_t switch_oid, + const std::string &key, + const std::string &counter_ids, + const std::string &counter_field_name, + const std::string &stats_mode=""); +void stopFlexCounterPolling(sai_object_id_t switch_oid, + const std::string &key); + +std::vector queryAvailableCounterStats(const sai_object_type_t); +void writeResultToDB(const std::unique_ptr&, const std::string& key, + uint32_t res, const std::string& version=""); +void removeResultFromDB(const std::unique_ptr& table, const std::string& key); diff --git a/orchagent/saioffloadsession.h b/orchagent/saioffloadsession.h new file mode 100644 index 00000000000..f81fd70f7b3 --- /dev/null +++ b/orchagent/saioffloadsession.h @@ -0,0 +1,574 @@ +/* + * saioffloadsession.h + * + * Created on: Feb 21, 2025 + * Author: Manas Kumar Mandal + */ +#ifndef SWSS_SAIOFFLOADSESSION_H +#define SWSS_SAIOFFLOADSESSION_H + +#include +#include +#include +#include +#include "portsorch.h" +#include "vrforch.h" + +using namespace std; +using namespace swss; + +using sai_attr_id_val_map_t = std::unordered_map; +using fv_vector_t = std::vector; +using fv_map_t = std::map; +using session_fv_map_t = std::map; +// handler type for setting sai attr map and FieldValues +using sai_attr_handler_map_t = std::unordered_map>; + + +extern sai_object_id_t gSwitchId; +extern sai_object_id_t gVirtualRouterId; +extern PortsOrch* gPortsOrch; +extern sai_switch_api_t* sai_switch_api; +extern Directory gDirectory; + +// saioffload handler types for BFD and ICMP +template +struct SaiOffloadHandlerTraits { }; + +template<> +struct SaiOffloadHandlerTraits +{ + using api_t = sai_bfd_api_t; + using create_session_fn = sai_create_bfd_session_fn; + using remove_session_fn = sai_remove_bfd_session_fn; + using set_session_attribute_fn = sai_set_bfd_session_attribute_fn; + using get_session_attribute_fn = sai_get_bfd_session_attribute_fn; + using get_session_stats_fn = sai_get_bfd_session_stats_fn; + using get_session_stats_ext_fn = sai_get_bfd_session_stats_ext_fn; + using clear_session_stats_fn = sai_clear_bfd_session_stats_fn; + using notif_t = sai_bfd_session_state_notification_t; +}; + +template<> +struct SaiOffloadHandlerTraits +{ + using api_t = sai_icmp_echo_api_t; + using create_session_fn = sai_create_icmp_echo_session_fn; + using remove_session_fn = sai_remove_icmp_echo_session_fn; + using set_session_attribute_fn = sai_set_icmp_echo_session_attribute_fn; + using get_session_attribute_fn = sai_get_icmp_echo_session_attribute_fn; + using get_session_stats_fn = sai_get_icmp_echo_session_stats_fn; + using get_session_stats_ext_fn = sai_get_icmp_echo_session_stats_ext_fn; + using clear_session_stats_fn = sai_clear_icmp_echo_session_stats_fn; + using notif_t = sai_icmp_echo_session_state_notification_t; +}; + +/** + *@enum SaiOffloadHandlerStatus + * + *@brief Enumerated status used by SaiOffloadSessionHandler + */ +enum class SaiOffloadHandlerStatus { + SUCCESS_VALID_ENTRY = 0, + RETRY_VALID_ENTRY = 1, + FAILED_VALID_ENTRY = 2, + FAILED_INVALID_ENTRY = 3 +}; + +const std::unordered_map SaiOffloadStatusStrMap = +{ + {SaiOffloadHandlerStatus::RETRY_VALID_ENTRY, "RETRY_VALID_ENTRY"}, + {SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY, "SUCCESS_VALID_ENTRY"}, + {SaiOffloadHandlerStatus::FAILED_VALID_ENTRY, "FAILED_VALID_ENTRY"}, + {SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY, "FAILED_INVALID_ENTRY"} +}; + +/** + *@struct SaiOffloadSessionHandler + * + *@brief Common Sai Offload session handler used as CRTP + */ +template +struct SaiOffloadSessionHandler { + using Tapis = SaiOffloadHandlerTraits; + + /** + *@method init + * + *@brief Initialize the handler + * + *@param api(in) SAI API function pointers + *@param key(in) Session key + * + *@return SUCCESS_VALID_ENTRY when valid key and successfully initialized + * FAILED_INVALID_ENTRY when key is invalid + * FAILED_VALID_ENTRY when initialization fails for valid key + */ + SaiOffloadHandlerStatus init(typename Tapis::api_t *api, const string &key); + + /** + *@method create + * + *@brief Create SAI offload session + * + *@param fv_data(in) session parameters as Field Value tuples + * + *@return SUCCESS_VALID_ENTRY session parameters valid and created with success + * FAILED_INVALID_ENTRY session parameters are invalid + * FAILED_VALID_ENTRY session creation fails for valid key + * RETRY_VALID_ENTRY retry session creation for valid key + */ + SaiOffloadHandlerStatus create(const fv_vector_t& fv_data); + + /** + *@method handle_hwlookup + * + *@brief Set the hwlookup session attrib based on other session attribs + * + *@return SUCCESS_VALID_ENTRY session parameters valid for hwlookup and consumed without error + * FAILED_INVALID_ENTRY session parameters are invalid for hwlookup + * FAILED_VALID_ENTRY failure in handling hwlookup for valid parameters + */ + SaiOffloadHandlerStatus handle_hwlookup(); + + /** + *@method remove + * + *@brief Remove the SAI offload session + * + *@param id(in) sai session object id to delete + * + *@return SUCEES_VALID_ENTRY session id found and removed + * FAILED_INVALID_ENTRY session id not found + * FAILED_VALID_ENTRY unable to remove session for a found id + * RETRY_VALID_ENTRY retry session removal for a found id + */ + SaiOffloadHandlerStatus remove(sai_object_id_t id); + + /** + *@method update + * + *@brief Update SAI offload session + * + *@param id(in) sai session object id to update + * fv_data(in) session parameters as Field Value tuples + * fv_map(in) existing map of session parameters Field Value + * + *@return SUCCESS_VALID_ENTRY session parameters valid and updated with success + * FAILED_INVALID_ENTRY session parameters are invalid + * FAILED_VALID_ENTRY session update fails for valid key + * RETRY_VALID_ENTRY retry session update for valid key + */ + SaiOffloadHandlerStatus update(sai_object_id_t session_id, const fv_vector_t& fv_data, const fv_map_t& fv_map); + + /** + *@method register_state_change_notification + * + *@brief Registers function pointer to SAI state change notification + * + *@return True on success, False on failure + */ + bool register_state_change_notification(); + + /** + *@method get_fv_vector + * + *@brief Return the vector of field value tuples of a session + * + *@return vector of field value tuples + */ + inline fv_vector_t& get_fv_vector() { + return m_fv_vector; + } + + /** + *@method get_fv_map + * + *@brief Return the map of field value of a session + * + *@return map of field value + */ + inline fv_map_t& get_fv_map() { + return m_fv_map; + } + /** + *@method get_state_db_key + * + *@brief Returns the formatted state db key of the session + * + *@return reference to string of formatted state db key + */ + inline std::string& get_state_db_key() { + return m_state_db_key; + } + + /** + *@method get_session_id + * + *@brief Returns the session id + * + *@return SAI object id of the session + */ + inline sai_object_id_t get_session_id() { + return m_session_id; + } + +protected: + SaiOffloadSessionHandler() = default; + + typename Tapis::create_session_fn sai_create_session; + typename Tapis::remove_session_fn sai_remove_session; + typename Tapis::set_session_attribute_fn sai_set_session_attrib; + typename Tapis::get_session_attribute_fn sai_get_session_attrib; + + string m_key; + // field value vector + fv_vector_t m_data; + // field value vector for state db + fv_vector_t m_fv_vector; + // field value map for session cache + fv_map_t m_fv_map; + string m_alias; + string m_vrf_name; + string m_state_db_key; + uint32_t m_port_id; + uint32_t m_vrf_id; + // session id + sai_object_id_t m_session_id; + // map of sai attribute id and its value + sai_attr_id_val_map_t m_attr_val_map; + // attribute vector used for session creation + std::vector m_attrs; +}; + +template +SaiOffloadHandlerStatus SaiOffloadSessionHandler::init(typename Tapis::api_t *api, const string &key) +{ + m_key = key; + return static_cast(this)->do_init(api); +} + +template +SaiOffloadHandlerStatus SaiOffloadSessionHandler::create(const fv_vector_t& fv_data) +{ + constexpr auto atype = static_cast(SaiOrchHandlerClass::SAI_API_TYPE::API_TYPE); + constexpr auto& name = static_cast(this)->m_name; + auto& handler_map = static_cast(this)->m_handler_map; + + m_data = fv_data; + + // call the handler for each field-value tuple + // and fill the m_attr_val_map and m_fv_vector + for (auto& data : m_data) + { + auto field = fvField(data); + auto value = fvValue(data); + m_fv_map[field] = value; + auto hsearch = handler_map.find(field); + if (hsearch != handler_map.end()) + { + auto& htuple = hsearch->second; + auto& handler = std::get<1>(htuple); + handler(value, m_attr_val_map, m_fv_vector); + } + else + { + SWSS_LOG_ERROR("%s, Unsupported sai attribute handler for %s", name.c_str(), field.c_str()); + continue; + } + } + + // set the SAI hwlookup attribute based on other sai attributes + auto hwlookup_status = handle_hwlookup(); + if (hwlookup_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + return hwlookup_status; + } + + // call the derived orch's create + auto do_create_status = static_cast(this)->do_create(); + if (do_create_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + return do_create_status; + } + + // for the sai attribute vector for create + sai_attribute_t attr; + for (auto it = m_attr_val_map.begin(); it != m_attr_val_map.end(); it++) + { + attr.id = it->first; + attr.value = it->second; + m_attrs.emplace_back(attr); + } + + m_session_id = SAI_NULL_OBJECT_ID; + sai_status_t status = sai_create_session(&m_session_id, gSwitchId, (uint32_t)m_attrs.size(), m_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("%s, SAI create offload session failed %s, rv:%d", name.c_str(), m_key.c_str(), status); + task_process_status handle_status = handleSaiCreateStatus(atype, status); + if (handle_status != task_success) + { + // check for retries + if (parseHandleSaiStatusFailure(handle_status)) + { + return SaiOffloadHandlerStatus::FAILED_VALID_ENTRY; + } + else + { + return SaiOffloadHandlerStatus::RETRY_VALID_ENTRY; + } + } + } + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +template +SaiOffloadHandlerStatus SaiOffloadSessionHandler::handle_hwlookup() +{ + + constexpr auto dst_mac_attr_id = static_cast(SaiOrchHandlerClass::SAI_ATTR_ID::DST_MAC_ID); + constexpr auto src_mac_attr_id = static_cast(SaiOrchHandlerClass::SAI_ATTR_ID::SRC_MAC_ID); + constexpr auto hw_lookup_attr_id = static_cast(SaiOrchHandlerClass::SAI_ATTR_ID::HW_LOOKUP_ID); + constexpr auto port_attr_id = static_cast(SaiOrchHandlerClass::SAI_ATTR_ID::PORT_ID); + constexpr auto vrf_attr_id = static_cast(SaiOrchHandlerClass::SAI_ATTR_ID::VRF_ATTR_ID); + + constexpr auto& name = static_cast(this)->m_name; + auto dmac_it = m_attr_val_map.find(dst_mac_attr_id); + + // hw lookup is not needed when outgoing port is specified + if (m_alias != "default") + { + Port port; + if (!gPortsOrch->getPort(m_alias, port)) + { + SWSS_LOG_ERROR("%s, Failed to locate port %s", name.c_str(), m_alias.c_str()); + return SaiOffloadHandlerStatus::RETRY_VALID_ENTRY; + } + + // dmac is needed as no lookup is performed in hardware + if (dmac_it == m_attr_val_map.end()) + { + SWSS_LOG_ERROR("%s, Failed to create offload session %s: destination MAC address required when hardware lookup not valid", + name.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + // supported only for default vrf + if (m_vrf_name != "default") + { + SWSS_LOG_ERROR("%s, Failed to create offload session %s: vrf is not supported when hardware lookup not valid", + name.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + sai_attribute_value_t val; + val.booldata = false; + m_attr_val_map[hw_lookup_attr_id] = val; + + sai_attribute_value_t val_port; + val_port.oid = port.m_port_id; + m_attr_val_map[port_attr_id] = val_port; + + sai_attribute_value_t val_smac; + auto smac_it = m_attr_val_map.find(src_mac_attr_id); + if (smac_it == m_attr_val_map.end()) + { + memcpy(val_smac.mac, port.m_mac.getMac(), sizeof(sai_mac_t)); + } + m_attr_val_map[src_mac_attr_id] = val_smac; + } + else + { + // dmac is obtained by hardware lookup + if (dmac_it != m_attr_val_map.end()) + { + SWSS_LOG_ERROR("%s, Failed to create session %s: destination MAC address not supported when hardware lookup valid", + name.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + // vrf id needed when hardware lookup is enabled + sai_attribute_value_t vrf_val; + if (m_vrf_name == "default") + { + vrf_val.oid = gVirtualRouterId; + } + else + { + VRFOrch* vrf_orch = gDirectory.get(); + vrf_val.oid = vrf_orch->getVRFid(m_vrf_name); + } + m_attr_val_map[vrf_attr_id] = vrf_val; + + sai_attribute_value_t hw_val; + hw_val.booldata = true; + m_attr_val_map[hw_lookup_attr_id] = hw_val; + } + + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +template +SaiOffloadHandlerStatus SaiOffloadSessionHandler::remove(sai_object_id_t id) +{ + constexpr auto& name = static_cast(this)->m_name; + constexpr auto atype = static_cast(SaiOrchHandlerClass::SAI_API_TYPE::API_TYPE); + + sai_status_t status = sai_remove_session(id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("%s, Failed to remove offload session %s, rv:%d", name.c_str(), + m_key.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(atype, status); + if (handle_status != task_success) + { + // check for retries + if (parseHandleSaiStatusFailure(handle_status)) + { + return SaiOffloadHandlerStatus::FAILED_VALID_ENTRY; + } + else + { + return SaiOffloadHandlerStatus::RETRY_VALID_ENTRY; + } + } + } + + // call the derived orch's remove + auto do_remove_status = static_cast(this)->do_remove(); + if (do_remove_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + return do_remove_status; + } + + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +template +SaiOffloadHandlerStatus SaiOffloadSessionHandler::update(sai_object_id_t session_id, const fv_vector_t& fv_data, const fv_map_t& fv_map) +{ + constexpr auto& name = static_cast(this)->m_name; + auto& handler_map = static_cast(this)->m_handler_map; + auto& update_fields = static_cast(this)->m_update_fields; + + m_data = fv_data; + m_session_id = session_id; + + // call the handler for field if updatable and + // fill the m_attr_val_map and m_fv_vector + for (auto& data : m_data) + { + auto field = fvField(data); + auto value = fvValue(data); + m_fv_map[field] = value; + + // check for new update field + if (fv_map.find(field) == fv_map.end()) + { + SWSS_LOG_ERROR("%s, Unsupported new field update %s:%s for %s", + name.c_str(), field.c_str(), value.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + // check if field needs update + if (fv_map.at(field) == value) + { + continue; + } + + // check if this field update supported + if (update_fields.find(field) == update_fields.end()) + { + SWSS_LOG_ERROR("%s, Unsupported field update %s:%s for %s", + name.c_str(), field.c_str(), value.c_str(), m_key.c_str()); + return SaiOffloadHandlerStatus::FAILED_INVALID_ENTRY; + } + + SWSS_LOG_INFO("%s, field update %s:%s for %s", name.c_str(), + field.c_str(), value.c_str(), m_key.c_str()); + + auto hsearch = handler_map.find(field); + if (hsearch != handler_map.end()) + { + auto& htuple = hsearch->second; + auto& handler = std::get<1>(htuple); + handler(value, m_attr_val_map, m_fv_vector); + } + else + { + SWSS_LOG_ERROR("%s, Unsupported sai attribute handler field %s for %s", + name.c_str(), field.c_str(), m_key.c_str()); + } + } + + // call the derived orch's update + auto do_update_status = static_cast(this)->do_update(); + if (do_update_status != SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY) + { + return do_update_status; + } + + // update the session attributes + // for the sai attribute vector for create + sai_attribute_t attr; + for (auto it = m_attr_val_map.begin(); it != m_attr_val_map.end(); it++) + { + attr.id = it->first; + attr.value = it->second; + + sai_status_t status = sai_set_session_attrib(m_session_id, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("%s, SAI offload session attrib id %u set failed %s, rv:%d", + name.c_str(), attr.id, m_key.c_str(), status); + return SaiOffloadHandlerStatus::FAILED_VALID_ENTRY; + } + } + + return SaiOffloadHandlerStatus::SUCCESS_VALID_ENTRY; +} + +template +bool SaiOffloadSessionHandler::register_state_change_notification() +{ + constexpr auto& name = static_cast(this)->m_name; + constexpr auto notify_attr_id = static_cast(SaiOrchHandlerClass::SAI_NOTIF_ATTR_ID::STATE_CHANGE); + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + notify_attr_id, + &capability); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("%s, Unable to query the change notification capability", name.c_str()); + return false; + } + + if (!capability.set_implemented) + { + SWSS_LOG_ERROR("%s, register change notification not supported", name.c_str()); + return false; + } + + attr.id = notify_attr_id; + attr.value.ptr = (void *)&SaiOrchHandlerClass::on_state_change; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("%s, Failed to register notification handler", name.c_str()); + return false; + } + return true; +} + +#endif diff --git a/orchagent/srv6orch.cpp b/orchagent/srv6orch.cpp index d1177cddc2b..cb6c3cd1e0c 100644 --- a/orchagent/srv6orch.cpp +++ b/orchagent/srv6orch.cpp @@ -1,16 +1,30 @@ #include #include +#include +#include #include "routeorch.h" #include "logger.h" #include "srv6orch.h" #include "sai_serialize.h" #include "crmorch.h" +#include "subscriberstatetable.h" +#include "redisutility.h" +#include "flex_counter_manager.h" +#include "flow_counter_handler.h" using namespace std; using namespace swss; #define ADJ_DELIMITER ',' +#define OVERLAY_RIF_DEFAULT_MTU 9100 +#define LOCATOR_DEFAULT_BLOCK_LEN "32" +#define LOCATOR_DEFAULT_NODE_LEN "16" +#define LOCATOR_DEFAULT_FUNC_LEN "16" +#define LOCATOR_DEFAULT_ARG_LEN "0" + +#define SRV6_FLEX_COUNTER_UPDATE_TIMER 1 +#define SRV6_STAT_COUNTER_POLLING_INTERVAL_MS 10000 extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; @@ -18,9 +32,11 @@ extern sai_object_id_t gUnderlayIfId; extern sai_srv6_api_t* sai_srv6_api; extern sai_tunnel_api_t* sai_tunnel_api; extern sai_next_hop_api_t* sai_next_hop_api; +extern sai_router_interface_api_t* sai_router_intfs_api; extern RouteOrch *gRouteOrch; extern CrmOrch *gCrmOrch; +extern bool gTraditionalFlexCounter; const map end_behavior_map = { @@ -36,11 +52,11 @@ const map end_behavior_map = {"end.b6.encaps.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED}, {"end.b6.insert", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT}, {"end.b6.insert.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED}, - {"udx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6}, - {"udx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4}, - {"udt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6}, - {"udt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4}, - {"udt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46}, + {"udx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX6}, + {"udx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX4}, + {"udt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT6}, + {"udt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT4}, + {"udt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT46}, {"un", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN}, {"ua", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA} }; @@ -62,796 +78,2121 @@ const map sidlist_type_map = {"encaps.red", SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED} }; -void Srv6Orch::srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert) +static bool mySidDscpModeToSai(const string& mode, sai_tunnel_dscp_mode_t& sai_mode) { - if (insert) + if (mode == "uniform") { - srv6_tunnel_table_[srv6_source].nexthops.insert(nhkey); + sai_mode = SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL; + return true; } - else + + if (mode == "pipe") { - srv6_tunnel_table_[srv6_source].nexthops.erase(nhkey); + sai_mode = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + return true; } + + return false; } -size_t Srv6Orch::srv6TunnelNexthopSize(const string srv6_source) +Srv6Orch::Srv6Orch(DBConnector *cfgDb, DBConnector *applDb, const vector& tables, SwitchOrch *switchOrch, VRFOrch *vrfOrch, NeighOrch *neighOrch): + Orch(tables), + m_vrfOrch(vrfOrch), + m_switchOrch(switchOrch), + m_neighOrch(neighOrch), + m_sidTable(applDb, APP_SRV6_SID_LIST_TABLE_NAME), + m_mysidTable(applDb, APP_SRV6_MY_SID_TABLE_NAME), + m_piccontextTable(applDb, APP_PIC_CONTEXT_TABLE_NAME), + m_mysidCfgTable(cfgDb, CFG_SRV6_MY_SID_TABLE_NAME), + m_locatorCfgTable(cfgDb, CFG_SRV6_MY_LOCATOR_TABLE_NAME), + m_counter_manager(SRV6_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, SRV6_STAT_COUNTER_POLLING_INTERVAL_MS, false) { - return srv6_tunnel_table_[srv6_source].nexthops.size(); + m_neighOrch->attach(this); + + initializeCounters(); } -bool Srv6Orch::createSrv6Tunnel(const string srv6_source) +Srv6Orch::~Srv6Orch() { - SWSS_LOG_ENTER(); - vector tunnel_attrs; - sai_attribute_t attr; - sai_status_t status; - sai_object_id_t tunnel_id; + m_neighOrch->detach(this); +} - if (srv6_tunnel_table_.find(srv6_source) != srv6_tunnel_table_.end()) +void Srv6Orch::initializeCounters() +{ + m_mysid_counters_supported = queryMySidCountersCapability(); + if (!m_mysid_counters_supported) { - SWSS_LOG_INFO("Tunnel exists for the source %s", srv6_source.c_str()); - return true; + SWSS_LOG_INFO("SRv6 counters are not supported on this platform"); + return; } - SWSS_LOG_INFO("Create tunnel for the source %s", srv6_source.c_str()); - attr.id = SAI_TUNNEL_ATTR_TYPE; - attr.value.s32 = SAI_TUNNEL_TYPE_SRV6; - tunnel_attrs.push_back(attr); - attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; - attr.value.oid = gUnderlayIfId; - tunnel_attrs.push_back(attr); - - IpAddress src_ip(srv6_source); - sai_ip_address_t ipaddr; - ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV6; - memcpy(ipaddr.addr.ip6, src_ip.getV6Addr(), sizeof(ipaddr.addr.ip6)); - attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; - attr.value.ipaddr = ipaddr; - tunnel_attrs.push_back(attr); + m_asic_db = make_shared("ASIC_DB", 0); + m_counter_db = make_shared("COUNTERS_DB", 0); + m_mysid_counters_table = make_unique
(m_counter_db.get(), COUNTERS_SRV6_NAME_MAP); - status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); - if (status != SAI_STATUS_SUCCESS) + if (gTraditionalFlexCounter) { - SWSS_LOG_ERROR("Failed to create tunnel for %s", srv6_source.c_str()); - return false; + m_vid_to_rid_table = make_unique
(m_asic_db.get(), "VIDTORID"); } - srv6_tunnel_table_[srv6_source].tunnel_object_id = tunnel_id; - return true; + + m_counter_update_timer = new SelectableTimer(timespec { .tv_sec = SRV6_FLEX_COUNTER_UPDATE_TIMER , .tv_nsec = 0 }); + auto et = new ExecutableTimer(m_counter_update_timer, this, "SRV6_FLEX_COUNTER_UPDATE_TIMER"); + Orch::addExecutor(et); } -bool Srv6Orch::srv6NexthopExists(const NextHopKey &nhKey) +bool Srv6Orch::queryMySidCountersCapability() const { - SWSS_LOG_ENTER(); - if (srv6_nexthop_table_.find(nhKey) != srv6_nexthop_table_.end()) - { - return true; - } - else + sai_attr_capability_t capability; + sai_status_t status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_MY_SID_ENTRY, SAI_MY_SID_ENTRY_ATTR_COUNTER_ID, &capability); + if (status != SAI_STATUS_SUCCESS) { + SWSS_LOG_WARN("Could not query SRv6 MySID entry attribute SAI_MY_SID_ENTRY_ATTR_COUNTER_ID %d", status); return false; } + + return capability.set_implemented && capability.create_implemented; +} + + +bool Srv6Orch::getMySidCountersEnabled() const +{ + return m_mysid_counters_enabled; +} + +bool Srv6Orch::getMySidCountersSupported() const +{ + return m_mysid_counters_supported; } -bool Srv6Orch::removeSrv6Nexthops(const NextHopGroupKey &nhg) +IpAddress Srv6Orch::getMySidAddress(const sai_my_sid_entry_t& sai_entry) const +{ + ip_addr_t ip_addr = {}; + ip_addr.family = AF_INET6; + memcpy(&ip_addr.ip_addr.ipv6_addr, sai_entry.sid, sizeof(ip_addr.ip_addr.ipv6_addr)); + + return IpAddress(ip_addr); +} + +string Srv6Orch::getMySidCounterKey(const sai_my_sid_entry_t& sai_entry) const +{ + auto mysid_addr = getMySidAddress(sai_entry).to_string(); + auto locator_cfg = getMySidEntryLocatorCfg(sai_entry); + return getMySidPrefix(mysid_addr, locator_cfg); +} + +bool Srv6Orch::addMySidCounter(const sai_my_sid_entry_t& sai_entry, sai_object_id_t& counter_oid) { SWSS_LOG_ENTER(); - for (auto &sr_nh : nhg.getNextHops()) + if (!FlowCounterHandler::createGenericCounter(counter_oid)) { - string srv6_source, segname; - sai_status_t status = SAI_STATUS_SUCCESS; - srv6_source = sr_nh.srv6_source; - segname = sr_nh.srv6_segment; + SWSS_LOG_ERROR("Failed to create SAI counter for SRv6 MySID entry"); + return false; + } - SWSS_LOG_NOTICE("SRV6 Nexthop %s refcount %d", sr_nh.to_string(false,true).c_str(), m_neighOrch->getNextHopRefCount(sr_nh)); - if (m_neighOrch->getNextHopRefCount(sr_nh) == 0) - { - status = sai_next_hop_api->remove_next_hop(srv6_nexthop_table_[sr_nh]); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to remove SRV6 nexthop %s", sr_nh.to_string(false,true).c_str()); - return false; - } + auto key = getMySidCounterKey(sai_entry); + vector fvs = { + {key, sai_serialize_object_id(counter_oid)} + }; - /* Update nexthop in SID table after deleting the nexthop */ - SWSS_LOG_INFO("Seg %s nexthop refcount %zu", - segname.c_str(), - sid_table_[segname].nexthops.size()); - if (sid_table_[segname].nexthops.find(sr_nh) != sid_table_[segname].nexthops.end()) - { - sid_table_[segname].nexthops.erase(sr_nh); - } - m_neighOrch->updateSrv6Nexthop(sr_nh, 0); - srv6_nexthop_table_.erase(sr_nh); + m_mysid_counters_table->set("", fvs); - /* Delete NH from the tunnel map */ - SWSS_LOG_INFO("Delete NH %s from tunnel map", - sr_nh.to_string(false, true).c_str()); - srv6TunnelUpdateNexthops(srv6_source, sr_nh, false); - } + auto was_empty = m_pending_counters.empty(); + m_pending_counters[counter_oid] = key; - size_t tunnel_nhs = srv6TunnelNexthopSize(srv6_source); - if (tunnel_nhs == 0) - { - status = sai_tunnel_api->remove_tunnel(srv6_tunnel_table_[srv6_source].tunnel_object_id); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to remove SRV6 tunnel object for source %s", srv6_source.c_str()); - return false; - } - srv6_tunnel_table_.erase(srv6_source); - } - else - { - SWSS_LOG_INFO("Nexthops referencing this tunnel object %s: %zu", srv6_source.c_str(),tunnel_nhs); - } + if (was_empty) + { + m_counter_update_timer->start(); } + return true; } -bool Srv6Orch::createSrv6Nexthop(const NextHopKey &nh) +void Srv6Orch::removeMySidCounter(const sai_my_sid_entry_t& sai_entry, sai_object_id_t& counter_oid) { SWSS_LOG_ENTER(); - string srv6_segment = nh.srv6_segment; - string srv6_source = nh.srv6_source; - if (srv6NexthopExists(nh)) + if (counter_oid == SAI_NULL_OBJECT_ID) { - SWSS_LOG_INFO("SRV6 nexthop already created for %s", nh.to_string(false,true).c_str()); - return true; + return; } - sai_object_id_t srv6_object_id = sid_table_[srv6_segment].sid_object_id; - sai_object_id_t srv6_tunnel_id = srv6_tunnel_table_[srv6_source].tunnel_object_id; - if (srv6_object_id == SAI_NULL_OBJECT_ID) - { - SWSS_LOG_ERROR("segment object doesn't exist for segment %s", srv6_segment.c_str()); - return false; - } + auto key = getMySidCounterKey(sai_entry); - if (srv6_tunnel_id == SAI_NULL_OBJECT_ID) + m_mysid_counters_table->hdel("", key); + + auto was_pending = m_pending_counters.erase(counter_oid) == 1; + if (!was_pending) { - SWSS_LOG_ERROR("tunnel object doesn't exist for source %s", srv6_source.c_str()); - return false; + SWSS_LOG_INFO("Unregistering SRv6 counter for %s, oid %s", key.c_str(), sai_serialize_object_id(counter_oid).c_str()); + m_counter_manager.clearCounterIdList(counter_oid); } - SWSS_LOG_INFO("Create srv6 nh for tunnel src %s with seg %s", srv6_source.c_str(), srv6_segment.c_str()); - vector nh_attrs; - sai_object_id_t nexthop_id; - sai_attribute_t attr; - sai_status_t status; - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; - nh_attrs.push_back(attr); + FlowCounterHandler::removeGenericCounter(counter_oid); + counter_oid = SAI_NULL_OBJECT_ID; +} - attr.id = SAI_NEXT_HOP_ATTR_SRV6_SIDLIST_ID; - attr.value.oid = srv6_object_id; - nh_attrs.push_back(attr); +void Srv6Orch::setMySidEntryCounter(const sai_my_sid_entry_t& sai_entry, sai_object_id_t counter_oid) +{ + SWSS_LOG_ENTER(); - attr.id = SAI_NEXT_HOP_ATTR_TUNNEL_ID; - attr.value.oid = srv6_tunnel_id; - nh_attrs.push_back(attr); + sai_attribute_t attr; + attr.id = SAI_MY_SID_ENTRY_ATTR_COUNTER_ID; + attr.value.oid = counter_oid; - status = sai_next_hop_api->create_next_hop(&nexthop_id, gSwitchId, - (uint32_t)nh_attrs.size(), - nh_attrs.data()); + auto status = sai_srv6_api->set_my_sid_entry_attribute(&sai_entry, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to create srv6 nexthop for %s", nh.to_string(false,true).c_str()); - return false; + SWSS_LOG_ERROR("Failed to set my_sid entry counter oid to %s, rc: %s", sai_serialize_object_id(counter_oid).c_str(), sai_serialize_status(status).c_str()); } - m_neighOrch->updateSrv6Nexthop(nh, nexthop_id); - srv6_nexthop_table_[nh] = nexthop_id; - sid_table_[srv6_segment].nexthops.insert(nh); - srv6TunnelUpdateNexthops(srv6_source, nh, true); - return true; } -bool Srv6Orch::srv6Nexthops(const NextHopGroupKey &nhgKey, sai_object_id_t &nexthop_id) +void Srv6Orch::setCountersState(bool enable) { SWSS_LOG_ENTER(); - set nexthops = nhgKey.getNextHops(); - string srv6_source; - string srv6_segment; - for (auto nh : nexthops) + if (!getMySidCountersSupported()) { - srv6_source = nh.srv6_source; - if (!createSrv6Tunnel(srv6_source)) - { - SWSS_LOG_ERROR("Failed to create tunnel for source %s", srv6_source.c_str()); - return false; - } - if (!createSrv6Nexthop(nh)) - { - SWSS_LOG_ERROR("Failed to create SRV6 nexthop %s", nh.to_string(false,true).c_str()); - return false; - } + SWSS_LOG_WARN("Ignoring SRv6 counters state change as they are not supported on this platform"); + return; } - if (nhgKey.getSize() == 1) + if (enable == m_mysid_counters_enabled) { - NextHopKey nhkey(nhgKey.to_string(), false, true); - nexthop_id = srv6_nexthop_table_[nhkey]; + return; } - return true; + + SWSS_LOG_NOTICE("Setting SRv6 MySID counters state to %s", enable ? "enabled" : "disabled"); + + for (auto& mysid : srv6_my_sid_table_) + { + const auto& sai_entry = mysid.second.entry; + auto &counter_oid = mysid.second.counter; + + if (enable) + { + addMySidCounter(sai_entry, counter_oid); + setMySidEntryCounter(sai_entry, counter_oid); + } else { + setMySidEntryCounter(sai_entry, SAI_NULL_OBJECT_ID); + removeMySidCounter(sai_entry, counter_oid); + } + } + + m_mysid_counters_enabled = enable; } -bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list, const string sidlist_type) +void Srv6Orch::doTask(SelectableTimer &timer) { SWSS_LOG_ENTER(); - bool exists = (sid_table_.find(sid_name) != sid_table_.end()); - sai_segment_list_t segment_list; - vectorsid_ips = tokenize(sid_list, SID_LIST_DELIMITER); - sai_object_id_t segment_oid; - segment_list.count = (uint32_t)sid_ips.size(); - if (segment_list.count == 0) - { - SWSS_LOG_ERROR("segment list count is zero, skip"); - return true; - } - SWSS_LOG_INFO("Segment count %d", segment_list.count); - segment_list.list = new sai_ip6_t[segment_list.count]; - uint32_t index = 0; - for (string ip_str : sid_ips) - { - IpPrefix ip(ip_str); - SWSS_LOG_INFO("Segment %s, count %d", ip.to_string().c_str(), segment_list.count); - memcpy(segment_list.list[index++], ip.getIp().getV6Addr(), 16); - } - sai_attribute_t attr; - sai_status_t status; - if (!exists) + string value; + for (auto it = m_pending_counters.begin(); it != m_pending_counters.end();) { - /* Create sidlist object with list of ipv6 prefixes */ - SWSS_LOG_INFO("Create SID list"); - vector attributes; - attr.id = SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST; - attr.value.segmentlist.list = segment_list.list; - attr.value.segmentlist.count = segment_list.count; - attributes.push_back(attr); - - attr.id = SAI_SRV6_SIDLIST_ATTR_TYPE; - if (sidlist_type_map.find(sidlist_type) == sidlist_type_map.end()) + const auto oid = sai_serialize_object_id(it->first); + if (!gTraditionalFlexCounter || m_vid_to_rid_table->hget("", oid, value)) { - SWSS_LOG_INFO("Use default sidlist type: ENCAPS_RED"); - attr.value.s32 = SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED; + SWSS_LOG_INFO("Registering SRv6 counter for %s, oid %s", it->second.c_str(), oid.c_str()); + + unordered_set counter_stats; + FlowCounterHandler::getGenericCounterStatIdList(counter_stats); + m_counter_manager.setCounterIdList(it->first, CounterType::SRV6, counter_stats); + it = m_pending_counters.erase(it); } else { - SWSS_LOG_INFO("sidlist type: %s", sidlist_type.c_str()); - attr.value.s32 = sidlist_type_map.at(sidlist_type); - } - attributes.push_back(attr); - status = sai_srv6_api->create_srv6_sidlist(&segment_oid, gSwitchId, (uint32_t) attributes.size(), attributes.data()); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to create srv6 sidlist object, rv %d", status); - return false; + ++it; } - sid_table_[sid_name].sid_object_id = segment_oid; } - else - { - SWSS_LOG_INFO("Set SID list"); - /* Update sidlist object with new set of ipv6 addresses */ - attr.id = SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST; - attr.value.segmentlist.list = segment_list.list; - attr.value.segmentlist.count = segment_list.count; - segment_oid = (sid_table_.find(sid_name)->second).sid_object_id; - status = sai_srv6_api->set_srv6_sidlist_attribute(segment_oid, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to set srv6 sidlist object with new segments, rv %d", status); - return false; - } + if (m_pending_counters.empty()) + { + m_counter_update_timer->stop(); } - delete segment_list.list; - return true; } -bool Srv6Orch::deleteSidList(const string sid_name) +MySidLocatorCfg Srv6Orch::getMySidEntryLocatorCfg(const sai_my_sid_entry_t& sai_entry) const { - SWSS_LOG_ENTER(); - sai_status_t status = SAI_STATUS_SUCCESS; - if (sid_table_.find(sid_name) == sid_table_.end()) - { - SWSS_LOG_ERROR("segment name %s doesn't exist", sid_name.c_str()); - return false; - } + return { + sai_entry.locator_block_len, + sai_entry.locator_node_len, + sai_entry.function_len, + sai_entry.args_len, + }; +} - if (sid_table_[sid_name].nexthops.size() > 1) - { - SWSS_LOG_NOTICE("segment object %s referenced by other nexthops: count %zu, not deleting", - sid_name.c_str(), sid_table_[sid_name].nexthops.size()); - return false; - } - SWSS_LOG_INFO("Remove sid list, segname %s", sid_name.c_str()); - status = sai_srv6_api->remove_srv6_sidlist(sid_table_[sid_name].sid_object_id); - if (status != SAI_STATUS_SUCCESS) + +string Srv6Orch::getMySidPrefix(const string& my_sid_addr, const MySidLocatorCfg& locator_cfg) const +{ + return my_sid_addr + "/" + to_string(locator_cfg.block_len + locator_cfg.node_len + locator_cfg.func_len); +} + +bool Srv6Orch::getLocatorCfgFromDb(const string& locator, MySidLocatorCfg& cfg) +{ + vector fvs; + auto exists = m_locatorCfgTable.get(locator, fvs); + if (!exists) { - SWSS_LOG_ERROR("Failed to delete SRV6 sidlist object for %s", sid_name.c_str()); + SWSS_LOG_ERROR("Failed to get the SRv6 locator %s - not present in the CONFIG_DB", locator.c_str()); return false; } - sid_table_.erase(sid_name); + + auto blen = fvsGetValue(fvs, "block_len", true); + auto nlen = fvsGetValue(fvs, "node_len", true); + auto flen = fvsGetValue(fvs, "func_len", true); + auto alen = fvsGetValue(fvs, "arg_len", true); + + cfg = { + (uint8_t)stoi(blen.get_value_or(LOCATOR_DEFAULT_BLOCK_LEN)), + (uint8_t)stoi(nlen.get_value_or(LOCATOR_DEFAULT_NODE_LEN)), + (uint8_t)stoi(flen.get_value_or(LOCATOR_DEFAULT_FUNC_LEN)), + (uint8_t)stoi(alen.get_value_or(LOCATOR_DEFAULT_ARG_LEN)) + }; + return true; } -void Srv6Orch::doTaskSidTable(const KeyOpFieldsValuesTuple & tuple) +bool Srv6Orch::reverseLookupLocator(const vector& candidates, const MySidLocatorCfg& locator_cfg, string& locator) { - SWSS_LOG_ENTER(); - string sid_name = kfvKey(tuple); - string op = kfvOp(tuple); - string sid_list, sidlist_type; - - for (auto i : kfvFieldsValues(tuple)) + for (const auto& candidate: candidates) { - if (fvField(i) == "path") - { - sid_list = fvValue(i); + MySidLocatorCfg cfg; + auto ok = getLocatorCfgFromDb(candidate, cfg); + if (!ok) { + continue; } - if (fvField(i) == "type") + + if (locator_cfg == cfg) { - sidlist_type = fvValue(i); + SWSS_LOG_DEBUG("Found a locator %s matching the config", candidate.c_str()); + locator = candidate; + return true; } } - if (op == SET_COMMAND) + + return false; +} + +void Srv6Orch::addMySidCfgCacheEntry(const string& my_sid_key, const vector& fvs) +{ + auto key_list = tokenize(my_sid_key, '|'); + auto locator = key_list[0]; + auto my_sid_prefix = key_list[1]; + + auto cfg = fvsGetValue(fvs, "decap_dscp_mode", false); + if (!cfg) { - if (!createUpdateSidList(sid_name, sid_list, sidlist_type)) - { - SWSS_LOG_ERROR("Failed to process sid %s", sid_name.c_str()); - } + SWSS_LOG_ERROR("MySID entry %s doesn't have mandatory decap_dscp_mode configuration", my_sid_prefix.c_str()); + return; } - else if (op == DEL_COMMAND) + + sai_tunnel_dscp_mode_t dscp_mode; + if (!mySidDscpModeToSai(*cfg, dscp_mode)) + { + SWSS_LOG_ERROR("Invalid MySID %s DSCP mode: %s", my_sid_prefix.c_str(), cfg->c_str()); + return; + } + + my_sid_dscp_cfg_cache_.insert({my_sid_prefix, {locator, dscp_mode}}); + SWSS_LOG_INFO("Saving MySID entry %s %s DSCP mode %s", locator.c_str(), my_sid_prefix.c_str(), cfg->c_str()); +} + +void Srv6Orch::removeMySidCfgCacheEntry(const string& my_sid_key) +{ + auto key_list = tokenize(my_sid_key, '|'); + auto locator = key_list[0]; + auto my_sid_prefix = key_list[1]; + + auto cfg_cache = my_sid_dscp_cfg_cache_.equal_range(my_sid_prefix); + for (auto it = cfg_cache.first; it != cfg_cache.second; ++it) { - if (!deleteSidList(sid_name)) + if (it->second.first == locator) { - SWSS_LOG_ERROR("Failed to delete sid %s", sid_name.c_str()); + my_sid_dscp_cfg_cache_.erase(it); + break; } - } else { - SWSS_LOG_ERROR("Invalid command"); } } -bool Srv6Orch::mySidExists(string my_sid_string) +void Srv6Orch::mySidCfgCacheRefresh() { - if (srv6_my_sid_table_.find(my_sid_string) != srv6_my_sid_table_.end()) + SWSS_LOG_INFO("Refreshing SRv6 MySID configuration cache"); + + vector entries; + m_mysidCfgTable.getContent(entries); + + for (const auto& entry : entries) { - return true; + addMySidCfgCacheEntry(kfvKey(entry), kfvFieldsValues(entry)); } - return false; } -/* - * Neighbor change notification to be processed for the SRv6 MySID entries - * - * In summary, this function handles both add and delete neighbor notifications - * - * When a neighbor ADD notification is received, we do the following steps: - * - We walk through the list of pending SRv6 MySID entries that are waiting for this neighbor to be ready - * - For each SID, we install the SID into the ASIC - * - We remove the SID from the pending MySID entries list - * - * When a neighbor DELETE notification is received, we do the following steps: - * - We walk through the list of pending SRv6 MySID entries installed in the ASIC - * - For each SID, we remove the SID from the ASIC - * - We add the SID to the pending MySID entries list - */ -void Srv6Orch::updateNeighbor(const NeighborUpdate& update) +bool Srv6Orch::getMySidEntryDscpMode(const string& my_sid_addr, const MySidLocatorCfg& locator_cfg, sai_tunnel_dscp_mode_t& dscp_mode) { - SWSS_LOG_ENTER(); + auto my_sid_prefix = getMySidPrefix(my_sid_addr, locator_cfg); - /* Check if the received notification is a neighbor add or a neighbor delete */ - if (update.add) + auto cfg_cache = my_sid_dscp_cfg_cache_.equal_range(my_sid_prefix); + if (cfg_cache.first == my_sid_dscp_cfg_cache_.end()) { - /* - * It's a neighbor add notification, let's walk through the list of SRv6 MySID entries - * that are waiting for that neighbor to be ready, and install them into the ASIC. - */ - - SWSS_LOG_INFO("Neighbor ADD event: %s alias '%s', installing pending SRv6 SIDs", - update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + mySidCfgCacheRefresh(); - auto it = m_pendingSRv6MySIDEntries.find(NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)); - if (it == m_pendingSRv6MySIDEntries.end()) + cfg_cache = my_sid_dscp_cfg_cache_.equal_range(my_sid_prefix); + if (cfg_cache.first == my_sid_dscp_cfg_cache_.end()) { - /* No SID is waiting for this neighbor. Nothing to do */ - return; + SWSS_LOG_INFO("SRv6 MySID entry %s is not available in the CONFIG_DB", my_sid_prefix.c_str()); + return false; } - auto &nexthop_key = it->first; - auto &pending_my_sid_entries = it->second; + } - for (auto iter = pending_my_sid_entries.begin(); iter != pending_my_sid_entries.end();) - { - string my_sid_string = get<0>(*iter); - const string dt_vrf = get<1>(*iter); - const string adj = get<2>(*iter); - const string end_action = get<3>(*iter); + auto cache_start = cfg_cache.first; + auto cache_end = cfg_cache.second; - SWSS_LOG_INFO("Creating SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(), adj.c_str()); - - if(!createUpdateMysidEntry(my_sid_string, dt_vrf, adj, end_action)) - { - SWSS_LOG_ERROR("Failed to create/update my_sid entry for sid %s", my_sid_string.c_str()); - ++iter; - continue; - } + if (distance(cache_start, cache_end) == 1) + { + const Srv6MySidDscpCfgCacheVal& cache_val = cache_start->second; + dscp_mode = cache_val.second; - SWSS_LOG_INFO("SID %s created successfully", my_sid_string.c_str()); + SWSS_LOG_INFO("Found decap DSCP mode for MySID addr %s locator %s in the cache", my_sid_prefix.c_str(), cache_val.first.c_str()); + return true; + } - iter = pending_my_sid_entries.erase(iter); - } + // There are multiple mysid entries with the same address but different locators + vector locator_candidates; + transform(cache_start, cache_end, back_inserter(locator_candidates), + [](const auto& v) { return v.second.first; }); - if (pending_my_sid_entries.size() == 0) + string locator; + auto found = reverseLookupLocator(locator_candidates, locator_cfg, locator); + if (!found) + { + SWSS_LOG_ERROR("Cannot find a locator in the CONFIG DB for MySID Entry %s", my_sid_prefix.c_str()); + return false; + } + + for (auto it = cache_start; it != cache_end; ++it) + { + const Srv6MySidDscpCfgCacheVal& cache_val = it->second; + if (cache_val.first == locator) { - m_pendingSRv6MySIDEntries.erase(nexthop_key); + SWSS_LOG_INFO("Found decap DSCP mode for MySID addr %s locator %s after locator reverse lookup", my_sid_prefix.c_str(), locator.c_str()); + dscp_mode = cache_val.second; + return true; } } - else - { - /* - * It's a neighbor delete notification, let's uninstall the SRv6 MySID entries associated with that - * nexthop from the ASIC, and add them to the SRv6 MySID entries pending set. - */ - SWSS_LOG_INFO("Neighbor DELETE event: %s alias '%s', removing associated SRv6 SIDs", - update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + return false; +} - for (auto it = srv6_my_sid_table_.begin(); it != srv6_my_sid_table_.end();) - { - /* Skip SIDs that are not associated with a L3 Adjacency */ - if (it->second.endAdjString.empty()) - { - ++it; - continue; - } +bool Srv6Orch::initIpInIpTunnel(MySidIpInIpTunnel& tunnel, sai_tunnel_dscp_mode_t dscp_mode) +{ + SWSS_LOG_ENTER(); - try - { - /* Skip SIDs that are not associated with this neighbor */ - if (IpAddress(it->second.endAdjString) != update.entry.ip_address) - { - ++it; - continue; - } - } - catch (const std::invalid_argument &e) - { - /* SRv6 SID is associated with an invalid L3 Adjacency IP address, skipping */ - ++it; - continue; - } + vector overlay_intf_attrs; + sai_attribute_t attr; - /* - * Save SID entry information to temp variables, before removing the SID. - * This information will be consumed used later. - */ - string my_sid_string = it->first; - const string dt_vrf = it->second.endVrfString; - const string adj = it->second.endAdjString; - string end_action; - for (auto iter = end_behavior_map.begin(); iter != end_behavior_map.end(); iter++) - { - if (iter->second == it->second.endBehavior) - { - end_action = iter->first; - break; - } - } + attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; + attr.value.oid = gVirtualRouterId; + overlay_intf_attrs.push_back(attr); - /* Skip SIDs with unknown SRv6 behavior */ - if (end_action.empty()) - { - ++it; - continue; - } + attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; + attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; + overlay_intf_attrs.push_back(attr); - SWSS_LOG_INFO("Removing SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), dt_vrf.c_str(), adj.c_str(), end_action.c_str()); + attr.id = SAI_ROUTER_INTERFACE_ATTR_MTU; + attr.value.u32 = OVERLAY_RIF_DEFAULT_MTU; + overlay_intf_attrs.push_back(attr); - /* Let's delete the SID from the ASIC */ - unordered_map::iterator tmp = it; - ++tmp; - if(!deleteMysidEntry(it->first)) - { - SWSS_LOG_ERROR("Failed to delete my_sid entry for sid %s", it->first.c_str()); - ++it; - continue; - } - it = tmp; + auto status = sai_router_intfs_api->create_router_interface(&tunnel.overlay_rif_oid, gSwitchId, (uint32_t)overlay_intf_attrs.size(), overlay_intf_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create overlay router interface for MySID IPinIP tunnel: %d", status); + return false; + } - SWSS_LOG_INFO("SID %s removed successfully", my_sid_string.c_str()); + vector tunnel_attrs; - /* - * Finally, add the SID to the pending MySID entries set, so that we can re-install it - * when the neighbor comes back - */ - auto pending_mysid_entry = make_tuple(my_sid_string, dt_vrf, adj, end_action); - m_pendingSRv6MySIDEntries[NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)].insert(pending_mysid_entry); - } - } -} + attr.id = SAI_TUNNEL_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_IPINIP; + tunnel_attrs.push_back(attr); -void Srv6Orch::update(SubjectType type, void *cntx) -{ - SWSS_LOG_ENTER(); + attr.id = SAI_TUNNEL_ATTR_OVERLAY_INTERFACE; + attr.value.oid = tunnel.overlay_rif_oid; + tunnel_attrs.push_back(attr); - assert(cntx); + attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + attr.value.oid = gUnderlayIfId; + tunnel_attrs.push_back(attr); - switch(type) { - case SUBJECT_TYPE_NEIGH_CHANGE: + attr.id = SAI_TUNNEL_ATTR_PEER_MODE; + attr.value.s32 = SAI_TUNNEL_PEER_MODE_P2MP; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_DECAP_DSCP_MODE; + attr.value.s32 = dscp_mode; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_DECAP_TTL_MODE; + attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; + tunnel_attrs.push_back(attr); + + status = sai_tunnel_api->create_tunnel(&tunnel.tunnel_oid, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); + if (status != SAI_STATUS_SUCCESS) { - NeighborUpdate *update = static_cast(cntx); - updateNeighbor(*update); - break; - } - default: - // Received update in which we are not interested - // Ignore it - return; + SWSS_LOG_ERROR("Failed to create MySID IPinIP tunnel: %d", status); + return false; } + + SWSS_LOG_INFO("Created MySID IPinIP tunnel"); + + return true; } -bool Srv6Orch::sidEntryEndpointBehavior(string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, - sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor) +bool Srv6Orch::deinitIpInIpTunnel(MySidIpInIpTunnel& tunnel) { - if (end_behavior_map.find(action) == end_behavior_map.end()) + SWSS_LOG_ENTER(); + + auto status = sai_tunnel_api->remove_tunnel(tunnel.tunnel_oid); + if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Invalid endpoint behavior function"); + SWSS_LOG_ERROR("Failed to remove MySID IPinIP tunnel: %d", status); return false; } - end_behavior = end_behavior_map.at(action); - if (end_flavor_map.find(action) != end_flavor_map.end()) + tunnel.tunnel_oid = SAI_NULL_OBJECT_ID; + + status = sai_router_intfs_api->remove_router_interface(tunnel.overlay_rif_oid); + if (status != SAI_STATUS_SUCCESS) { - end_flavor = end_flavor_map.at(action); + SWSS_LOG_ERROR("Failed to remove MySID IPinIP tunnel RIF: %d", status); + return false; } + tunnel.overlay_rif_oid = SAI_NULL_OBJECT_ID; + + SWSS_LOG_INFO("Removed MySID IPinIP tunnel"); + return true; } -bool Srv6Orch::mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior) +bool Srv6Orch::createMySidIpInIpTunnel(sai_tunnel_dscp_mode_t dscp_mode, sai_object_id_t& tunnel_oid) { - if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_T || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4 || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6 || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46) + SWSS_LOG_ENTER(); + + MySidIpInIpTunnel& uniform_tunnel = my_sid_ipinip_tunnels_.dscp_uniform_tunnel; + MySidIpInIpTunnel& pipe_tunnel = my_sid_ipinip_tunnels_.dscp_pipe_tunnel; + + MySidIpInIpTunnel& tunnel_info = (dscp_mode == SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL) ? uniform_tunnel : pipe_tunnel; + if (tunnel_info.refcount == 0) { - return true; + auto ok = initIpInIpTunnel(tunnel_info, dscp_mode); + if (!ok) { + return false; + } } - return false; + + tunnel_info.refcount++; + tunnel_oid = tunnel_info.tunnel_oid; + + SWSS_LOG_INFO("Increased refcount for MySID IPinIP tunnel to %" PRIu64, tunnel_info.refcount); + + return true; } -bool Srv6Orch::mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior) +bool Srv6Orch::removeMySidIpInIpTunnel(sai_tunnel_dscp_mode_t dscp_mode) { - if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4 || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6 || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED || - end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA) + SWSS_LOG_ENTER(); + + MySidIpInIpTunnel& uniform_tunnel = my_sid_ipinip_tunnels_.dscp_uniform_tunnel; + MySidIpInIpTunnel& pipe_tunnel = my_sid_ipinip_tunnels_.dscp_pipe_tunnel; + + MySidIpInIpTunnel& tunnel_info = (dscp_mode == SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL) ? uniform_tunnel : pipe_tunnel; + tunnel_info.refcount--; + + SWSS_LOG_INFO("Decreased refcount for MySID IPinIP tunnel to %" PRIu64, tunnel_info.refcount); + + if (tunnel_info.refcount == 0) { - return true; + return deinitIpInIpTunnel(tunnel_info); } - return false; + + return true; } -bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, const string adj, const string end_action) +bool Srv6Orch::createMySidIpInIpTunnelTermEntry(sai_object_id_t tunnel_oid, const sai_ip6_t& sid_ip, sai_object_id_t& term_entry_oid) { SWSS_LOG_ENTER(); - vector attributes; + + vector tunnel_table_entry_attrs; sai_attribute_t attr; - string key_string = my_sid_string; - sai_my_sid_entry_endpoint_behavior_t end_behavior; - sai_my_sid_entry_endpoint_behavior_flavor_t end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD; - bool entry_exists = false; - if (mySidExists(key_string)) + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID; + attr.value.oid = gVirtualRouterId; + tunnel_table_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + tunnel_table_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_IPINIP; + tunnel_table_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID; + attr.value.oid = tunnel_oid; + tunnel_table_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP; + attr.value.ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(attr.value.ipaddr.addr.ip6, sid_ip, sizeof(attr.value.ipaddr.addr.ip6)); + tunnel_table_entry_attrs.push_back(attr); + + auto status = sai_tunnel_api->create_tunnel_term_table_entry(&term_entry_oid, gSwitchId, (uint32_t)tunnel_table_entry_attrs.size(), tunnel_table_entry_attrs.data()); + if (status != SAI_STATUS_SUCCESS) { - entry_exists = true; + SWSS_LOG_ERROR("Failed to create tunnel termination entry for MySID - %d", status); + return false; } - sai_my_sid_entry_t my_sid_entry; - if (!entry_exists) + SWSS_LOG_INFO("Created tunnel termination entry for MySID entry"); + + return true; +} + +bool Srv6Orch::removeMySidIpInIpTunnelTermEntry(sai_object_id_t term_entry_oid) +{ + SWSS_LOG_ENTER(); + + auto status = sai_tunnel_api->remove_tunnel_term_table_entry(term_entry_oid); + if (status != SAI_STATUS_SUCCESS) { - vectorkeys = tokenize(my_sid_string, MY_SID_KEY_DELIMITER); + SWSS_LOG_ERROR("Failed to remove tunnel termination entry for MySID entry - %d", status); + return false; + } - my_sid_entry.vr_id = gVirtualRouterId; - my_sid_entry.switch_id = gSwitchId; - my_sid_entry.locator_block_len = (uint8_t)stoi(keys[0]); - my_sid_entry.locator_node_len = (uint8_t)stoi(keys[1]); - my_sid_entry.function_len = (uint8_t)stoi(keys[2]); - my_sid_entry.args_len = (uint8_t)stoi(keys[3]); - size_t keylen = keys[0].length()+keys[1].length()+keys[2].length()+keys[3].length() + 4; - my_sid_string.erase(0, keylen); - string my_sid = my_sid_string; - SWSS_LOG_INFO("MY SID STRING %s", my_sid.c_str()); - IpAddress address(my_sid); - memcpy(my_sid_entry.sid, address.getV6Addr(), sizeof(my_sid_entry.sid)); + SWSS_LOG_INFO("Removed tunnel termination entry for MySID entry"); + + return true; +} + +void Srv6Orch::srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert) +{ + if (insert) + { + srv6_tunnel_table_[srv6_source].nexthops.insert(nhkey); + } + else + { + srv6_tunnel_table_[srv6_source].nexthops.erase(nhkey); + } +} + +size_t Srv6Orch::srv6TunnelNexthopSize(const string srv6_source) +{ + return srv6_tunnel_table_[srv6_source].nexthops.size(); +} + +bool Srv6Orch::sidListExists(const string &segment_name) +{ + SWSS_LOG_ENTER(); + if (sid_table_.find(segment_name) != sid_table_.end()) + { + return true; + } + return false; +} + +bool Srv6Orch::createSrv6Tunnel(const string srv6_source) +{ + SWSS_LOG_ENTER(); + vector tunnel_attrs; + sai_attribute_t attr; + sai_status_t status; + sai_object_id_t tunnel_id; + + if (srv6_tunnel_table_.find(srv6_source) != srv6_tunnel_table_.end()) + { + SWSS_LOG_INFO("Tunnel exists for the source %s", srv6_source.c_str()); + return true; + } + + SWSS_LOG_INFO("Create tunnel for the source %s", srv6_source.c_str()); + attr.id = SAI_TUNNEL_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_SRV6; + tunnel_attrs.push_back(attr); + attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + attr.value.oid = gUnderlayIfId; + tunnel_attrs.push_back(attr); + + IpAddress src_ip(srv6_source); + sai_ip_address_t ipaddr; + ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(ipaddr.addr.ip6, src_ip.getV6Addr(), sizeof(ipaddr.addr.ip6)); + attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + attr.value.ipaddr = ipaddr; + tunnel_attrs.push_back(attr); + + status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create tunnel for %s", srv6_source.c_str()); + return false; + } + srv6_tunnel_table_[srv6_source].tunnel_object_id = tunnel_id; + return true; +} + +bool Srv6Orch::srv6NexthopExists(const NextHopKey &nhKey) +{ + SWSS_LOG_ENTER(); + if (srv6_nexthop_table_.find(nhKey) != srv6_nexthop_table_.end()) + { + return true; + } + else + { + return false; + } +} + +bool Srv6Orch::removeSrv6NexthopWithoutVpn(const NextHopKey &nhKey) +{ + SWSS_LOG_ENTER(); + return deleteSrv6Nexthop(nhKey); +} + +bool Srv6Orch::removeSrv6Nexthops(const std::vector &nhgv) +{ + SWSS_LOG_ENTER(); + + // 1. remove vpn_sid first + for (auto& it_nhg : nhgv) + { + if (it_nhg.is_srv6_vpn()) + { + for (auto &sr_nh : it_nhg.getNextHops()) + { + if (sr_nh.isSrv6Vpn()) + { + if (!deleteSrv6Vpn(sr_nh.ip_address.to_string(), sr_nh.srv6_vpn_sid, getAggId(it_nhg))) + { + SWSS_LOG_ERROR("Failed to delete SRV6 vpn %s", sr_nh.to_string(false, true).c_str()); + return false; + } + } + } + decreasePrefixAggIdRefCount(it_nhg); + deleteAggId(it_nhg); + } + } + + // 2. delete nexthop & prefix agg id + for (auto& nhg : nhgv) + { + for (auto &sr_nh : nhg.getNextHops()) + { + if (!deleteSrv6Nexthop(sr_nh)) + { + SWSS_LOG_ERROR("Failed to delete SRV6 nexthop %s", sr_nh.to_string(false,true).c_str()); + return false; + } + } + } + + return true; +} + +bool Srv6Orch::createSrv6Nexthop(const NextHopKey &nh) +{ + SWSS_LOG_ENTER(); + string srv6_segment = nh.srv6_segment; + string srv6_source = nh.srv6_source; + string srv6_tunnel_endpoint; + + if (srv6NexthopExists(nh)) + { + SWSS_LOG_INFO("SRV6 nexthop already created for %s", nh.to_string(false,true).c_str()); + return true; + } + + sai_object_id_t srv6_segment_id; + sai_object_id_t srv6_tunnel_id; + + if (srv6_segment == "") + { + srv6_segment_id = SAI_NULL_OBJECT_ID; + } + else + { + if (!sidListExists(srv6_segment)) + { + SWSS_LOG_ERROR("Segment %s does not exist", srv6_segment.c_str()); + return false; + } + srv6_segment_id = sid_table_[srv6_segment].sid_object_id; + } + + if (nh.ip_address.isZero()) + { + srv6_tunnel_endpoint = srv6_source; + srv6_tunnel_id = srv6_tunnel_table_[srv6_tunnel_endpoint].tunnel_object_id; + } + else + { + srv6_tunnel_endpoint = nh.ip_address.to_string(); + srv6_tunnel_id = srv6_p2p_tunnel_table_[srv6_tunnel_endpoint].tunnel_id; + } + + SWSS_LOG_INFO("Create srv6 nh for tunnel src %s with seg %s", srv6_source.c_str(), srv6_segment.c_str()); + vector nh_attrs; + sai_object_id_t nexthop_id; + sai_attribute_t attr; + sai_status_t status; + + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; + nh_attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_ATTR_SRV6_SIDLIST_ID; + attr.value.oid = srv6_segment_id; + nh_attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_ATTR_TUNNEL_ID; + attr.value.oid = srv6_tunnel_id; + nh_attrs.push_back(attr); + + status = sai_next_hop_api->create_next_hop(&nexthop_id, gSwitchId, + (uint32_t)nh_attrs.size(), + nh_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create srv6 nexthop for %s", nh.to_string(false,true).c_str()); + return false; + } + m_neighOrch->updateSrv6Nexthop(nh, nexthop_id); + srv6_nexthop_table_[nh] = nexthop_id; + if (srv6_segment != "") + { + sid_table_[srv6_segment].nexthops.insert(nh); + } + + if (nh.ip_address.isZero()) + { + srv6TunnelUpdateNexthops(srv6_source, nh, true); + } + else + { + srv6P2ptunnelUpdateNexthops(nh, true); + } + return true; +} + +bool Srv6Orch::deleteSrv6Nexthop(const NextHopKey &nh) +{ + SWSS_LOG_ENTER(); + + sai_status_t status = SAI_STATUS_SUCCESS; + + if (!srv6NexthopExists(nh)) + { + return true; + } + + SWSS_LOG_DEBUG("SRV6 Nexthop %s refcount %d", nh.to_string(false,true).c_str(), m_neighOrch->getNextHopRefCount(nh)); + if (m_neighOrch->getNextHopRefCount(nh) == 0) + { + sai_object_id_t nexthop_id; + nexthop_id = srv6_nexthop_table_[nh]; + status = sai_next_hop_api->remove_next_hop(nexthop_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 nexthop %s", nh.to_string(false,true).c_str()); + return false; + } + + /* Decrease srv6 segment reference */ + if (nh.srv6_segment != "") + { + /* Update nexthop in SID table after deleting the nexthop */ + SWSS_LOG_INFO("Seg %s nexthop refcount %zu", + nh.srv6_segment.c_str(), + sid_table_[nh.srv6_segment].nexthops.size()); + if (sid_table_[nh.srv6_segment].nexthops.find(nh) != sid_table_[nh.srv6_segment].nexthops.end()) + { + sid_table_[nh.srv6_segment].nexthops.erase(nh); + } + } + m_neighOrch->updateSrv6Nexthop(nh, 0); + + srv6_nexthop_table_.erase(nh); + + /* Delete NH from the tunnel map */ + SWSS_LOG_INFO("Delete NH %s from tunnel map", + nh.to_string(false, true).c_str()); + + if (nh.ip_address.isZero()) + { + string srv6_source = nh.srv6_source; + srv6TunnelUpdateNexthops(srv6_source, nh, false); + size_t tunnel_nhs = srv6TunnelNexthopSize(srv6_source); + if (tunnel_nhs == 0) + { + status = sai_tunnel_api->remove_tunnel(srv6_tunnel_table_[srv6_source].tunnel_object_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 tunnel object for source %s", srv6_source.c_str()); + return false; + } + srv6_tunnel_table_.erase(srv6_source); + } + else + { + SWSS_LOG_INFO("Nexthops referencing this tunnel object %s: %zu", srv6_source.c_str(),tunnel_nhs); + } + } + else + { + std::string endpoint = nh.ip_address.to_string(); + srv6P2ptunnelUpdateNexthops(nh, false); + if (!deleteSrv6P2pTunnel(endpoint)) + { + SWSS_LOG_ERROR("Failed to remove SRV6 p2p tunnel object for dst %s,", endpoint.c_str()); + return false; + } + } + } + + return true; +} + +bool Srv6Orch::createSrv6NexthopWithoutVpn(const NextHopKey &nh, sai_object_id_t &nexthop_id) +{ + SWSS_LOG_ENTER(); + + // 1. create tunnel + if (nh.ip_address.isZero()) + { + // create srv6 tunnel + auto srv6_source = nh.srv6_source; + if (!createSrv6Tunnel(srv6_source)) + { + SWSS_LOG_ERROR("Failed to create tunnel for source %s", srv6_source.c_str()); + return false; + } + } + else + { + // create p2p tunnel + if (!createSrv6P2pTunnel(nh.srv6_source, nh.ip_address.to_string())) + { + SWSS_LOG_ERROR("Failed to create SRV6 p2p tunnel %s", nh.to_string(false, true).c_str()); + return false; + } + } + + // 2. create nexthop + if (!createSrv6Nexthop(nh)) + { + SWSS_LOG_ERROR("Failed to create SRV6 nexthop %s", nh.to_string(false,true).c_str()); + return false; + } + + nexthop_id = srv6_nexthop_table_[nh]; + return true; +} + +bool Srv6Orch::srv6Nexthops(const NextHopGroupKey &nhgKey, sai_object_id_t &nexthop_id) +{ + SWSS_LOG_ENTER(); + set nexthops = nhgKey.getNextHops(); + + for (auto nh : nexthops) + { + // create SRv6 nexthop + if (!createSrv6NexthopWithoutVpn(nh, nexthop_id)) + { + SWSS_LOG_ERROR("Failed to create SRv6 nexthop %s", nh.to_string(false, true).c_str()); + return false; + } + } + + // create SRv6 VPN if need + if (nhgKey.is_srv6_vpn()) + { + for (auto it = nexthops.begin(); it != nexthops.end(); ++it) + { + if (it->isSrv6Vpn()) + { + if (!createSrv6Vpn(it->ip_address.to_string(), it->srv6_vpn_sid, getAggId(nhgKey))) + { + SWSS_LOG_ERROR("Failed to create SRV6 vpn %s", it->to_string(false, true).c_str()); + return false; + } + } + } + + increasePrefixAggIdRefCount(nhgKey); + } + + if (nhgKey.getSize() == 1) + { + NextHopKey nhkey(nhgKey.to_string(), false, true); + nexthop_id = srv6_nexthop_table_[nhkey]; + } + return true; +} + +bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list, const string sidlist_type) +{ + SWSS_LOG_ENTER(); + bool exists = (sid_table_.find(sid_name) != sid_table_.end()) && sid_table_[sid_name].sid_object_id; + sai_segment_list_t segment_list; + vectorsid_ips = tokenize(sid_list, SID_LIST_DELIMITER); + sai_object_id_t segment_oid; + segment_list.count = (uint32_t)sid_ips.size(); + if (segment_list.count == 0) + { + SWSS_LOG_ERROR("segment list count is zero, skip"); + return true; + } + SWSS_LOG_INFO("Segment count %d", segment_list.count); + segment_list.list = new sai_ip6_t[segment_list.count]; + uint32_t index = 0; + + for (string ip_str : sid_ips) + { + IpPrefix ip(ip_str); + SWSS_LOG_INFO("Segment %s, count %d", ip.to_string().c_str(), segment_list.count); + memcpy(segment_list.list[index++], ip.getIp().getV6Addr(), 16); + } + sai_attribute_t attr; + sai_status_t status; + if (!exists) + { + /* Create sidlist object with list of ipv6 prefixes */ + SWSS_LOG_INFO("Create SID list"); + vector attributes; + attr.id = SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST; + attr.value.segmentlist.list = segment_list.list; + attr.value.segmentlist.count = segment_list.count; + attributes.push_back(attr); + + attr.id = SAI_SRV6_SIDLIST_ATTR_TYPE; + if (sidlist_type_map.find(sidlist_type) == sidlist_type_map.end()) + { + SWSS_LOG_INFO("Use default sidlist type: ENCAPS_RED"); + attr.value.s32 = SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED; + } + else + { + SWSS_LOG_INFO("sidlist type: %s", sidlist_type.c_str()); + attr.value.s32 = sidlist_type_map.at(sidlist_type); + } + attributes.push_back(attr); + status = sai_srv6_api->create_srv6_sidlist(&segment_oid, gSwitchId, (uint32_t) attributes.size(), attributes.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create srv6 sidlist object, rv %d", status); + return false; + } + sid_table_[sid_name].sid_object_id = segment_oid; + } + else + { + SWSS_LOG_INFO("Set SID list"); + + /* Update sidlist object with new set of ipv6 addresses */ + attr.id = SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST; + attr.value.segmentlist.list = segment_list.list; + attr.value.segmentlist.count = segment_list.count; + segment_oid = (sid_table_.find(sid_name)->second).sid_object_id; + status = sai_srv6_api->set_srv6_sidlist_attribute(segment_oid, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set srv6 sidlist object with new segments, rv %d", status); + return false; + } + } + delete segment_list.list; + return true; +} + +task_process_status Srv6Orch::deleteSidList(const string sid_name) +{ + SWSS_LOG_ENTER(); + sai_status_t status = SAI_STATUS_SUCCESS; + if (sid_table_.find(sid_name) == sid_table_.end()) + { + SWSS_LOG_ERROR("segment name %s doesn't exist", sid_name.c_str()); + return task_process_status::task_failed; + } + + if (sid_table_[sid_name].nexthops.size() > 0) + { + SWSS_LOG_NOTICE("segment object %s referenced by other nexthops: count %zu, not deleting", + sid_name.c_str(), sid_table_[sid_name].nexthops.size()); + return task_process_status::task_need_retry; + } + SWSS_LOG_INFO("Remove sid list, segname %s", sid_name.c_str()); + status = sai_srv6_api->remove_srv6_sidlist(sid_table_[sid_name].sid_object_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to delete SRV6 sidlist object for %s", sid_name.c_str()); + return task_process_status::task_failed; + } + sid_table_.erase(sid_name); + return task_process_status::task_success; +} + +task_process_status Srv6Orch::doTaskSidTable(const KeyOpFieldsValuesTuple & tuple) +{ + SWSS_LOG_ENTER(); + string sid_name = kfvKey(tuple); + string op = kfvOp(tuple); + string sid_list, sidlist_type; + + for (auto i : kfvFieldsValues(tuple)) + { + if (fvField(i) == "path") + { + sid_list = fvValue(i); + } + if (fvField(i) == "type") + { + sidlist_type = fvValue(i); + } + } + if (op == SET_COMMAND) + { + if (!createUpdateSidList(sid_name, sid_list, sidlist_type)) + { + SWSS_LOG_ERROR("Failed to process sid %s", sid_name.c_str()); + return task_process_status::task_failed; + } + } + else if (op == DEL_COMMAND) + { + task_process_status status = deleteSidList(sid_name); + if (status != task_process_status::task_success) + { + SWSS_LOG_ERROR("Failed to delete sid %s", sid_name.c_str()); + return status; + } + } else { + SWSS_LOG_ERROR("Invalid command"); + return task_process_status::task_failed; + } + + return task_process_status::task_success; +} + +bool Srv6Orch::mySidExists(string my_sid_string) +{ + if (srv6_my_sid_table_.find(my_sid_string) != srv6_my_sid_table_.end()) + { + return true; + } + return false; +} + +/* + * Neighbor change notification to be processed for the SRv6 MySID entries + * + * In summary, this function handles both add and delete neighbor notifications + * + * When a neighbor ADD notification is received, we do the following steps: + * - We walk through the list of pending SRv6 MySID entries that are waiting for this neighbor to be ready + * - For each SID, we install the SID into the ASIC + * - We remove the SID from the pending MySID entries list + * + * When a neighbor DELETE notification is received, we do the following steps: + * - We walk through the list of pending SRv6 MySID entries installed in the ASIC + * - For each SID, we remove the SID from the ASIC + * - We add the SID to the pending MySID entries list + */ +void Srv6Orch::updateNeighbor(const NeighborUpdate& update) +{ + SWSS_LOG_ENTER(); + + /* Check if the received notification is a neighbor add or a neighbor delete */ + if (update.add) + { + /* + * It's a neighbor add notification, let's walk through the list of SRv6 MySID entries + * that are waiting for that neighbor to be ready, and install them into the ASIC. + */ + + SWSS_LOG_INFO("Neighbor ADD event: %s alias '%s', installing pending SRv6 SIDs", + update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + + auto it = m_pendingSRv6MySIDEntries.find(NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)); + if (it == m_pendingSRv6MySIDEntries.end()) + { + /* No SID is waiting for this neighbor. Nothing to do */ + return; + } + auto &nexthop_key = it->first; + auto &pending_my_sid_entries = it->second; + + for (auto iter = pending_my_sid_entries.begin(); iter != pending_my_sid_entries.end();) + { + string my_sid_string = get<0>(*iter); + const string dt_vrf = get<1>(*iter); + const string adj = get<2>(*iter); + const string end_action = get<3>(*iter); + + SWSS_LOG_INFO("Creating SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(), adj.c_str()); + + if(!createUpdateMysidEntry(my_sid_string, dt_vrf, adj, end_action)) + { + SWSS_LOG_ERROR("Failed to create/update my_sid entry for sid %s", my_sid_string.c_str()); + ++iter; + continue; + } + + SWSS_LOG_INFO("SID %s created successfully", my_sid_string.c_str()); + + iter = pending_my_sid_entries.erase(iter); + } + + if (pending_my_sid_entries.size() == 0) + { + m_pendingSRv6MySIDEntries.erase(nexthop_key); + } + } + else + { + /* + * It's a neighbor delete notification, let's uninstall the SRv6 MySID entries associated with that + * nexthop from the ASIC, and add them to the SRv6 MySID entries pending set. + */ + + SWSS_LOG_INFO("Neighbor DELETE event: %s alias '%s', removing associated SRv6 SIDs", + update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + + for (auto it = srv6_my_sid_table_.begin(); it != srv6_my_sid_table_.end();) + { + /* Skip SIDs that are not associated with a L3 Adjacency */ + if (it->second.endAdjString.empty()) + { + ++it; + continue; + } + + try + { + /* Skip SIDs that are not associated with this neighbor */ + if (IpAddress(it->second.endAdjString) != update.entry.ip_address) + { + ++it; + continue; + } + } + catch (const std::invalid_argument &e) + { + /* SRv6 SID is associated with an invalid L3 Adjacency IP address, skipping */ + ++it; + continue; + } + + /* + * Save SID entry information to temp variables, before removing the SID. + * This information will be consumed used later. + */ + string my_sid_string = it->first; + const string dt_vrf = it->second.endVrfString; + const string adj = it->second.endAdjString; + string end_action; + for (auto iter = end_behavior_map.begin(); iter != end_behavior_map.end(); iter++) + { + if (iter->second == it->second.endBehavior) + { + end_action = iter->first; + break; + } + } + + /* Skip SIDs with unknown SRv6 behavior */ + if (end_action.empty()) + { + ++it; + continue; + } + + SWSS_LOG_INFO("Removing SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), dt_vrf.c_str(), adj.c_str(), end_action.c_str()); + + /* Let's delete the SID from the ASIC */ + unordered_map::iterator tmp = it; + ++tmp; + if(!deleteMysidEntry(it->first)) + { + SWSS_LOG_ERROR("Failed to delete my_sid entry for sid %s", it->first.c_str()); + ++it; + continue; + } + it = tmp; + + SWSS_LOG_INFO("SID %s removed successfully", my_sid_string.c_str()); + + /* + * Finally, add the SID to the pending MySID entries set, so that we can re-install it + * when the neighbor comes back + */ + auto pending_mysid_entry = make_tuple(my_sid_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)].insert(pending_mysid_entry); + } + } +} + +void Srv6Orch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + assert(cntx); + + switch(type) { + case SUBJECT_TYPE_NEIGH_CHANGE: + { + NeighborUpdate *update = static_cast(cntx); + updateNeighbor(*update); + break; + } + default: + // Received update in which we are not interested + // Ignore it + return; + } +} + +bool Srv6Orch::sidEntryEndpointBehavior(string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, + sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor) +{ + if (end_behavior_map.find(action) == end_behavior_map.end()) + { + SWSS_LOG_ERROR("Invalid endpoint behavior function"); + return false; + } + end_behavior = end_behavior_map.at(action); + + if (end_flavor_map.find(action) != end_flavor_map.end()) + { + end_flavor = end_flavor_map.at(action); + } + + return true; +} + +bool Srv6Orch::mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior) +{ + if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_T || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT46) + { + return true; + } + return false; +} + +bool Srv6Orch::mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior) +{ + if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA) + { + return true; + } + return false; +} + +bool Srv6Orch::mySidTunnelRequired(const string& my_sid_addr, const sai_my_sid_entry_t& sai_entry, sai_my_sid_entry_endpoint_behavior_t end_behavior, sai_tunnel_dscp_mode_t& dscp_mode) +{ + if (end_behavior != SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN && + end_behavior != SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT46) + { + return false; + } + + auto locator_cfg = getMySidEntryLocatorCfg(sai_entry); + + return getMySidEntryDscpMode(my_sid_addr, locator_cfg, dscp_mode); +} + +bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, const string adj, const string end_action) +{ + SWSS_LOG_ENTER(); + vector attributes; + sai_attribute_t attr; + string key_string = my_sid_string; + sai_my_sid_entry_endpoint_behavior_t end_behavior; + sai_my_sid_entry_endpoint_behavior_flavor_t end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_NONE; + + bool entry_exists = false; + if (mySidExists(key_string)) + { + entry_exists = true; + } + + sai_my_sid_entry_t my_sid_entry; + if (!entry_exists) + { + vectorkeys = tokenize(my_sid_string, MY_SID_KEY_DELIMITER); + + my_sid_entry.vr_id = gVirtualRouterId; + my_sid_entry.switch_id = gSwitchId; + my_sid_entry.locator_block_len = (uint8_t)stoi(keys[0]); + my_sid_entry.locator_node_len = (uint8_t)stoi(keys[1]); + my_sid_entry.function_len = (uint8_t)stoi(keys[2]); + my_sid_entry.args_len = (uint8_t)stoi(keys[3]); + size_t keylen = keys[0].length()+keys[1].length()+keys[2].length()+keys[3].length() + 4; + my_sid_string.erase(0, keylen); + string my_sid = my_sid_string; + SWSS_LOG_INFO("MY SID STRING %s", my_sid.c_str()); + IpAddress address(my_sid); + memcpy(my_sid_entry.sid, address.getV6Addr(), sizeof(my_sid_entry.sid)); + } + else + { + my_sid_entry = srv6_my_sid_table_[key_string].entry; + } + + SWSS_LOG_INFO("MySid: sid %s, action %s, vrf %s, block %d, node %d, func %d, arg %d dt_vrf %s, adj %s", + my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(),my_sid_entry.locator_block_len, my_sid_entry.locator_node_len, + my_sid_entry.function_len, my_sid_entry.args_len, dt_vrf.c_str(), adj.c_str()); + + if (sidEntryEndpointBehavior(end_action, end_behavior, end_flavor) != true) + { + SWSS_LOG_ERROR("Invalid my_sid action %s", end_action.c_str()); + return false; + } + sai_attribute_t vrf_attr; + bool vrf_update = false; + if (mySidVrfRequired(end_behavior)) + { + sai_object_id_t dt_vrf_id; + SWSS_LOG_INFO("DT VRF name %s", dt_vrf.c_str()); + if (dt_vrf == "default") + { + dt_vrf_id = gVirtualRouterId; + } + else if (m_vrfOrch->isVRFexists(dt_vrf)) + { + SWSS_LOG_INFO("VRF %s exists in DB", dt_vrf.c_str()); + dt_vrf_id = m_vrfOrch->getVRFid(dt_vrf); + if(dt_vrf_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("VRF object not created for DT VRF %s", dt_vrf.c_str()); + return false; + } + } + else + { + SWSS_LOG_ERROR("VRF %s doesn't exist in DB", dt_vrf.c_str()); + return false; + } + vrf_attr.id = SAI_MY_SID_ENTRY_ATTR_VRF; + vrf_attr.value.oid = dt_vrf_id; + attributes.push_back(vrf_attr); + vrf_update = true; + } + sai_attribute_t nh_attr; + NextHopKey nexthop; + bool nh_update = false; + if (mySidNextHopRequired(end_behavior)) + { + sai_object_id_t next_hop_id; + + vector adjv = tokenize(adj, ADJ_DELIMITER); + if (adjv.size() > 1) + { + SWSS_LOG_ERROR("Failed to create my_sid entry %s adj %s: ECMP adjacency not yet supported", key_string.c_str(), adj.c_str()); + return false; + } + + nexthop = NextHopKey(adj); + SWSS_LOG_INFO("Adjacency %s", adj.c_str()); + if (m_neighOrch->hasNextHop(nexthop)) + { + SWSS_LOG_INFO("Nexthop for adjacency %s exists in DB", adj.c_str()); + next_hop_id = m_neighOrch->getNextHopId(nexthop); + if(next_hop_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get nexthop for adjacency %s", adj.c_str()); + SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); + auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); + return false; + } + } + else + { + SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); + auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); + return false; + } + nh_attr.id = SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID; + nh_attr.value.oid = next_hop_id; + attributes.push_back(nh_attr); + nh_update = true; + } + + sai_tunnel_dscp_mode_t dscp_mode; + if (mySidTunnelRequired(my_sid_string, my_sid_entry, end_behavior, dscp_mode)) + { + sai_object_id_t tunnel_oid; + auto ok = createMySidIpInIpTunnel(dscp_mode, tunnel_oid); + if (!ok) + { + return false; + } + + sai_object_id_t term_entry_oid; + ok = createMySidIpInIpTunnelTermEntry(tunnel_oid, my_sid_entry.sid, term_entry_oid); + if (!ok) + { + removeMySidIpInIpTunnel(dscp_mode); + return false; + } + + srv6_my_sid_table_[key_string].tunnel_term_entry = term_entry_oid; + srv6_my_sid_table_[key_string].dscp_mode = dscp_mode; + + attr.id = SAI_MY_SID_ENTRY_ATTR_TUNNEL_ID; + attr.value.oid = tunnel_oid; + attributes.push_back(attr); + + end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_USD; + } + + attr.id = SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR; + attr.value.s32 = end_behavior; + attributes.push_back(attr); + + if (end_flavor != SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_NONE) + { + attr.id = SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR; + attr.value.s32 = end_flavor; + attributes.push_back(attr); + } + + sai_status_t status = SAI_STATUS_SUCCESS; + if (!entry_exists) + { + sai_object_id_t counter_oid = SAI_NULL_OBJECT_ID; + if (getMySidCountersSupported() && getMySidCountersEnabled()) + { + auto ok = addMySidCounter(my_sid_entry, counter_oid); + if (!ok) + { + return false; + } + + attr.id = SAI_MY_SID_ENTRY_ATTR_COUNTER_ID; + attr.value.oid = counter_oid; + attributes.push_back(attr); + } + + status = sai_srv6_api->create_my_sid_entry(&my_sid_entry, (uint32_t) attributes.size(), attributes.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create my_sid entry %s, rv %d", key_string.c_str(), status); + return false; + } + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_SRV6_MY_SID_ENTRY); + srv6_my_sid_table_[key_string].counter = counter_oid; + } + else + { + if (vrf_update) + { + status = sai_srv6_api->set_my_sid_entry_attribute(&my_sid_entry, &vrf_attr); + if(status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update VRF to my_sid_entry %s, rv %d", key_string.c_str(), status); + return false; + } + } + if (nh_update) + { + status = sai_srv6_api->set_my_sid_entry_attribute(&my_sid_entry, &nh_attr); + if(status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update nexthop to my_sid_entry %s, rv %d", key_string.c_str(), status); + return false; + } + } + } + SWSS_LOG_INFO("Store keystring %s in cache", key_string.c_str()); + if(vrf_update) + { + m_vrfOrch->increaseVrfRefCount(dt_vrf); + srv6_my_sid_table_[key_string].endVrfString = dt_vrf; + } + if(nh_update) + { + m_neighOrch->increaseNextHopRefCount(nexthop, 1); + + SWSS_LOG_INFO("Increasing refcount to %d for Nexthop %s", + m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + + srv6_my_sid_table_[key_string].endAdjString = adj; + } + srv6_my_sid_table_[key_string].endBehavior = end_behavior; + srv6_my_sid_table_[key_string].entry = my_sid_entry; + + return true; +} + +bool Srv6Orch::deleteMysidEntry(const string my_sid_string) +{ + sai_status_t status = SAI_STATUS_SUCCESS; + if (!mySidExists(my_sid_string)) + { + SWSS_LOG_ERROR("My_sid_entry doesn't exist for %s", my_sid_string.c_str()); + return false; + } + sai_my_sid_entry_t my_sid_entry = srv6_my_sid_table_[my_sid_string].entry; + sai_object_id_t& counter = srv6_my_sid_table_[my_sid_string].counter; + + SWSS_LOG_NOTICE("MySid Delete: sid %s", my_sid_string.c_str()); + status = sai_srv6_api->remove_my_sid_entry(&my_sid_entry); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to delete my_sid entry rv %d", status); + return false; + } + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_SRV6_MY_SID_ENTRY); + + removeMySidCounter(my_sid_entry, counter); + + auto endBehavior = srv6_my_sid_table_[my_sid_string].endBehavior; + /* Decrease VRF refcount */ + if (mySidVrfRequired(endBehavior)) + { + m_vrfOrch->decreaseVrfRefCount(srv6_my_sid_table_[my_sid_string].endVrfString); + } + /* Decrease NextHop refcount */ + if (mySidNextHopRequired(endBehavior)) + { + NextHopKey nexthop = NextHopKey(srv6_my_sid_table_[my_sid_string].endAdjString); + m_neighOrch->decreaseNextHopRefCount(nexthop, 1); + + SWSS_LOG_INFO("Decreasing refcount to %d for Nexthop %s", + m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + } + + auto tunnel_term_entry = srv6_my_sid_table_[my_sid_string].tunnel_term_entry; + if (tunnel_term_entry != SAI_NULL_OBJECT_ID) + { + auto ok = removeMySidIpInIpTunnelTermEntry(tunnel_term_entry); + if (!ok) + { + return false; + } + + ok = removeMySidIpInIpTunnel(srv6_my_sid_table_[my_sid_string].dscp_mode); + if (!ok) + { + return false; + } + } + + srv6_my_sid_table_.erase(my_sid_string); + return true; +} + +uint32_t Srv6Orch::getAggId(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + static uint32_t g_agg_id = 1; + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_.find(nhg) != srv6_prefix_agg_id_table_.end()) { + agg_id = srv6_prefix_agg_id_table_[nhg].prefix_agg_id; + SWSS_LOG_INFO("Agg id already exist, agg_id_key: %s, agg_id %u", nhg.to_string().c_str(), agg_id); + } else { + while (srv6_prefix_agg_id_set_.find(g_agg_id) != srv6_prefix_agg_id_set_.end()) { + SWSS_LOG_INFO("Agg id %d is busy, try next", g_agg_id); + g_agg_id++; + // restart with 1 if flip + if (g_agg_id == 0) { + g_agg_id = 1; + } + } + agg_id = g_agg_id; + srv6_prefix_agg_id_table_[nhg].prefix_agg_id = g_agg_id; + // initialize ref_count with 0, will be added in increasePrefixAggIdRefCount() later + srv6_prefix_agg_id_table_[nhg].ref_count = 0; + srv6_prefix_agg_id_set_.insert(g_agg_id); + SWSS_LOG_INFO("Agg id not exist, create agg_id_key: %s, agg_id %u", nhg.to_string().c_str(), agg_id); + } + + return agg_id; +} + +uint32_t Srv6Orch::getAggId(const std::string& index) +{ + SWSS_LOG_ENTER(); + static uint32_t g_agg_id = 1; + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_for_nhg_.find(index) != srv6_prefix_agg_id_table_for_nhg_.end()) { + agg_id = srv6_prefix_agg_id_table_for_nhg_[index].prefix_agg_id; + SWSS_LOG_INFO("Agg id already exist, agg_id_key: %s, agg_id %u", index.c_str(), agg_id); + } else { + while (srv6_prefix_agg_id_set_.find(g_agg_id) != srv6_prefix_agg_id_set_.end()) { + SWSS_LOG_INFO("Agg id %d is busy, try next", g_agg_id); + g_agg_id++; + // restart with 1 if flip + if (g_agg_id == 0) { + g_agg_id = 1; + } + } + agg_id = g_agg_id; + srv6_prefix_agg_id_table_for_nhg_[index].prefix_agg_id = g_agg_id; + // initialize ref_count with 0, will be added in increasePrefixAggIdRefCount() later + srv6_prefix_agg_id_table_for_nhg_[index].ref_count = 0; + srv6_prefix_agg_id_set_.insert(g_agg_id); + SWSS_LOG_INFO("Agg id not exist, create agg_id_key: %s, agg_id %u", index.c_str(), agg_id); + } + + return agg_id; +} + +void Srv6Orch::deleteAggId(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_.find(nhg) == srv6_prefix_agg_id_table_.end()) { + return; + } + + agg_id = srv6_prefix_agg_id_table_[nhg].prefix_agg_id; + if (srv6_prefix_agg_id_table_[nhg].ref_count == 0) { + srv6_prefix_agg_id_table_.erase(nhg); + srv6_prefix_agg_id_set_.erase(agg_id); + SWSS_LOG_INFO("Delete Agg id %d, agg_id_key: %s", agg_id, nhg.to_string().c_str()); + } + else + { + SWSS_LOG_INFO("Referencing this prefix agg id %u : %u", agg_id, srv6_prefix_agg_id_table_[nhg].ref_count); + } +} + +void Srv6Orch::deleteAggId(const std::string& index) +{ + SWSS_LOG_ENTER(); + uint32_t agg_id; + + if (srv6_prefix_agg_id_table_for_nhg_.find(index) == srv6_prefix_agg_id_table_for_nhg_.end()) { + return; + } + + agg_id = srv6_prefix_agg_id_table_for_nhg_[index].prefix_agg_id; + if (srv6_prefix_agg_id_table_for_nhg_[index].ref_count == 0) { + srv6_prefix_agg_id_table_for_nhg_.erase(index); + srv6_prefix_agg_id_set_.erase(agg_id); + SWSS_LOG_INFO("Delete Agg id %d, agg_id_key: %s", agg_id, index.c_str()); + } + else + { + SWSS_LOG_INFO("Referencing this prefix agg id %u : %u", agg_id, srv6_prefix_agg_id_table_for_nhg_[index].ref_count); + } +} + +void Srv6Orch::increasePicContextIdRefCount(const std::string &index) +{ + SWSS_LOG_ENTER(); + if (srv6_pic_context_table_.find(index) == srv6_pic_context_table_.end()) + SWSS_LOG_ERROR("Unexpected refcount increase for context id %s", index.c_str()); + else + ++srv6_pic_context_table_[index].ref_count; +} + +void Srv6Orch::decreasePicContextIdRefCount(const std::string &index) +{ + SWSS_LOG_ENTER(); + if (srv6_pic_context_table_.find(index) == srv6_pic_context_table_.end()) + SWSS_LOG_ERROR("Unexpected refcount decrease for context id %s", index.c_str()); + else + --srv6_pic_context_table_[index].ref_count; +} + +void Srv6Orch::increasePrefixAggIdRefCount(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_.find(nhg) == srv6_prefix_agg_id_table_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount increase for nexthop %s", nhg.to_string().c_str()); + } + else + { + srv6_prefix_agg_id_table_[nhg].ref_count++; + } +} + +void Srv6Orch::increasePrefixAggIdRefCount(const std::string& index) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_for_nhg_.find(index) == srv6_prefix_agg_id_table_for_nhg_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount increase for nexthop %s", index.c_str()); + } + else + { + ++srv6_prefix_agg_id_table_for_nhg_[index].ref_count; + } +} + +void Srv6Orch::decreasePrefixAggIdRefCount(const NextHopGroupKey &nhg) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_.find(nhg) == srv6_prefix_agg_id_table_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount decrease for nexthop %s", nhg.to_string().c_str()); + } + else + { + srv6_prefix_agg_id_table_[nhg].ref_count--; + } +} + +void Srv6Orch::decreasePrefixAggIdRefCount(const std::string& index) +{ + SWSS_LOG_ENTER(); + if (srv6_prefix_agg_id_table_for_nhg_.find(index) == srv6_prefix_agg_id_table_for_nhg_.end()) + { + SWSS_LOG_ERROR("Unexpected prefix agg refcount decrease for nexthop %s", index.c_str()); + } + else + { + --srv6_prefix_agg_id_table_for_nhg_[index].ref_count; + } +} + +bool Srv6Orch::srv6P2pTunnelExists(const std::string &endpoint) +{ + if (srv6_p2p_tunnel_table_.find(endpoint) != srv6_p2p_tunnel_table_.end()) + { + return true; + } + return false; +} + +bool Srv6Orch::createSrv6P2pTunnel(const std::string &src, const std::string &endpoint) +{ + SWSS_LOG_ENTER(); + sai_status_t saistatus; + sai_object_id_t srv6_tunnel_map_id; + + sai_attribute_t tunnel_map_attr; + vector tunnel_map_attrs; + + if (srv6P2pTunnelExists(endpoint)) { + return true; + } + + // 0. create tunnel map + tunnel_map_attr.id = SAI_TUNNEL_MAP_ATTR_TYPE; + tunnel_map_attr.value.u32 = SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID; + tunnel_map_attrs.push_back(tunnel_map_attr); + + saistatus = sai_tunnel_api->create_tunnel_map(&srv6_tunnel_map_id, gSwitchId, + (uint32_t)tunnel_map_attrs.size(), tunnel_map_attrs.data()); + if (saistatus != SAI_STATUS_SUCCESS) { + SWSS_LOG_ERROR("Failed to create srv6 p2p tunnel map for src_ip: %s dst_ip: %s", src.c_str(), endpoint.c_str()); + return false; + } + + // 1. create tunnel + sai_object_id_t tunnel_id; + sai_attribute_t tunnel_attr; + vector tunnel_attrs; + sai_ip_address_t ipaddr; + + tunnel_attr.id = SAI_TUNNEL_ATTR_TYPE; + tunnel_attr.value.s32 = SAI_TUNNEL_TYPE_SRV6; + tunnel_attrs.push_back(tunnel_attr); + + IpAddress src_ip(src); + ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(ipaddr.addr.ip6, src_ip.getV6Addr(), sizeof(ipaddr.addr.ip6)); + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + tunnel_attr.value.ipaddr = ipaddr; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + tunnel_attr.value.oid = gUnderlayIfId; + tunnel_attrs.push_back(tunnel_attr); + + sai_object_id_t tunnel_map_list[1]; + tunnel_map_list[0] = srv6_tunnel_map_id; + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_MAPPERS; + tunnel_attr.value.objlist.count = 1; + tunnel_attr.value.objlist.list = tunnel_map_list; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_PEER_MODE; + tunnel_attr.value.u32 = SAI_TUNNEL_PEER_MODE_P2P; + tunnel_attrs.push_back(tunnel_attr); + + IpAddress dst_ip(endpoint); + ipaddr.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + memcpy(ipaddr.addr.ip6, dst_ip.getV6Addr(), sizeof(ipaddr.addr.ip6)); + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_DST_IP; + tunnel_attr.value.ipaddr = ipaddr; + tunnel_attrs.push_back(tunnel_attr); + + saistatus = sai_tunnel_api->create_tunnel( + &tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); + if (saistatus != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create srv6 p2p tunnel for src ip: %s, dst ip: %s", + src.c_str(), endpoint.c_str()); + + sai_tunnel_api->remove_tunnel_map(srv6_tunnel_map_id); + return false; + } + + srv6_p2p_tunnel_table_[endpoint].tunnel_id = tunnel_id; + srv6_p2p_tunnel_table_[endpoint].tunnel_map_id = srv6_tunnel_map_id; + return true; +} + +bool Srv6Orch::deleteSrv6P2pTunnel(const std::string &endpoint) +{ + if (srv6_p2p_tunnel_table_.find(endpoint) == srv6_p2p_tunnel_table_.end()) + { + return true; + } + + if (srv6P2pTunnelNexthopSize(endpoint) || srv6P2pTunnelEntrySize(endpoint)) + { + SWSS_LOG_INFO("There are still SRv6 VPNs or Nexthops referencing this srv6 p2p tunnel object dst %s", endpoint.c_str()); + return true; + } + + sai_status_t status; + + // 0. remove tunnel + status = sai_tunnel_api->remove_tunnel(srv6_p2p_tunnel_table_[endpoint].tunnel_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 p2p tunnel object for dst_ip: %s", endpoint.c_str()); + return false; + } + + // 1. remove tunnel map + status = sai_tunnel_api->remove_tunnel_map(srv6_p2p_tunnel_table_[endpoint].tunnel_map_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove SRV6 tunnel map object for dst_ip: %s", endpoint.c_str()); + return false; + } + + srv6_p2p_tunnel_table_.erase(endpoint); + return true; +} + +void Srv6Orch::srv6P2ptunnelUpdateNexthops(const NextHopKey &nhkey, bool insert) +{ + if (insert) + { + srv6_p2p_tunnel_table_[nhkey.ip_address.to_string()].nexthops.insert(nhkey); } else { - my_sid_entry = srv6_my_sid_table_[key_string].entry; + srv6_p2p_tunnel_table_[nhkey.ip_address.to_string()].nexthops.erase(nhkey); } +} - SWSS_LOG_INFO("MySid: sid %s, action %s, vrf %s, block %d, node %d, func %d, arg %d dt_vrf %s, adj %s", - my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(),my_sid_entry.locator_block_len, my_sid_entry.locator_node_len, - my_sid_entry.function_len, my_sid_entry.args_len, dt_vrf.c_str(), adj.c_str()); +size_t Srv6Orch::srv6P2pTunnelNexthopSize(const std::string &endpoint) +{ + return srv6_p2p_tunnel_table_[endpoint].nexthops.size(); +} - if (sidEntryEndpointBehavior(end_action, end_behavior, end_flavor) != true) - { - SWSS_LOG_ERROR("Invalid my_sid action %s", end_action.c_str()); - return false; - } - sai_attribute_t vrf_attr; - bool vrf_update = false; - if (mySidVrfRequired(end_behavior)) +void Srv6Orch::srv6P2pTunnelUpdateEntries(const Srv6TunnelMapEntryKey &tmek, bool insert) +{ + if (insert) + srv6_p2p_tunnel_table_[tmek.endpoint].tunnel_map_entries.insert(tmek); + else + srv6_p2p_tunnel_table_[tmek.endpoint].tunnel_map_entries.erase(tmek); +} + +size_t Srv6Orch::srv6P2pTunnelEntrySize(const std::string &endpoint) +{ + return srv6_p2p_tunnel_table_[endpoint].tunnel_map_entries.size(); +} + +bool Srv6Orch::createSrv6Vpns(const Srv6PicContextInfo &pci, const std::string &context_id) +{ + auto agg_id = getAggId(context_id); + for (size_t i = 0; i < pci.nexthops.size(); ++i) { - sai_object_id_t dt_vrf_id; - SWSS_LOG_INFO("DT VRF name %s", dt_vrf.c_str()); - if (dt_vrf == "default") - { - dt_vrf_id = gVirtualRouterId; - } - else if (m_vrfOrch->isVRFexists(dt_vrf)) + if (!createSrv6Vpn(pci.nexthops[i], pci.sids[i], agg_id)) { - SWSS_LOG_INFO("VRF %s exists in DB", dt_vrf.c_str()); - dt_vrf_id = m_vrfOrch->getVRFid(dt_vrf); - if(dt_vrf_id == SAI_NULL_OBJECT_ID) + for (size_t j = 0; j < i; ++j) { - SWSS_LOG_ERROR("VRF object not created for DT VRF %s", dt_vrf.c_str()); - return false; + deleteSrv6Vpn(pci.nexthops[j], pci.sids[j], agg_id); } - } - else - { - SWSS_LOG_ERROR("VRF %s doesn't exist in DB", dt_vrf.c_str()); + deleteAggId(context_id); return false; } - vrf_attr.id = SAI_MY_SID_ENTRY_ATTR_VRF; - vrf_attr.value.oid = dt_vrf_id; - attributes.push_back(vrf_attr); - vrf_update = true; } - sai_attribute_t nh_attr; - NextHopKey nexthop; - bool nh_update = false; - if (mySidNextHopRequired(end_behavior)) - { - sai_object_id_t next_hop_id; - vector adjv = tokenize(adj, ADJ_DELIMITER); - if (adjv.size() > 1) - { - SWSS_LOG_ERROR("Failed to create my_sid entry %s adj %s: ECMP adjacency not yet supported", key_string.c_str(), adj.c_str()); - return false; - } + increasePrefixAggIdRefCount(context_id); - nexthop = NextHopKey(adj); - SWSS_LOG_INFO("Adjacency %s", adj.c_str()); - if (m_neighOrch->hasNextHop(nexthop)) - { - SWSS_LOG_INFO("Nexthop for adjacency %s exists in DB", adj.c_str()); - next_hop_id = m_neighOrch->getNextHopId(nexthop); - if(next_hop_id == SAI_NULL_OBJECT_ID) - { - SWSS_LOG_INFO("Failed to get nexthop for adjacency %s", adj.c_str()); - SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); - auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); - m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); - return false; - } - } - else - { - SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); - auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); - m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); - return false; - } - nh_attr.id = SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID; - nh_attr.value.oid = next_hop_id; - attributes.push_back(nh_attr); - nh_update = true; - } - attr.id = SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR; - attr.value.s32 = end_behavior; - attributes.push_back(attr); + return true; +} - attr.id = SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR; - attr.value.s32 = end_flavor; - attributes.push_back(attr); +bool Srv6Orch::createSrv6Vpn(const std::string &endpoint, const std::string &sid, const uint32_t prefix_agg_id) +{ + SWSS_LOG_ENTER(); - sai_status_t status = SAI_STATUS_SUCCESS; - if (!entry_exists) - { - status = sai_srv6_api->create_my_sid_entry(&my_sid_entry, (uint32_t) attributes.size(), attributes.data()); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to create my_sid entry %s, rv %d", key_string.c_str(), status); - return false; - } - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_SRV6_MY_SID_ENTRY); - } - else + sai_status_t status; + + Srv6TunnelMapEntryKey tmek; + tmek.endpoint = endpoint; + tmek.vpn_sid = sid; + tmek.prefix_agg_id = prefix_agg_id; + + if (srv6_tunnel_map_entry_table_.find(tmek) != srv6_tunnel_map_entry_table_.end()) { - if (vrf_update) - { - status = sai_srv6_api->set_my_sid_entry_attribute(&my_sid_entry, &vrf_attr); - if(status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to update VRF to my_sid_entry %s, rv %d", key_string.c_str(), status); - return false; - } - } - if (nh_update) - { - status = sai_srv6_api->set_my_sid_entry_attribute(&my_sid_entry, &nh_attr); - if(status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to update nexthop to my_sid_entry %s, rv %d", key_string.c_str(), status); - return false; - } - } + srv6_tunnel_map_entry_table_[tmek].ref_count++; + return true; } - SWSS_LOG_INFO("Store keystring %s in cache", key_string.c_str()); - if(vrf_update) + + if (srv6_p2p_tunnel_table_.find(endpoint) == srv6_p2p_tunnel_table_.end()) { - m_vrfOrch->increaseVrfRefCount(dt_vrf); - srv6_my_sid_table_[key_string].endVrfString = dt_vrf; + SWSS_LOG_ERROR("Tunnel map for endpoint %s does not exist", endpoint.c_str()); + return false; } - if(nh_update) - { - m_neighOrch->increaseNextHopRefCount(nexthop, 1); + sai_object_id_t tunnel_map_id = srv6_p2p_tunnel_table_[endpoint].tunnel_map_id; - SWSS_LOG_INFO("Increasing refcount to %d for Nexthop %s", - m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + // 1. create vpn tunnel_map entry + sai_attribute_t tunnel_map_entry_attr; + vector tunnel_map_entry_attrs; + sai_object_id_t tunnel_entry_id; - srv6_my_sid_table_[key_string].endAdjString = adj; + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE; + tunnel_map_entry_attr.value.u32 = SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID; + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP; + tunnel_map_entry_attr.value.oid = tunnel_map_id; + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY; + tunnel_map_entry_attr.value.u32 = tmek.prefix_agg_id; + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + IpAddress vpn_sid(tmek.vpn_sid); + tunnel_map_entry_attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_SRV6_VPN_SID_VALUE; + memcpy(tunnel_map_entry_attr.value.ip6, vpn_sid.getV6Addr(), sizeof(sai_ip6_t)); + tunnel_map_entry_attrs.push_back(tunnel_map_entry_attr); + + status = sai_tunnel_api->create_tunnel_map_entry(&tunnel_entry_id, gSwitchId, + (uint32_t)tunnel_map_entry_attrs.size(), + tunnel_map_entry_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create vpn tunnel_map entry for vpn_sid: %s", tmek.vpn_sid.c_str()); + return false; } - srv6_my_sid_table_[key_string].endBehavior = end_behavior; - srv6_my_sid_table_[key_string].entry = my_sid_entry; + // add reference for tunnel map entry + srv6_tunnel_map_entry_table_[tmek].tunnel_map_entry_id = tunnel_entry_id; + srv6_tunnel_map_entry_table_[tmek].ref_count = 1; + + srv6P2pTunnelUpdateEntries(tmek, true); return true; } -bool Srv6Orch::deleteMysidEntry(const string my_sid_string) +bool Srv6Orch::deleteSrv6Vpns(const std::string &context_id) { - sai_status_t status = SAI_STATUS_SUCCESS; - if (!mySidExists(my_sid_string)) + const auto &it = srv6_pic_context_table_.find(context_id); + if (it == srv6_pic_context_table_.end()) { - SWSS_LOG_ERROR("My_sid_entry doesn't exist for %s", my_sid_string.c_str()); + SWSS_LOG_ERROR("Failed to find context id %s", context_id.c_str()); return false; } - sai_my_sid_entry_t my_sid_entry = srv6_my_sid_table_[my_sid_string].entry; - SWSS_LOG_NOTICE("MySid Delete: sid %s", my_sid_string.c_str()); - status = sai_srv6_api->remove_my_sid_entry(&my_sid_entry); - if (status != SAI_STATUS_SUCCESS) + bool success = true; + auto agg_id = getAggId(context_id); + for (size_t i = 0; i < it->second.nexthops.size(); ++i) { - SWSS_LOG_ERROR("Failed to delete my_sid entry rv %d", status); - return false; + if (!deleteSrv6Vpn(it->second.nexthops[i], it->second.sids[i], agg_id)) + { + success = false; + } } - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_SRV6_MY_SID_ENTRY); - /* Decrease VRF refcount */ - if (mySidVrfRequired(srv6_my_sid_table_[my_sid_string].endBehavior)) + if (success) { - m_vrfOrch->decreaseVrfRefCount(srv6_my_sid_table_[my_sid_string].endVrfString); + decreasePrefixAggIdRefCount(context_id); } - /* Decrease NextHop refcount */ - if (mySidNextHopRequired(srv6_my_sid_table_[my_sid_string].endBehavior)) + deleteAggId(context_id); + + return success; +} + +bool Srv6Orch::deleteSrv6Vpn(const std::string &endpoint, const std::string &sid, const uint32_t prefix_agg_id) +{ + SWSS_LOG_ENTER(); + sai_status_t status; + + // 1. remove tunnel_map entry if need + sai_object_id_t tunnel_entry_id; + + Srv6TunnelMapEntryKey tmek; + tmek.endpoint = endpoint; + tmek.vpn_sid = sid; + tmek.prefix_agg_id = prefix_agg_id; + + if (srv6_tunnel_map_entry_table_.find(tmek) == srv6_tunnel_map_entry_table_.end()) { - NextHopKey nexthop = NextHopKey(srv6_my_sid_table_[my_sid_string].endAdjString); - m_neighOrch->decreaseNextHopRefCount(nexthop, 1); + return true; + } - SWSS_LOG_INFO("Decreasing refcount to %d for Nexthop %s", - m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + srv6_tunnel_map_entry_table_[tmek].ref_count--; + if (srv6_tunnel_map_entry_table_[tmek].ref_count == 0) + { + tunnel_entry_id = srv6_tunnel_map_entry_table_[tmek].tunnel_map_entry_id; + status = sai_tunnel_api->remove_tunnel_map_entry(tunnel_entry_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove nexthop tunnel map entry (endpoint: %s, sid: %s, agg_id: %u)", + tmek.endpoint.c_str(), tmek.vpn_sid.c_str(), tmek.prefix_agg_id); + return false; + } + srv6_tunnel_map_entry_table_.erase(tmek); + + srv6P2pTunnelUpdateEntries(tmek, false); + if (!deleteSrv6P2pTunnel(tmek.endpoint)) + { + SWSS_LOG_ERROR("Failed to remove SRV6 p2p tunnel object for dst %s,", endpoint.c_str()); + return false; + } + } + else + { + SWSS_LOG_INFO("Nexthops referencing this tunnel map entry endpoint %s, vpn_sid %s, prefix_agg_id %u : %u", + tmek.endpoint.c_str(), + tmek.vpn_sid.c_str(), + tmek.prefix_agg_id, + srv6_tunnel_map_entry_table_[tmek].ref_count); } - srv6_my_sid_table_.erase(my_sid_string); return true; } @@ -901,9 +2242,107 @@ void Srv6Orch::doTaskMySidTable(const KeyOpFieldsValuesTuple & tuple) } } +void Srv6Orch::doTaskCfgMySidTable(const KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + + auto op = kfvOp(tuple); + auto key = kfvKey(tuple); + auto& fvs = kfvFieldsValues(tuple); + + if (op == SET_COMMAND) + { + addMySidCfgCacheEntry(key, fvs); + } + else if (op == DEL_COMMAND) + { + removeMySidCfgCacheEntry(key); + } + else + { + SWSS_LOG_ERROR("Unexpected command"); + } +} + +task_process_status Srv6Orch::doTaskPicContextTable(const KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + string op = kfvOp(tuple); + string key = kfvKey(tuple); + const auto &it = srv6_pic_context_table_.find(key); + if (op == SET_COMMAND) + { + if (it != srv6_pic_context_table_.end()) + { + SWSS_LOG_ERROR("update is not allowed for pic context table"); + return task_duplicated; + } + Srv6PicContextInfo pci; + pci.ref_count = 0; + for (auto i : kfvFieldsValues(tuple)) + { + if (fvField(i) == "nexthop" && fvValue(i) != "") + { + pci.nexthops = tokenize(fvValue(i), ','); + } + else if (fvField(i) == "vpn_sid" && fvValue(i) != "") + { + pci.sids = tokenize(fvValue(i), ','); + } + } + if (pci.nexthops.size() != pci.sids.size()) + { + SWSS_LOG_ERROR("inconsistent number of endpoints(%zu) and vpn sids(%zu)", + pci.nexthops.size(), pci.sids.size()); + return task_failed; + } + + if (!createSrv6Vpns(pci ,key)) + { + SWSS_LOG_ERROR("Failed to create SRv6 VPNs for context id %s", key.c_str()); + return task_need_retry; + } + + srv6_pic_context_table_[key] = pci; + } + else if (op == DEL_COMMAND) + { + if (it == srv6_pic_context_table_.end()) + { + SWSS_LOG_INFO("Unable to find pic context entry for key %s", key.c_str()); + return task_ignore; + } + else if (it->second.ref_count != 0) + { + SWSS_LOG_INFO("Unable to delete context id %s, because it is referenced %u times", key.c_str(), it->second.ref_count); + return task_need_retry; + } + else if (!deleteSrv6Vpns(key)) + { + SWSS_LOG_ERROR("Failed to delete SRv6 VPNs for context id %s", key.c_str()); + return task_need_retry; + } + srv6_pic_context_table_.erase(it); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + return task_ignore; + } + return task_success; +} + +bool Srv6Orch::contextIdExists(const std::string &context_id) +{ + if (srv6_pic_context_table_.find(context_id) == srv6_pic_context_table_.end()) + return false; + return true; +} + void Srv6Orch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); + task_process_status status; const string &table_name = consumer.getTableName(); auto it = consumer.m_toSync.begin(); while(it != consumer.m_toSync.end()) @@ -912,12 +2351,30 @@ void Srv6Orch::doTask(Consumer &consumer) SWSS_LOG_INFO("table name : %s",table_name.c_str()); if (table_name == APP_SRV6_SID_LIST_TABLE_NAME) { - doTaskSidTable(t); + status = doTaskSidTable(t); + if (status == task_process_status::task_need_retry) + { + it++; + continue; + } } else if (table_name == APP_SRV6_MY_SID_TABLE_NAME) { doTaskMySidTable(t); } + else if (table_name == APP_PIC_CONTEXT_TABLE_NAME) + { + status = doTaskPicContextTable(t); + if (status == task_need_retry) + { + ++it; + continue; + } + } + else if (table_name == CFG_SRV6_MY_SID_TABLE_NAME) + { + doTaskCfgMySidTable(t); + } else { SWSS_LOG_ERROR("Unknown table : %s",table_name.c_str()); diff --git a/orchagent/srv6orch.h b/orchagent/srv6orch.h index a3e39b56326..3a6afe0369a 100644 --- a/orchagent/srv6orch.h +++ b/orchagent/srv6orch.h @@ -26,6 +26,8 @@ using namespace std; using namespace swss; +#define SRV6_STAT_COUNTER_FLEX_COUNTER_GROUP "SRV6_STAT_COUNTER" + struct SidTableEntry { sai_object_id_t sid_object_id; // SRV6 SID list object id @@ -44,67 +46,226 @@ struct MySidEntry sai_my_sid_entry_endpoint_behavior_t endBehavior; string endVrfString; // Used for END.T, END.DT4, END.DT6 and END.DT46, string endAdjString; // Used for END.X, END.DX4, END.DX6 + sai_tunnel_dscp_mode_t dscp_mode; // Used for decapsulation configuration + sai_object_id_t tunnel_term_entry; // Used for decapsulation configuration + sai_object_id_t counter; +}; + +struct MySidIpInIpTunnel +{ + sai_object_id_t overlay_rif_oid; + sai_object_id_t tunnel_oid; + uint64_t refcount; +}; + +struct MySidIpInIpTunnels +{ + MySidIpInIpTunnel dscp_uniform_tunnel; + MySidIpInIpTunnel dscp_pipe_tunnel; +}; + +struct MySidLocatorCfg +{ + uint8_t block_len; + uint8_t node_len; + uint8_t func_len; + uint8_t arg_len; + + bool operator==(const MySidLocatorCfg& rhs) const { + return std::tie(block_len, node_len, func_len, arg_len) == std::tie(rhs.block_len, rhs.node_len, rhs.func_len, rhs.arg_len); + } +}; + +struct Srv6TunnelMapEntryKey +{ + string endpoint; + string vpn_sid; + uint32_t prefix_agg_id; + + bool operator==(const Srv6TunnelMapEntryKey &o) const + { + return tie(endpoint, vpn_sid, prefix_agg_id) == + tie(o.endpoint, o.vpn_sid, o.prefix_agg_id); + } + + bool operator<(const Srv6TunnelMapEntryKey &o) const + { + return tie(endpoint, vpn_sid, prefix_agg_id) < + tie(o.endpoint, o.vpn_sid, o.prefix_agg_id); + } + + bool operator!=(const Srv6TunnelMapEntryKey &o) const + { + return !(*this == o); + } +}; + +struct Srv6TunnelMapEntryEntry +{ + sai_object_id_t tunnel_map_entry_id; + + // for sid remarking + sai_object_id_t inner_tunnel_map_id; + map inner_tunnel_map_entry_ids; + + uint32_t ref_count; +}; + +struct P2pTunnelEntry +{ + sai_object_id_t tunnel_id; + sai_object_id_t tunnel_map_id; + + set nexthops; + set tunnel_map_entries; +}; + +struct Srv6PrefixAggIdEntry +{ + uint32_t prefix_agg_id; + + uint32_t ref_count; +}; + +struct Srv6PicContextInfo +{ + vector nexthops; + vector sids; + uint32_t ref_count; }; typedef unordered_map SidTable; typedef unordered_map Srv6TunnelTable; typedef map Srv6NextHopTable; typedef unordered_map Srv6MySidTable; +typedef map Srv6P2pTunnelTable; +typedef map Srv6PrefixAggIdTable; +typedef map Srv6PrefixAggIdTableForNhg; +typedef set Srv6PrefixAggIdSet; +typedef map Srv6TunnelMapEntryTable; +typedef map Srv6PicContextTable; +typedef pair Srv6MySidDscpCfgCacheVal; +typedef std::unordered_multimap Srv6MySidDscpCfg; #define SID_LIST_DELIMITER ',' #define MY_SID_KEY_DELIMITER ':' class Srv6Orch : public Orch, public Observer { public: - Srv6Orch(DBConnector *applDb, vector &tableNames, SwitchOrch *switchOrch, VRFOrch *vrfOrch, NeighOrch *neighOrch): - Orch(applDb, tableNames), - m_vrfOrch(vrfOrch), - m_switchOrch(switchOrch), - m_neighOrch(neighOrch), - m_sidTable(applDb, APP_SRV6_SID_LIST_TABLE_NAME), - m_mysidTable(applDb, APP_SRV6_MY_SID_TABLE_NAME) - { - m_neighOrch->attach(this); - } - ~Srv6Orch() - { - m_neighOrch->detach(this); - } + Srv6Orch(DBConnector *cfgDb, DBConnector *applDb, const vector& tables, SwitchOrch *switchOrch, VRFOrch *vrfOrch, NeighOrch *neighOrch); + ~Srv6Orch(); + void increasePicContextIdRefCount(const std::string&); + void decreasePicContextIdRefCount(const std::string&); + void increasePrefixAggIdRefCount(const NextHopGroupKey&); + void increasePrefixAggIdRefCount(const std::string&); + void decreasePrefixAggIdRefCount(const NextHopGroupKey&); + void decreasePrefixAggIdRefCount(const std::string&); + uint32_t getAggId(const NextHopGroupKey &nhg); + uint32_t getAggId(const std::string& index); + void deleteAggId(const NextHopGroupKey &nhg); + void deleteAggId(const std::string& index); + bool createSrv6NexthopWithoutVpn(const NextHopKey &nhKey, sai_object_id_t &nexthop_id); bool srv6Nexthops(const NextHopGroupKey &nextHops, sai_object_id_t &next_hop_id); - bool removeSrv6Nexthops(const NextHopGroupKey &nhg); + bool removeSrv6NexthopWithoutVpn(const NextHopKey &nhKey); + bool removeSrv6Nexthops(const std::vector &nhgv); void update(SubjectType, void *); + bool contextIdExists(const std::string &context_id); + void setCountersState(bool enable); private: void doTask(Consumer &consumer); - void doTaskSidTable(const KeyOpFieldsValuesTuple &tuple); + void doTask(SelectableTimer &timer); + task_process_status doTaskSidTable(const KeyOpFieldsValuesTuple &tuple); void doTaskMySidTable(const KeyOpFieldsValuesTuple &tuple); + task_process_status doTaskPicContextTable(const KeyOpFieldsValuesTuple &tuple); + void doTaskCfgMySidTable(const KeyOpFieldsValuesTuple &tuple); bool createUpdateSidList(const string seg_name, const string ips, const string sidlist_type); - bool deleteSidList(const string seg_name); + task_process_status deleteSidList(const string seg_name); bool createSrv6Tunnel(const string srv6_source); bool createSrv6Nexthop(const NextHopKey &nh); + bool deleteSrv6Nexthop(const NextHopKey &nh); bool srv6NexthopExists(const NextHopKey &nh); bool createUpdateMysidEntry(string my_sid_string, const string vrf, const string adj, const string end_action); bool deleteMysidEntry(const string my_sid_string); bool sidEntryEndpointBehavior(const string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor); + MySidLocatorCfg getMySidEntryLocatorCfg(const sai_my_sid_entry_t& sai_entry) const; + string getMySidPrefix(const string& my_sid_addr, const MySidLocatorCfg& locator_cfg) const; + bool getLocatorCfgFromDb(const string& locator, MySidLocatorCfg& cfg); + bool reverseLookupLocator(const vector& candidates, const MySidLocatorCfg& locator_cfg, string& locator); + void mySidCfgCacheRefresh(); + void addMySidCfgCacheEntry(const string& my_sid_key, const vector& fvs); + void removeMySidCfgCacheEntry(const string& my_sid_key); + bool getMySidEntryDscpMode(const string& my_sid_addr, const MySidLocatorCfg& locator_cfg, sai_tunnel_dscp_mode_t& dscp_mode); bool mySidExists(const string mysid_string); bool mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior); bool mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior); + bool mySidTunnelRequired(const string& my_sid_addr, const sai_my_sid_entry_t& sai_entry, sai_my_sid_entry_endpoint_behavior_t end_behavior, sai_tunnel_dscp_mode_t& dscp_mode); void srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert); size_t srv6TunnelNexthopSize(const string srv6_source); + bool initIpInIpTunnel(MySidIpInIpTunnel& tunnel, sai_tunnel_dscp_mode_t dscp_mode); + bool deinitIpInIpTunnel(MySidIpInIpTunnel& tunnel); + bool createMySidIpInIpTunnel(sai_tunnel_dscp_mode_t dscp_mode, sai_object_id_t& tunnel_oid); + bool removeMySidIpInIpTunnel(sai_tunnel_dscp_mode_t dscp_mode); + bool createMySidIpInIpTunnelTermEntry(sai_object_id_t tunnel_oid, const sai_ip6_t& sid_ip, sai_object_id_t& term_entry_oid); + bool removeMySidIpInIpTunnelTermEntry(sai_object_id_t term_entry_oid); + bool sidListExists(const string &segment_name); + bool srv6P2pTunnelExists(const string &endpoint); + bool createSrv6P2pTunnel(const string &src, const string &endpoint); + bool deleteSrv6P2pTunnel(const string &endpoint); + void srv6P2ptunnelUpdateNexthops(const NextHopKey &nhkey, bool insert); + size_t srv6P2pTunnelNexthopSize(const string &endpoint); + void srv6P2pTunnelUpdateEntries(const Srv6TunnelMapEntryKey &tmek, bool insert); + size_t srv6P2pTunnelEntrySize(const string &endpoint); + bool createSrv6Vpn(const string &endpoint, const string &sid, const uint32_t prefix_agg_id); + bool createSrv6Vpns(const Srv6PicContextInfo &pci ,const std::string &context_id); + bool deleteSrv6Vpn(const string &endpoint, const string &sid, const uint32_t prefix_agg_id); + bool deleteSrv6Vpns(const std::string &context_id); void updateNeighbor(const NeighborUpdate& update); + void initializeCounters(); + bool queryMySidCountersCapability() const; + bool getMySidCountersEnabled() const; + bool getMySidCountersSupported() const; + string getMySidCounterKey(const sai_my_sid_entry_t& sai_entry) const; + IpAddress getMySidAddress(const sai_my_sid_entry_t& sai_entry) const; + bool addMySidCounter(const sai_my_sid_entry_t& sai_entry, sai_object_id_t& counter_oid); + void removeMySidCounter(const sai_my_sid_entry_t& sai_entry, sai_object_id_t& counter_oid); + void setMySidEntryCounter(const sai_my_sid_entry_t& sai_entry, sai_object_id_t counter_oid); + ProducerStateTable m_sidTable; ProducerStateTable m_mysidTable; + ProducerStateTable m_piccontextTable; + Table m_mysidCfgTable; + Table m_locatorCfgTable; SidTable sid_table_; Srv6TunnelTable srv6_tunnel_table_; Srv6NextHopTable srv6_nexthop_table_; Srv6MySidTable srv6_my_sid_table_; + Srv6P2pTunnelTable srv6_p2p_tunnel_table_; + Srv6PrefixAggIdTable srv6_prefix_agg_id_table_; + Srv6PrefixAggIdTableForNhg srv6_prefix_agg_id_table_for_nhg_; + Srv6PrefixAggIdSet srv6_prefix_agg_id_set_; + Srv6TunnelMapEntryTable srv6_tunnel_map_entry_table_; + Srv6PicContextTable srv6_pic_context_table_; + MySidIpInIpTunnels my_sid_ipinip_tunnels_{}; + Srv6MySidDscpCfg my_sid_dscp_cfg_cache_; + VRFOrch *m_vrfOrch; SwitchOrch *m_switchOrch; NeighOrch *m_neighOrch; + FlexCounterManager m_counter_manager; + unique_ptr
m_mysid_counters_table; + unique_ptr
m_vid_to_rid_table; + shared_ptr m_counter_db; + shared_ptr m_asic_db; + map m_pending_counters; + SelectableTimer* m_counter_update_timer = nullptr; + bool m_mysid_counters_enabled = false; + bool m_mysid_counters_supported = false; + /* * Map to store the SRv6 MySID entries not yet configured in ASIC because associated to a non-ready nexthop * diff --git a/orchagent/stporch.cpp b/orchagent/stporch.cpp new file mode 100644 index 00000000000..22f348c2977 --- /dev/null +++ b/orchagent/stporch.cpp @@ -0,0 +1,616 @@ +#include +#include "portsorch.h" +#include "logger.h" +#include "fdborch.h" +#include "stporch.h" + +extern sai_stp_api_t *sai_stp_api; +extern sai_vlan_api_t *sai_vlan_api; +extern sai_switch_api_t *sai_switch_api; + +extern FdbOrch *gFdbOrch; +extern PortsOrch *gPortsOrch; + + +extern sai_object_id_t gSwitchId; + +StpOrch::StpOrch(DBConnector * db, DBConnector * stateDb, vector &tableNames) : + Orch(db, tableNames) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + sai_status_t status; + bool ret = false; + + m_stpTable = unique_ptr
(new Table(stateDb, STATE_STP_TABLE_NAME)); + + vector attrs; + attr.id = SAI_SWITCH_ATTR_DEFAULT_STP_INST_ID; + attrs.push_back(attr); + + attr.id = SAI_SWITCH_ATTR_MAX_STP_INSTANCE; + attrs.push_back(attr); + status = sai_switch_api->get_switch_attribute(gSwitchId, (uint32_t)attrs.size(), attrs.data()); + if (status == SAI_STATUS_SUCCESS) + { + m_defaultStpId = attrs[0].value.oid; + updateMaxStpInstance(attrs[1].value.u32); + ret = true; + } + + SWSS_LOG_NOTICE("StpOrch initialization %s", (ret == true)?"success":"failure"); +}; + + +sai_object_id_t StpOrch::getStpInstanceOid(sai_uint16_t stp_instance) +{ + std::map::iterator it; + + it = m_stpInstToOid.find(stp_instance); + if (it == m_stpInstToOid.end()) + { + return SAI_NULL_OBJECT_ID; + } + + return it->second; +} + +sai_object_id_t StpOrch::addStpInstance(sai_uint16_t stp_instance) +{ + sai_object_id_t stp_oid; + sai_attribute_t attr; + + attr.id = 0; + attr.value.u32 = 0; + + sai_status_t status = sai_stp_api->create_stp(&stp_oid, gSwitchId, 0, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create STP instance %u status %u", stp_instance, status); + return SAI_NULL_OBJECT_ID; + } + + m_stpInstToOid[stp_instance] = stp_oid; + SWSS_LOG_INFO("Added STP instance:%hu oid:%" PRIx64 "", stp_instance, stp_oid); + return stp_oid; +} + +bool StpOrch::removeStpInstance(sai_uint16_t stp_instance) +{ + sai_object_id_t stp_oid; + + stp_oid = getStpInstanceOid(stp_instance); + if (stp_oid == SAI_NULL_OBJECT_ID) + { + return false; + } + + /* Remove all STP ports before deleting the STP instance */ + auto portList = gPortsOrch->getAllPorts(); + for (auto &it: portList) + { + auto &port = it.second; + if (port.m_type == Port::PHY || port.m_type == Port::LAG) + { + if(port.m_stp_port_ids.find(stp_instance) == port.m_stp_port_ids.end()) + continue; + + removeStpPort(port, stp_instance); + } + } + + sai_status_t status = sai_stp_api->remove_stp(stp_oid); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove STP instance %u oid %" PRIx64 "status %u", stp_instance, stp_oid, status); + return false; + } + + m_stpInstToOid.erase(stp_instance); + SWSS_LOG_INFO("Removed STP instance:%hu oid:%" PRIx64 "", stp_instance, stp_oid); + return true; +} + +bool StpOrch::addVlanToStpInstance(string vlan_alias, sai_uint16_t stp_instance) +{ + SWSS_LOG_ENTER(); + + Port vlan; + sai_object_id_t stp_oid; + sai_attribute_t attr; + + if (!gPortsOrch->getPort(vlan_alias, vlan)) + { + return false; + } + + stp_oid = getStpInstanceOid(stp_instance); + if (stp_oid == SAI_NULL_OBJECT_ID) + { + stp_oid = addStpInstance(stp_instance); + if(stp_oid == SAI_NULL_OBJECT_ID) + return false; + } + + attr.id = SAI_VLAN_ATTR_STP_INSTANCE; + attr.value.oid = stp_oid; + + sai_status_t status = sai_vlan_api->set_vlan_attribute(vlan.m_vlan_info.vlan_oid, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to add VLAN %s to STP instance:%hu status %u", vlan_alias.c_str(), stp_instance, status); + return false; + } + + vlan.m_stp_id = stp_instance; + gPortsOrch->setPort(vlan_alias, vlan); + + // Update the new map structure + auto it = m_vlanAliasToStpInstanceMap.find(stp_instance); + if (it == m_vlanAliasToStpInstanceMap.end()) + { + StpInstEntry entry = {stp_oid, {vlan_alias}}; + m_vlanAliasToStpInstanceMap[stp_instance] = entry; + } + else + { + it->second.stp_inst_vlan_list.insert(vlan_alias); + } + + SWSS_LOG_INFO("Add VLAN %s to STP instance:%hu m_stp_id:%d", vlan_alias.c_str(), stp_instance, vlan.m_stp_id); + return true; +} + +bool StpOrch::removeVlanFromStpInstance(string vlan_alias, sai_uint16_t stp_instance) +{ + SWSS_LOG_ENTER(); + + Port vlan; + sai_attribute_t attr; + + if (!gPortsOrch->getPort(vlan_alias, vlan)) + { + return false; + } + + attr.id = SAI_VLAN_ATTR_STP_INSTANCE; + attr.value.oid = m_defaultStpId; + + sai_status_t status = sai_vlan_api->set_vlan_attribute(vlan.m_vlan_info.vlan_oid, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove VLAN %s from STP instance:%d status %u", vlan_alias.c_str(), vlan.m_stp_id, status); + return false; + } + + SWSS_LOG_INFO("Remove %s from instance:%d add instance:%" PRIx64 "", vlan_alias.c_str(), vlan.m_stp_id, m_defaultStpId); + + // Update the new map structure + auto it = m_vlanAliasToStpInstanceMap.find(stp_instance); + if (it != m_vlanAliasToStpInstanceMap.end()) + { + it->second.stp_inst_vlan_list.erase(vlan_alias); + if (it->second.stp_inst_vlan_list.empty()) + { + //removeStpInstance(stp_instance); + m_vlanAliasToStpInstanceMap.erase(it); + } + } + removeStpInstance(vlan.m_stp_id); + vlan.m_stp_id = -1; + gPortsOrch->setPort(vlan_alias, vlan); + return true; +} + +/* If STP Port exists return else create a new STP Port */ +sai_object_id_t StpOrch::addStpPort(Port &port, sai_uint16_t stp_instance) +{ + sai_object_id_t stp_port_id = SAI_NULL_OBJECT_ID; + sai_object_id_t stp_id = SAI_NULL_OBJECT_ID; + sai_attribute_t attr[3]; + + if(port.m_stp_port_ids.find(stp_instance) != port.m_stp_port_ids.end()) + { + return port.m_stp_port_ids[stp_instance]; + } + + if(port.m_bridge_port_id == SAI_NULL_OBJECT_ID) + { + gPortsOrch->addBridgePort(port); + + if(port.m_bridge_port_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to add STP port %s invalid bridge port id STP instance %d", port.m_alias.c_str(), stp_instance); + return SAI_NULL_OBJECT_ID; + } + } + attr[0].id = SAI_STP_PORT_ATTR_BRIDGE_PORT; + attr[0].value.oid = port.m_bridge_port_id; + + stp_id = getStpInstanceOid(stp_instance); + if(stp_id == SAI_NULL_OBJECT_ID) + { + stp_id = addStpInstance(stp_instance); + if(stp_id == SAI_NULL_OBJECT_ID) + { + return SAI_NULL_OBJECT_ID; + } + } + + attr[1].id = SAI_STP_PORT_ATTR_STP; + attr[1].value.oid = stp_id; + + attr[2].id = SAI_STP_PORT_ATTR_STATE; + attr[2].value.s32 = SAI_STP_PORT_STATE_BLOCKING; + + sai_status_t status = sai_stp_api->create_stp_port(&stp_port_id, gSwitchId, 3, attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to add STP port %s instance %d status %u", port.m_alias.c_str(), stp_instance, status); + return SAI_NULL_OBJECT_ID; + } + + SWSS_LOG_INFO("Add STP port %s instance %d oid %" PRIx64 " size %zu", port.m_alias.c_str(), stp_instance, stp_port_id, port.m_stp_port_ids.size()); + port.m_stp_port_ids[stp_instance] = stp_port_id; + gPortsOrch->setPort(port.m_alias, port); + return stp_port_id; +} + +bool StpOrch::removeStpPort(Port &port, sai_uint16_t stp_instance) +{ + if(port.m_stp_port_ids.find(stp_instance) == port.m_stp_port_ids.end()) + { + /* Deletion could have already happened as part of other flows, so ignore this msg*/ + return true; + } + + sai_status_t status = sai_stp_api->remove_stp_port(port.m_stp_port_ids[stp_instance]); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove STP port %s instance %d oid %" PRIx64 " status %x", port.m_alias.c_str(), stp_instance, + port.m_stp_port_ids[stp_instance], status); + return false; + } + + SWSS_LOG_INFO("Remove STP port %s instance %d oid %" PRIx64 " size %zu", port.m_alias.c_str(), stp_instance, + port.m_stp_port_ids[stp_instance], port.m_stp_port_ids.size()); + port.m_stp_port_ids.erase(stp_instance); + gPortsOrch->setPort(port.m_alias, port); + return true; +} + +bool StpOrch::removeStpPorts(Port &port) +{ + if(port.m_stp_port_ids.empty()) + return true; + + for(auto stp_port_id: port.m_stp_port_ids) + { + uint16_t stp_instance = stp_port_id.first; + sai_object_id_t stp_port_oid = stp_port_id.second; + + if(stp_port_oid == SAI_NULL_OBJECT_ID) + { + continue; + } + + sai_status_t status = sai_stp_api->remove_stp_port(stp_port_oid); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove STP port %s instance %d oid %" PRIx64 " status %x", port.m_alias.c_str(), stp_instance, stp_port_oid, status); + } + else + { + SWSS_LOG_INFO("Remove STP port %s instance %d oid %" PRIx64 "", port.m_alias.c_str(), stp_instance, stp_port_oid); + } + } + + port.m_stp_port_ids.clear(); + gPortsOrch->setPort(port.m_alias, port); + return true; +} + +sai_stp_port_state_t StpOrch::getStpSaiState(sai_uint8_t stp_state) +{ + sai_stp_port_state_t state = SAI_STP_PORT_STATE_BLOCKING; + + switch(stp_state) + { + case STP_STATE_DISABLED: + case STP_STATE_BLOCKING: + case STP_STATE_LISTENING: + state = SAI_STP_PORT_STATE_BLOCKING; + break; + + case STP_STATE_LEARNING: + state = SAI_STP_PORT_STATE_LEARNING; + break; + + case STP_STATE_FORWARDING: + state = SAI_STP_PORT_STATE_FORWARDING; + break; + } + return state; +} + +bool StpOrch::updateStpPortState(Port &port, sai_uint16_t stp_instance, sai_uint8_t stp_state) +{ + sai_attribute_t attr[1]; + sai_object_id_t stp_port_oid; + + stp_port_oid = addStpPort(port, stp_instance); + if (stp_port_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to get STP port oid port %s instance %d state %d ", port.m_alias.c_str(), stp_instance, stp_state); + return true; + } + attr[0].id = SAI_STP_PORT_ATTR_STATE; + attr[0].value.u32 = getStpSaiState(stp_state); + + sai_status_t status = sai_stp_api->set_stp_port_attribute(stp_port_oid, attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set STP port state %s instance %d state %d status %x", port.m_alias.c_str(), stp_instance, stp_state, status); + return false; + } + + SWSS_LOG_INFO("Set STP port state %s instance %d state %d ", port.m_alias.c_str(), stp_instance, stp_state); + + return true; +} + +bool StpOrch::stpVlanFdbFlush(string vlan_alias) +{ + SWSS_LOG_ENTER(); + + Port vlan; + + if (!gPortsOrch->getPort(vlan_alias, vlan)) + { + return false; + } + + gFdbOrch->flushFdbByVlan(vlan_alias); + + SWSS_LOG_INFO("Set STP FDB flush vlan %s ", vlan_alias.c_str()); + return true; +} + +void StpOrch::doStpTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto &t = it->second; + + string vlan_alias = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + uint16_t instance = STP_INVALID_INSTANCE; + + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "stp_instance") + { + instance = (uint16_t)std::stoi(fvValue(i)); + } + } + + if(instance == STP_INVALID_INSTANCE) + { + SWSS_LOG_ERROR("No instance found for VLAN %s", vlan_alias.c_str()); + } + else + { + if(!addVlanToStpInstance(vlan_alias, instance)) + { + it++; + continue; + } + } + } + else if (op == DEL_COMMAND) + { + if(!removeVlanFromStpInstance(vlan_alias, 0)) + { + it++; + continue; + } + } + it = consumer.m_toSync.erase(it); + } +} + +void StpOrch::doStpPortStateTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto &t = it->second; + string key = kfvKey(t); + size_t found = key.find(':'); + /* Return if the format of key is wrong */ + if (found == string::npos) + { + return; + } + string port_alias = key.substr(0, found); + string stp_instance = key.substr(found+1); + uint16_t instance = (uint16_t)std::stoi(stp_instance); + Port port; + + if (!gPortsOrch->getPort(port_alias, port)) + { + return; + } + + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + uint8_t state = STP_STATE_INVALID; + + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "state") + { + state = (uint8_t)std::stoi(fvValue(i)); + } + } + if(state != STP_STATE_INVALID) + { + if(!updateStpPortState(port, instance, state)) + { + it++; + continue; + } + } + } + else if (op == DEL_COMMAND) + { + if(!removeStpPort(port, instance)) + { + it++; + continue; + } + } + it = consumer.m_toSync.erase(it); + } +} + +void StpOrch::doStpFastageTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + for (auto it = consumer.m_toSync.begin(); it != consumer.m_toSync.end(); ) + { + auto &t = it->second; + string op = kfvOp(t); + string vlan_alias = kfvKey(t); + + if (op == SET_COMMAND) + { + string state; + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "state") + state = fvValue(i); + } + + if(state.compare("true") == 0) + { + stpVlanFdbFlush(vlan_alias); + } + } + else if (op == DEL_COMMAND) + { + // no operation + } + + it = consumer.m_toSync.erase(it); + } +} + +void StpOrch::doMstInstPortFlushTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + for (auto it = consumer.m_toSync.begin(); it != consumer.m_toSync.end(); ) + { + auto &t = it->second; + string op = kfvOp(t); + string key = kfvKey(t); + size_t found = key.find(':'); + /* Return if the format of key is wrong */ + if (found == string::npos) + { + return; + } + + if (op == SET_COMMAND) + { + string state; + + string instance_alias = key.substr(0, found); + string port_alias = key.substr(found+1); + uint16_t instance = static_cast(stoi(instance_alias)); + + for (auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "state") + state = fvValue(i); + } + + if (state.compare("true") == 0) + { + // Get all VLAN aliases for the given STP instance + auto it_map = m_vlanAliasToStpInstanceMap.find(instance); + if (it_map != m_vlanAliasToStpInstanceMap.end()) + { + for (const auto& vlan_alias : it_map->second.stp_inst_vlan_list) + { + stpVlanFdbFlush(vlan_alias); + } + } + } + } + else if (op == DEL_COMMAND) + { + // Handle delete command if necessary + } + + it = consumer.m_toSync.erase(it); + } +} + + +void StpOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + if (!gPortsOrch->allPortsReady()) + { + return; + } + + string table_name = consumer.getTableName(); + if (table_name == APP_STP_VLAN_INSTANCE_TABLE_NAME) + { + doStpTask(consumer); + } + else if (table_name == APP_STP_PORT_STATE_TABLE_NAME) + { + doStpPortStateTask(consumer); + } + else if (table_name == APP_STP_FASTAGEING_FLUSH_TABLE_NAME) + { + doStpFastageTask(consumer); + } + + else if (table_name == APP_STP_INST_PORT_FLUSH_TABLE_NAME) + { + doMstInstPortFlushTask(consumer); + } +} + +bool StpOrch::updateMaxStpInstance(uint32_t max_stp_instances) +{ + m_maxStpInstance = (sai_uint16_t)max_stp_instances - 1; + + SWSS_LOG_NOTICE("StpOrch Max STP instances %d", m_maxStpInstance); + + vector tuples; + FieldValueTuple tuple("max_stp_inst", to_string(m_maxStpInstance)); + tuples.push_back(tuple); + m_stpTable->set("GLOBAL", tuples); + + return true; + +} \ No newline at end of file diff --git a/orchagent/stporch.h b/orchagent/stporch.h new file mode 100644 index 00000000000..6d27b133b77 --- /dev/null +++ b/orchagent/stporch.h @@ -0,0 +1,64 @@ +#ifndef SWSS_STPORCH_H +#define SWSS_STPORCH_H + +#include +#include +#include "orch.h" + +#define STP_INVALID_INSTANCE 0xFFFF +#define APP_STP_INST_PORT_FLUSH_TABLE_NAME "STP_INST_PORT_FLUSH_TABLE" + +typedef enum _stp_state +{ + STP_STATE_DISABLED = 0, + STP_STATE_BLOCKING = 1, + STP_STATE_LISTENING = 2, + STP_STATE_LEARNING = 3, + STP_STATE_FORWARDING = 4, + STP_STATE_INVALID = 5 +}stp_state; + +typedef struct StpInstEntry +{ + sai_object_id_t stp_inst_oid; + std::set stp_inst_vlan_list; +} StpInstEntry; + + +class StpOrch : public Orch +{ +public: + StpOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames); + bool stpVlanFdbFlush(string vlan_alias); + bool updateMaxStpInstance(uint32_t max_stp_instance); + bool removeStpPorts(Port &port); + bool removeVlanFromStpInstance(string vlan, sai_uint16_t stp_instance); + +private: + unique_ptr
m_stpTable; + std::map m_stpInstToOid;//Mapping from STP instance id to corresponding object id + sai_object_id_t m_defaultStpId; + std::map m_vlanAliasToStpInstanceMap; + + sai_uint16_t m_maxStpInstance; + + + void doStpTask(Consumer &consumer); + void doStpPortStateTask(Consumer &consumer); + void doStpFastageTask(Consumer &consumer); + void doStpVlanIntfFlushTask(Consumer &consumer); + void doMstInstPortFlushTask(Consumer &consumer); + + sai_object_id_t addStpInstance(sai_uint16_t stp_instance); + bool removeStpInstance(sai_uint16_t stp_instance); + bool addVlanToStpInstance(string vlan, sai_uint16_t stp_instance); + sai_object_id_t getStpInstanceOid(sai_uint16_t stp_instance); + + sai_object_id_t addStpPort(Port &port, sai_uint16_t stp_instance); + bool removeStpPort(Port &port, sai_uint16_t stp_instance); + sai_stp_port_state_t getStpSaiState(sai_uint8_t stp_state); + bool updateStpPortState(Port &port, sai_uint16_t stp_instance, sai_uint8_t stp_state); + + void doTask(Consumer& consumer); +}; +#endif /* SWSS_STPORCH_H */ \ No newline at end of file diff --git a/orchagent/switch/switch_capabilities.cpp b/orchagent/switch/switch_capabilities.cpp index d1f191bf391..049d9894e94 100644 --- a/orchagent/switch/switch_capabilities.cpp +++ b/orchagent/switch/switch_capabilities.cpp @@ -63,7 +63,8 @@ static const std::unordered_map swHashHash { SAI_NATIVE_HASH_FIELD_INNER_DST_IP, SWITCH_HASH_FIELD_INNER_DST_IP }, { SAI_NATIVE_HASH_FIELD_INNER_SRC_IP, SWITCH_HASH_FIELD_INNER_SRC_IP }, { SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT, SWITCH_HASH_FIELD_INNER_L4_DST_PORT }, - { SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT, SWITCH_HASH_FIELD_INNER_L4_SRC_PORT } + { SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT, SWITCH_HASH_FIELD_INNER_L4_SRC_PORT }, + { SAI_NATIVE_HASH_FIELD_IPV6_FLOW_LABEL, SWITCH_HASH_FIELD_IPV6_FLOW_LABEL } }; static const std::unordered_map swHashAlgorithmMap = diff --git a/orchagent/switch/switch_helper.cpp b/orchagent/switch/switch_helper.cpp index 23a7c3fd5a6..8d2d5d71aa7 100644 --- a/orchagent/switch/switch_helper.cpp +++ b/orchagent/switch/switch_helper.cpp @@ -38,7 +38,8 @@ static const std::unordered_map swHashHash { SWITCH_HASH_FIELD_INNER_DST_IP, SAI_NATIVE_HASH_FIELD_INNER_DST_IP }, { SWITCH_HASH_FIELD_INNER_SRC_IP, SAI_NATIVE_HASH_FIELD_INNER_SRC_IP }, { SWITCH_HASH_FIELD_INNER_L4_DST_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT }, - { SWITCH_HASH_FIELD_INNER_L4_SRC_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT } + { SWITCH_HASH_FIELD_INNER_L4_SRC_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT }, + { SWITCH_HASH_FIELD_IPV6_FLOW_LABEL, SAI_NATIVE_HASH_FIELD_IPV6_FLOW_LABEL } }; static const std::unordered_map swHashAlgorithmMap = diff --git a/orchagent/switch/switch_schema.h b/orchagent/switch/switch_schema.h index 16a17f179c6..64c8360170b 100644 --- a/orchagent/switch/switch_schema.h +++ b/orchagent/switch/switch_schema.h @@ -20,6 +20,7 @@ #define SWITCH_HASH_FIELD_INNER_SRC_IP "INNER_SRC_IP" #define SWITCH_HASH_FIELD_INNER_L4_DST_PORT "INNER_L4_DST_PORT" #define SWITCH_HASH_FIELD_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" +#define SWITCH_HASH_FIELD_IPV6_FLOW_LABEL "IPV6_FLOW_LABEL" #define SWITCH_HASH_ECMP_HASH "ecmp_hash" #define SWITCH_HASH_LAG_HASH "lag_hash" diff --git a/orchagent/switch/trimming/capabilities.cpp b/orchagent/switch/trimming/capabilities.cpp new file mode 100644 index 00000000000..e97977865a0 --- /dev/null +++ b/orchagent/switch/trimming/capabilities.cpp @@ -0,0 +1,745 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +extern "C" { +#include +#include +#include +#include +} + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "schema.h" +#include "capabilities.h" + +using namespace swss; + +// defines ------------------------------------------------------------------------------------------------------------ + +#define CAPABILITY_SWITCH_DSCP_RESOLUTION_MODE_FIELD "SWITCH|PACKET_TRIMMING_DSCP_RESOLUTION_MODE" +#define CAPABILITY_SWITCH_QUEUE_RESOLUTION_MODE_FIELD "SWITCH|PACKET_TRIMMING_QUEUE_RESOLUTION_MODE" +#define CAPABILITY_SWITCH_NUMBER_OF_TRAFFIC_CLASSES_FIELD "SWITCH|NUMBER_OF_TRAFFIC_CLASSES" +#define CAPABILITY_SWITCH_NUMBER_OF_UNICAST_QUEUES_FIELD "SWITCH|NUMBER_OF_UNICAST_QUEUES" + +#define CAPABILITY_SWITCH_TRIMMING_CAPABLE_FIELD "SWITCH_TRIMMING_CAPABLE" + +#define CAPABILITY_KEY "switch" + +#define SWITCH_STATE_DB_NAME "STATE_DB" +#define SWITCH_STATE_DB_TIMEOUT 0 + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::unordered_map dscpModeMap = +{ + { SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_DSCP_VALUE, SWITCH_TRIMMING_DSCP_MODE_DSCP_VALUE }, + { SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_FROM_TC, SWITCH_TRIMMING_DSCP_MODE_FROM_TC } +}; + +static const std::unordered_map queueModeMap = +{ + { SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_STATIC, SWITCH_TRIMMING_QUEUE_MODE_STATIC }, + { SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_DYNAMIC, SWITCH_TRIMMING_QUEUE_MODE_DYNAMIC } +}; + +// variables ---------------------------------------------------------------------------------------------------------- + +extern sai_object_id_t gSwitchId; +extern sai_switch_api_t *sai_switch_api; + +// functions ---------------------------------------------------------------------------------------------------------- + +static std::string toStr(sai_object_type_t objType, sai_attr_id_t attrId) +{ + const auto *meta = sai_metadata_get_attr_metadata(objType, attrId); + + return meta != nullptr ? meta->attridname : "UNKNOWN"; +} + +static std::string toStr(sai_packet_trim_dscp_resolution_mode_t value) +{ + const auto *name = sai_metadata_get_packet_trim_dscp_resolution_mode_name(value); + + return name != nullptr ? name : "UNKNOWN"; +} + +static std::string toStr(const std::set &value) +{ + std::vector strList; + + for (const auto &cit1 : value) + { + const auto &cit2 = dscpModeMap.find(cit1); + if (cit2 != dscpModeMap.cend()) + { + strList.push_back(cit2->second); + } + } + + return join(",", strList.cbegin(), strList.cend()); +} + +static std::string toStr(sai_packet_trim_queue_resolution_mode_t value) +{ + const auto *name = sai_metadata_get_packet_trim_queue_resolution_mode_name(value); + + return name != nullptr ? name : "UNKNOWN"; +} + +static std::string toStr(const std::set &value) +{ + std::vector strList; + + for (const auto &cit1 : value) + { + const auto &cit2 = queueModeMap.find(cit1); + if (cit2 != queueModeMap.cend()) + { + strList.push_back(cit2->second); + } + } + + return join(",", strList.cbegin(), strList.cend()); +} + +static std::string toStr(sai_status_t value) +{ + const auto *name = sai_metadata_get_status_name(value); + + return name != nullptr ? name : "UNKNOWN"; +} + +static std::string toStr(sai_uint8_t value) +{ + return std::to_string(value); +} + +static std::string toStr(sai_uint32_t value) +{ + return std::to_string(value); +} + +static std::string toStr(bool value) +{ + return value ? "true" : "false"; +} + +// capabilities ------------------------------------------------------------------------------------------------------- + +SwitchTrimmingCapabilities::SwitchTrimmingCapabilities() +{ + queryCapabilities(); + writeCapabilitiesToDb(); +} + +bool SwitchTrimmingCapabilities::isSwitchTrimmingSupported() const +{ + auto size = trimCap.size.isAttrSupported; + auto dscpMode = trimCap.dscp.mode.isAttrSupported; + auto dscp = true; + auto tc = true; + auto queueMode = trimCap.queue.mode.isAttrSupported; + auto queueIndex = true; + + // Do not care of dscp configuration capabilities, + // if DSCP_VALUE dscp resolution mode is not supported + if (trimCap.dscp.mode.isDscpValueModeSupported) + { + dscp = trimCap.dscp.isAttrSupported; + } + + // Do not care of tc configuration capabilities, + // if FROM_TC dscp resolution mode is not supported + if (trimCap.dscp.mode.isFromTcModeSupported) + { + tc = trimCap.tc.isAttrSupported; + } + + // Do not care of queue index configuration capabilities, + // if STATIC queue resolution mode is not supported + if (trimCap.queue.mode.isStaticModeSupported) + { + queueIndex = trimCap.queue.index.isAttrSupported; + } + + return size && dscpMode && dscp && tc && queueMode && queueIndex; +} + +bool SwitchTrimmingCapabilities::validateTrimDscpModeCap(sai_packet_trim_dscp_resolution_mode_t value) const +{ + SWSS_LOG_ENTER(); + + if (!trimCap.dscp.mode.isEnumSupported) + { + return true; + } + + if (trimCap.dscp.mode.mSet.empty()) + { + SWSS_LOG_ERROR("Failed to validate dscp resolution mode: no capabilities"); + return false; + } + + if (trimCap.dscp.mode.mSet.count(value) == 0) + { + SWSS_LOG_ERROR("Failed to validate dscp resolution mode: value(%s) is not supported", toStr(value).c_str()); + return false; + } + + return true; +} + +bool SwitchTrimmingCapabilities::validateTrimTcCap(sai_uint8_t value) const +{ + SWSS_LOG_ENTER(); + + if (!genCap.tcNum.isAttrSupported) + { + return true; + } + + auto maxTC = genCap.tcNum.value - 1; + + if (!(value <= maxTC)) + { + SWSS_LOG_ERROR( + "Failed to validate traffic class: value(%u) is out of range: 0 <= class <= %u", + value, maxTC + ); + return false; + } + + return true; +} + +bool SwitchTrimmingCapabilities::validateTrimQueueModeCap(sai_packet_trim_queue_resolution_mode_t value) const +{ + SWSS_LOG_ENTER(); + + if (!trimCap.queue.mode.isEnumSupported) + { + return true; + } + + if (trimCap.queue.mode.mSet.empty()) + { + SWSS_LOG_ERROR("Failed to validate queue resolution mode: no capabilities"); + return false; + } + + if (trimCap.queue.mode.mSet.count(value) == 0) + { + SWSS_LOG_ERROR("Failed to validate queue resolution mode: value(%s) is not supported", toStr(value).c_str()); + return false; + } + + return true; +} + +bool SwitchTrimmingCapabilities::validateQueueIndexCap(sai_uint32_t value) const +{ + SWSS_LOG_ENTER(); + + if (!genCap.uqNum.isAttrSupported) + { + return true; + } + + auto maxUQIdx = genCap.uqNum.value - 1; + + if (!(value <= maxUQIdx)) + { + SWSS_LOG_ERROR( + "Failed to validate queue index: value(%u) is out of range: 0 <= index <= %u", + value, maxUQIdx + ); + return false; + } + + return true; +} + +sai_status_t SwitchTrimmingCapabilities::queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + sai_s32_list_t enumList = { .count = 0, .list = nullptr }; + + auto status = sai_query_attribute_enum_values_capability(gSwitchId, objType, attrId, &enumList); + if ((status != SAI_STATUS_SUCCESS) && (status != SAI_STATUS_BUFFER_OVERFLOW)) + { + return status; + } + + capList.resize(enumList.count); + enumList.list = capList.data(); + + return sai_query_attribute_enum_values_capability(gSwitchId, objType, attrId, &enumList); +} + +sai_status_t SwitchTrimmingCapabilities::queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + return sai_query_attribute_capability(gSwitchId, objType, attrId, &attrCap); +} + +void SwitchTrimmingCapabilities::queryTrimSizeAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_SIZE + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_SIZE).c_str(), + toStr(status).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_SIZE).c_str() + ); + return; + } + + trimCap.size.isAttrSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimDscpModeEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector mList; + auto status = queryEnumCapabilitiesSai( + mList, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) enum value capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE).c_str(), + toStr(status).c_str() + ); + return; + } + + auto &mSet = trimCap.dscp.mode.mSet; + std::transform( + mList.cbegin(), mList.cend(), std::inserter(mSet, mSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + if (!mSet.empty()) + { + if (mSet.count(SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_DSCP_VALUE) == 0) + { + trimCap.dscp.mode.isDscpValueModeSupported = false; + } + + if (mSet.count(SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_FROM_TC) == 0) + { + trimCap.dscp.mode.isFromTcModeSupported = false; + } + } + else + { + trimCap.dscp.mode.isDscpValueModeSupported = false; + trimCap.dscp.mode.isFromTcModeSupported = false; + } + + trimCap.dscp.mode.isEnumSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimDscpModeAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE).c_str(), + toStr(status).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE).c_str() + ); + return; + } + + trimCap.dscp.mode.isAttrSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimDscpAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE).c_str(), + toStr(status).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE).c_str() + ); + return; + } + + trimCap.dscp.isAttrSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimTcAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE).c_str(), + toStr(status).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE).c_str() + ); + return; + } + + trimCap.tc.isAttrSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimQueueModeEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector mList; + auto status = queryEnumCapabilitiesSai( + mList, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) enum value capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE).c_str(), + toStr(status).c_str() + ); + return; + } + + auto &mSet = trimCap.queue.mode.mSet; + std::transform( + mList.cbegin(), mList.cend(), std::inserter(mSet, mSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + if (mSet.empty() || (mSet.count(SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_STATIC) == 0)) + { + trimCap.queue.mode.isStaticModeSupported = false; + } + + trimCap.queue.mode.isEnumSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimQueueModeAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE).c_str(), + toStr(status).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE).c_str() + ); + return; + } + + trimCap.queue.mode.isAttrSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimQueueIndexAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE( + "Attribute(%s) capabilities are not available: unexpected status(%s)", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX).c_str(), + toStr(status).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX).c_str() + ); + return; + } + + trimCap.queue.index.isAttrSupported = true; +} + +void SwitchTrimmingCapabilities::queryTrimTrafficClassNumberAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES).c_str() + ); + return; + } + + if (!attrCap.get_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) GET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES).c_str() + ); + return; + } + + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) value", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES).c_str() + ); + return; + } + + if (attr.value.u8 == 0) + { + SWSS_LOG_WARN( + "Unexpected attribute(%s) value: traffic classes are not supported", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES).c_str() + ); + return; + } + + genCap.tcNum.isAttrSupported = true; + genCap.tcNum.value = attr.value.u8; +} + +void SwitchTrimmingCapabilities::queryTrimUnicastQueueNumberAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_NUMBER_OF_UNICAST_QUEUES + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_NUMBER_OF_UNICAST_QUEUES).c_str() + ); + return; + } + + if (!attrCap.get_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) GET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_NUMBER_OF_UNICAST_QUEUES).c_str() + ); + return; + } + + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_NUMBER_OF_UNICAST_QUEUES; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) value", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_NUMBER_OF_UNICAST_QUEUES).c_str() + ); + return; + } + + if (attr.value.u32 == 0) + { + SWSS_LOG_WARN( + "Unexpected attribute(%s) value: unicast queues are not supported", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_NUMBER_OF_UNICAST_QUEUES).c_str() + ); + return; + } + + genCap.uqNum.isAttrSupported = true; + genCap.uqNum.value = attr.value.u32; +} + +void SwitchTrimmingCapabilities::queryCapabilities() +{ + queryTrimSizeAttrCapabilities(); + + queryTrimDscpModeEnumCapabilities(); + queryTrimDscpModeAttrCapabilities(); + + queryTrimDscpAttrCapabilities(); + queryTrimTcAttrCapabilities(); + + queryTrimQueueModeEnumCapabilities(); + queryTrimQueueModeAttrCapabilities(); + + queryTrimQueueIndexAttrCapabilities(); + + queryTrimTrafficClassNumberAttrCapabilities(); + queryTrimUnicastQueueNumberAttrCapabilities(); +} + +FieldValueTuple SwitchTrimmingCapabilities::makeSwitchTrimmingCapDbEntry() const +{ + auto field = CAPABILITY_SWITCH_TRIMMING_CAPABLE_FIELD; + auto value = toStr(isSwitchTrimmingSupported()); + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchTrimmingCapabilities::makeDscpModeCapDbEntry() const +{ + auto field = CAPABILITY_SWITCH_DSCP_RESOLUTION_MODE_FIELD; + auto value = trimCap.dscp.mode.isEnumSupported ? toStr(trimCap.dscp.mode.mSet) : "N/A"; + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchTrimmingCapabilities::makeQueueModeCapDbEntry() const +{ + auto field = CAPABILITY_SWITCH_QUEUE_RESOLUTION_MODE_FIELD; + auto value = trimCap.queue.mode.isEnumSupported ? toStr(trimCap.queue.mode.mSet) : "N/A"; + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchTrimmingCapabilities::makeTrafficClassNumberCapDbEntry() const +{ + auto field = CAPABILITY_SWITCH_NUMBER_OF_TRAFFIC_CLASSES_FIELD; + auto value = genCap.tcNum.isAttrSupported ? toStr(genCap.tcNum.value) : "N/A"; + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchTrimmingCapabilities::makeUnicastQueueNumberCapDbEntry() const +{ + auto field = CAPABILITY_SWITCH_NUMBER_OF_UNICAST_QUEUES_FIELD; + auto value = genCap.uqNum.isAttrSupported ? toStr(genCap.uqNum.value) : "N/A"; + + return FieldValueTuple(field, value); +} + +void SwitchTrimmingCapabilities::writeCapabilitiesToDb() +{ + SWSS_LOG_ENTER(); + + DBConnector stateDb(SWITCH_STATE_DB_NAME, SWITCH_STATE_DB_TIMEOUT); + Table capTable(&stateDb, STATE_SWITCH_CAPABILITY_TABLE_NAME); + + std::vector fvList = { + makeSwitchTrimmingCapDbEntry(), + makeDscpModeCapDbEntry(), + makeQueueModeCapDbEntry(), + makeTrafficClassNumberCapDbEntry(), + makeUnicastQueueNumberCapDbEntry() + }; + + capTable.set(CAPABILITY_KEY, fvList); + + SWSS_LOG_NOTICE( + "Wrote switch trimming capabilities to State DB: %s key", + capTable.getKeyName(CAPABILITY_KEY).c_str() + ); +} diff --git a/orchagent/switch/trimming/capabilities.h b/orchagent/switch/trimming/capabilities.h new file mode 100644 index 00000000000..ee62889ff01 --- /dev/null +++ b/orchagent/switch/trimming/capabilities.h @@ -0,0 +1,98 @@ +#pragma once + +extern "C" { +#include +#include +#include +} + +#include +#include + +class SwitchTrimmingCapabilities final +{ +public: + SwitchTrimmingCapabilities(); + ~SwitchTrimmingCapabilities() = default; + + bool isSwitchTrimmingSupported() const; + + bool validateTrimDscpModeCap(sai_packet_trim_dscp_resolution_mode_t value) const; + bool validateTrimTcCap(sai_uint8_t value) const; + bool validateTrimQueueModeCap(sai_packet_trim_queue_resolution_mode_t value) const; + bool validateQueueIndexCap(sai_uint32_t value) const; + +private: + swss::FieldValueTuple makeSwitchTrimmingCapDbEntry() const; + swss::FieldValueTuple makeDscpModeCapDbEntry() const; + swss::FieldValueTuple makeQueueModeCapDbEntry() const; + swss::FieldValueTuple makeTrafficClassNumberCapDbEntry() const; + swss::FieldValueTuple makeUnicastQueueNumberCapDbEntry() const; + + sai_status_t queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const; + sai_status_t queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const; + + void queryTrimSizeAttrCapabilities(); + void queryTrimDscpModeEnumCapabilities(); + void queryTrimDscpModeAttrCapabilities(); + void queryTrimDscpAttrCapabilities(); + void queryTrimTcAttrCapabilities(); + void queryTrimQueueModeEnumCapabilities(); + void queryTrimQueueModeAttrCapabilities(); + void queryTrimQueueIndexAttrCapabilities(); + void queryTrimTrafficClassNumberAttrCapabilities(); + void queryTrimUnicastQueueNumberAttrCapabilities(); + + void queryCapabilities(); + void writeCapabilitiesToDb(); + + struct { + struct { + bool isAttrSupported = false; + } size; // SAI_SWITCH_ATTR_PACKET_TRIM_SIZE + + struct { + struct { + std::set mSet; + bool isDscpValueModeSupported = true; + bool isFromTcModeSupported = true; + bool isEnumSupported = false; + bool isAttrSupported = false; + } mode; // SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE + + bool isAttrSupported = false; + } dscp; // SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE + + struct { + bool isAttrSupported = false; + } tc; // SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE + + struct { + struct { + std::set mSet; + bool isStaticModeSupported = true; + bool isEnumSupported = false; + bool isAttrSupported = false; + } mode; // SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE + + struct { + bool isAttrSupported = false; + } index; // SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX + } queue; + } trimCap; + + struct + { + struct { + sai_uint8_t value; + bool is_set = false; + bool isAttrSupported = false; + } tcNum; // SAI_SWITCH_ATTR_QOS_MAX_NUMBER_OF_TRAFFIC_CLASSES + + struct { + sai_uint32_t value; + bool is_set = false; + bool isAttrSupported = false; + } uqNum; // SAI_SWITCH_ATTR_NUMBER_OF_UNICAST_QUEUES + } genCap; +}; diff --git a/orchagent/switch/trimming/container.h b/orchagent/switch/trimming/container.h new file mode 100644 index 00000000000..045c2a04433 --- /dev/null +++ b/orchagent/switch/trimming/container.h @@ -0,0 +1,54 @@ +#pragma once + +extern "C" { +#include +} + +#include +#include + +class SwitchTrimming final +{ +public: + SwitchTrimming() = default; + ~SwitchTrimming() = default; + + struct { + sai_uint32_t value; + bool is_set = false; + } size; // Trim packets to this size to reduce bandwidth + + struct { + struct { + sai_packet_trim_dscp_resolution_mode_t value; + bool is_set = false; + } mode; + + sai_uint8_t value; + bool is_set = false; + } dscp; // New packet trimming DSCP value + + struct { + struct { + sai_uint8_t value; + bool is_set = false; + } cache; + + sai_uint8_t value; + bool is_set = false; + } tc; // New packet trimming TC value + + struct { + struct { + sai_packet_trim_queue_resolution_mode_t value; + bool is_set = false; + } mode; + + struct { + sai_uint8_t value; + bool is_set = false; + } index; + } queue; // New packet trimming queue index + + std::unordered_map fieldValueMap; +}; diff --git a/orchagent/switch/trimming/helper.cpp b/orchagent/switch/trimming/helper.cpp new file mode 100644 index 00000000000..7484035c825 --- /dev/null +++ b/orchagent/switch/trimming/helper.cpp @@ -0,0 +1,246 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +extern "C" { +#include +} + +#include +#include + +#include +#include + +#include + +#include +#include + +#include "schema.h" +#include "helper.h" + +using namespace swss; + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::uint8_t minDscp = 0; +static const std::uint8_t maxDscp = 63; + +// functions ---------------------------------------------------------------------------------------------------------- + +static inline std::uint8_t toUInt8(const std::string &str) +{ + return to_uint(str); +} + +static inline std::uint32_t toUInt32(const std::string &str) +{ + return to_uint(str); +} + +// helper ------------------------------------------------------------------------------------------------------------- + +bool SwitchTrimmingHelper::isSymDscpMode(const SwitchTrimming &cfg) const +{ + return cfg.dscp.mode.is_set && (cfg.dscp.mode.value == SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_DSCP_VALUE); +} + +bool SwitchTrimmingHelper::isStaticQueueMode(const SwitchTrimming &cfg) const +{ + return cfg.queue.mode.is_set && (cfg.queue.mode.value == SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_STATIC); +} + +const SwitchTrimming& SwitchTrimmingHelper::getConfig() const +{ + return cfg; +} + +void SwitchTrimmingHelper::setConfig(const SwitchTrimming &value) +{ + cfg = value; +} + +bool SwitchTrimmingHelper::parseTrimSize(SwitchTrimming &cfg, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + cfg.size.value = toUInt32(value); + cfg.size.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool SwitchTrimmingHelper::parseTrimDscp(SwitchTrimming &cfg, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + if (boost::algorithm::to_lower_copy(value) == SWITCH_TRIMMING_DSCP_VALUE_FROM_TC) + { + cfg.dscp.mode.value = SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_FROM_TC; + cfg.dscp.mode.is_set = true; + return true; + } + + try + { + cfg.dscp.value = toUInt8(value); + cfg.dscp.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (!((minDscp <= cfg.dscp.value) && (cfg.dscp.value <= maxDscp))) + { + SWSS_LOG_ERROR( + "Failed to parse field(%s): value(%s) is out of range: %u <= dscp <= %u", + field.c_str(), value.c_str(), minDscp, maxDscp + ); + return false; + } + + cfg.dscp.mode.value = SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_DSCP_VALUE; + cfg.dscp.mode.is_set = true; + + return true; +} + +bool SwitchTrimmingHelper::parseTrimTc(SwitchTrimming &cfg, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + cfg.tc.value = toUInt8(value); + cfg.tc.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool SwitchTrimmingHelper::parseTrimQueue(SwitchTrimming &cfg, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + if (boost::algorithm::to_lower_copy(value) == SWITCH_TRIMMING_QUEUE_INDEX_DYNAMIC) + { + cfg.queue.mode.value = SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_DYNAMIC; + cfg.queue.mode.is_set = true; + return true; + } + + try + { + cfg.queue.index.value = toUInt8(value); + cfg.queue.index.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + cfg.queue.mode.value = SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_STATIC; + cfg.queue.mode.is_set = true; + + return true; +} + +bool SwitchTrimmingHelper::parseTrimConfig(SwitchTrimming &cfg) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : cfg.fieldValueMap) + { + const auto &field = cit.first; + const auto &value = cit.second; + + if (field == SWITCH_TRIMMING_SIZE) + { + if (!parseTrimSize(cfg, field, value)) + { + return false; + } + } + else if (field == SWITCH_TRIMMING_DSCP_VALUE) + { + if (!parseTrimDscp(cfg, field, value)) + { + return false; + } + } + else if (field == SWITCH_TRIMMING_TC_VALUE) + { + if (!parseTrimTc(cfg, field, value)) + { + return false; + } + } + else if (field == SWITCH_TRIMMING_QUEUE_INDEX) + { + if (!parseTrimQueue(cfg, field, value)) + { + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); + } + } + + return validateTrimConfig(cfg); +} + +bool SwitchTrimmingHelper::validateTrimConfig(SwitchTrimming &cfg) const +{ + SWSS_LOG_ENTER(); + + auto cond = cfg.size.is_set || cfg.dscp.mode.is_set || cfg.tc.is_set || cfg.queue.mode.is_set; + + if (!cond) + { + SWSS_LOG_ERROR("Validation error: missing valid fields"); + return false; + } + + return true; +} diff --git a/orchagent/switch/trimming/helper.h b/orchagent/switch/trimming/helper.h new file mode 100644 index 00000000000..7cc8eb88112 --- /dev/null +++ b/orchagent/switch/trimming/helper.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +#include "container.h" + +class SwitchTrimmingHelper final +{ +public: + SwitchTrimmingHelper() = default; + ~SwitchTrimmingHelper() = default; + + bool isSymDscpMode(const SwitchTrimming &cfg) const; + bool isStaticQueueMode(const SwitchTrimming &cfg) const; + + const SwitchTrimming& getConfig() const; + void setConfig(const SwitchTrimming &cfg); + + bool parseTrimConfig(SwitchTrimming &cfg) const; + +private: + bool parseTrimSize(SwitchTrimming &cfg, const std::string &field, const std::string &value) const; + bool parseTrimDscp(SwitchTrimming &cfg, const std::string &field, const std::string &value) const; + bool parseTrimTc(SwitchTrimming &cfg, const std::string &field, const std::string &value) const; + bool parseTrimQueue(SwitchTrimming &cfg, const std::string &field, const std::string &value) const; + + bool validateTrimConfig(SwitchTrimming &cfg) const; + +private: + SwitchTrimming cfg; +}; diff --git a/orchagent/switch/trimming/schema.h b/orchagent/switch/trimming/schema.h new file mode 100644 index 00000000000..7bc58a8865e --- /dev/null +++ b/orchagent/switch/trimming/schema.h @@ -0,0 +1,18 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define SWITCH_TRIMMING_DSCP_VALUE_FROM_TC "from-tc" + +#define SWITCH_TRIMMING_DSCP_MODE_DSCP_VALUE "DSCP_VALUE" +#define SWITCH_TRIMMING_DSCP_MODE_FROM_TC "FROM_TC" + +#define SWITCH_TRIMMING_QUEUE_INDEX_DYNAMIC "dynamic" + +#define SWITCH_TRIMMING_QUEUE_MODE_STATIC "STATIC" +#define SWITCH_TRIMMING_QUEUE_MODE_DYNAMIC "DYNAMIC" + +#define SWITCH_TRIMMING_SIZE "size" +#define SWITCH_TRIMMING_DSCP_VALUE "dscp_value" +#define SWITCH_TRIMMING_TC_VALUE "tc_value" +#define SWITCH_TRIMMING_QUEUE_INDEX "queue_index" diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 06dc36e4723..9ed36e9a36d 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include "switchorch.h" #include "crmorch.h" @@ -10,6 +11,9 @@ #include "macaddress.h" #include "return_code.h" #include "saihelper.h" +#include "sai_serialize.h" +#include "notifications.h" +#include "redisapi.h" using namespace std; using namespace swss; @@ -20,6 +24,20 @@ extern sai_acl_api_t *sai_acl_api; extern sai_hash_api_t *sai_hash_api; extern MacAddress gVxlanMacAddress; extern CrmOrch *gCrmOrch; +extern event_handle_t g_events_handle; +extern string gMyAsicName; + +// defines ------------------------------------------------------------------------------------------------------------ + +#define SWITCH_STAT_COUNTER_POLLING_INTERVAL_MS 60000 + +// constants ---------------------------------------------------------------------------------------------------------- + +static const vector switch_stat_ids = +{ + SAI_SWITCH_STAT_DROPPED_TRIM_PACKETS, + SAI_SWITCH_STAT_TX_TRIM_PACKETS +}; const map switch_attribute_map = { @@ -31,7 +49,9 @@ const map switch_attribute_map = {"fdb_aging_time", SAI_SWITCH_ATTR_FDB_AGING_TIME}, {"debug_shell_enable", SAI_SWITCH_ATTR_SWITCH_SHELL_ENABLE}, {"vxlan_port", SAI_SWITCH_ATTR_VXLAN_DEFAULT_PORT}, - {"vxlan_router_mac", SAI_SWITCH_ATTR_VXLAN_DEFAULT_ROUTER_MAC} + {"vxlan_router_mac", SAI_SWITCH_ATTR_VXLAN_DEFAULT_ROUTER_MAC}, + {"ecmp_hash_offset", SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_OFFSET}, + {"lag_hash_offset", SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_OFFSET} }; const map switch_tunnel_attribute_map = @@ -47,9 +67,62 @@ const map packet_action_map = {"trap", SAI_PACKET_ACTION_TRAP} }; +const map switch_asic_sdk_health_event_severity_to_switch_attribute_map = +{ + {"fatal", SAI_SWITCH_ATTR_REG_FATAL_SWITCH_ASIC_SDK_HEALTH_CATEGORY}, + {"warning", SAI_SWITCH_ATTR_REG_WARNING_SWITCH_ASIC_SDK_HEALTH_CATEGORY}, + {"notice", SAI_SWITCH_ATTR_REG_NOTICE_SWITCH_ASIC_SDK_HEALTH_CATEGORY} +}; + +const map switch_asic_sdk_health_event_severity_reverse_map = +{ + {SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_FATAL, "fatal"}, + {SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_WARNING, "warning"}, + {SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_NOTICE, "notice"}, +}; + +const map switch_asic_sdk_health_event_category_reverse_map = +{ + {SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_SW, "software"}, + {SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW, "firmware"}, + {SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_CPU_HW, "cpu_hw"}, + {SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_ASIC_HW, "asic_hw"} +}; + +const map switch_asic_sdk_health_event_category_map = +{ + {"software", SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_SW}, + {"firmware", SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW}, + {"cpu_hw", SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_CPU_HW}, + {"asic_hw", SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_ASIC_HW} +}; + +const std::set switch_asic_sdk_health_event_category_universal_set = +{ + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_SW, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_CPU_HW, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_ASIC_HW +}; const std::set switch_non_sai_attribute_set = {"ordered_ecmp"}; +// functions ---------------------------------------------------------------------------------------------------------- + +static std::unordered_set serializeSwitchCounterStats(const std::vector statIdList) +{ + std::unordered_set stats; + + for (const auto &cit : statIdList) + { + stats.emplace(sai_serialize_switch_stat(cit)); + } + + return stats; +} + +// Switch OA ---------------------------------------------------------------------------------------------------------- + void SwitchOrch::set_switch_pfc_dlr_init_capability() { vector fvVector; @@ -75,24 +148,154 @@ SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, Tabl Orch(connectors), m_switchTable(switchTable.first, switchTable.second), m_db(db), - m_stateDb(new DBConnector(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0)), + m_stateDb(new DBConnector("STATE_DB", 0)), m_asicSensorsTable(new Table(m_stateDb.get(), ASIC_TEMPERATURE_INFO_TABLE_NAME)), - m_sensorsPollerTimer (new SelectableTimer((timespec { .tv_sec = DEFAULT_ASIC_SENSORS_POLLER_INTERVAL, .tv_nsec = 0 }))) + m_sensorsPollerTimer (new SelectableTimer((timespec { .tv_sec = DEFAULT_ASIC_SENSORS_POLLER_INTERVAL, .tv_nsec = 0 }))), + m_stateDbForNotification(new DBConnector("STATE_DB", 0)), + m_asicSdkHealthEventTable(new Table(m_stateDbForNotification.get(), STATE_ASIC_SDK_HEALTH_EVENT_TABLE_NAME)), + m_counterManager(SWITCH_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, SWITCH_STAT_COUNTER_POLLING_INTERVAL_MS, false) { m_restartCheckNotificationConsumer = new NotificationConsumer(db, "RESTARTCHECK"); auto restartCheckNotifier = new Notifier(m_restartCheckNotificationConsumer, this, "RESTARTCHECK"); Orch::addExecutor(restartCheckNotifier); + initAsicSdkHealthEventNotification(); set_switch_pfc_dlr_init_capability(); initSensorsTable(); querySwitchTpidCapability(); querySwitchPortEgressSampleCapability(); + querySwitchPortMirrorCapability(); querySwitchHashDefaults(); + setSwitchIcmpOffloadCapability(); auto executorT = new ExecutableTimer(m_sensorsPollerTimer, this, "ASIC_SENSORS_POLL_TIMER"); Orch::addExecutor(executorT); } +void SwitchOrch::generateSwitchCounterNameMap() const +{ + SWSS_LOG_ENTER(); + + DBConnector db("COUNTERS_DB", 0); + Table table(&db, COUNTERS_SWITCH_NAME_MAP); + + FieldValueTuple tuple("ASIC", sai_serialize_object_id(gSwitchId)); + std::vector fvList = { tuple }; + + table.set("", fvList); + + SWSS_LOG_NOTICE("Wrote switch name mapping to Counters DB"); +} + +void SwitchOrch::generateSwitchCounterIdList() +{ + if (m_isSwitchCounterIdListGenerated) + { + return; + } + + auto switchStats = serializeSwitchCounterStats(switch_stat_ids); + m_counterManager.setCounterIdList(gSwitchId, CounterType::SWITCH, switchStats); + + generateSwitchCounterNameMap(); + + m_isSwitchCounterIdListGenerated = true; +} + +void SwitchOrch::initAsicSdkHealthEventNotification() +{ + sai_attribute_t attr; + sai_status_t status; + vector fvVector; + vector> reg_severities = { + {SAI_SWITCH_ATTR_REG_FATAL_SWITCH_ASIC_SDK_HEALTH_CATEGORY, SWITCH_CAPABILITY_TABLE_REG_FATAL_ASIC_SDK_HEALTH_CATEGORY, "fatal"}, + {SAI_SWITCH_ATTR_REG_WARNING_SWITCH_ASIC_SDK_HEALTH_CATEGORY, SWITCH_CAPABILITY_TABLE_REG_WARNING_ASIC_SDK_HEALTH_CATEGORY, "warning"}, + {SAI_SWITCH_ATTR_REG_NOTICE_SWITCH_ASIC_SDK_HEALTH_CATEGORY, SWITCH_CAPABILITY_TABLE_REG_NOTICE_ASIC_SDK_HEALTH_CATEGORY, "notice"} + }; + + bool supported = querySwitchCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_SWITCH_ASIC_SDK_HEALTH_EVENT_NOTIFY); + if (supported) + { + attr.id = SAI_SWITCH_ATTR_SWITCH_ASIC_SDK_HEALTH_EVENT_NOTIFY; + attr.value.ptr = (void *)on_switch_asic_sdk_health_event; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register ASIC/SDK health event handler: %s", sai_serialize_status(status).c_str()); + supported = false; + } + else + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ASIC_SDK_HEALTH_EVENT_CAPABLE, "true"); + } + } + else + { + SWSS_LOG_NOTICE("ASIC/SDK health event is not supported"); + } + + DBConnector cfgDb("CONFIG_DB", 0); + Table cfgSuppressASHETable(&cfgDb, CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME); + string suppressedCategories; + bool atLeastOneSupported = false; + + if (!supported) + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ASIC_SDK_HEALTH_EVENT_CAPABLE, "false"); + for (auto c : reg_severities) + { + fvVector.emplace_back(get<1>(c), "false"); + } + set_switch_capability(fvVector); + + return; + } + + for (auto c : reg_severities) + { + supported = querySwitchCapability(SAI_OBJECT_TYPE_SWITCH, get<0>(c)); + if (supported) + { + cfgSuppressASHETable.hget(get<2>(c), "categories", suppressedCategories); + registerAsicSdkHealthEventCategories(get<0>(c), get<2>(c), suppressedCategories, true); + suppressedCategories.clear(); + + m_supportedAsicSdkHealthEventAttributes.insert(get<0>(c)); + fvVector.emplace_back(get<1>(c), "true"); + } + else + { + SWSS_LOG_NOTICE("Unsupport to register ASIC/SDK health categories for severity %s", get<2>(c).c_str()); + fvVector.emplace_back(get<1>(c), "false"); + } + atLeastOneSupported = atLeastOneSupported || supported; + } + + set_switch_capability(fvVector); + + if (atLeastOneSupported) + { + try + { + // Load the Lua script to eliminate oldest entries + string eliminateEventsLuaScript = swss::loadLuaScript("eliminate_events.lua"); + m_eliminateEventsSha = swss::loadRedisScript(m_stateDb.get(), eliminateEventsLuaScript); + + // Init timer + auto interv = timespec { .tv_sec = ASIC_SDK_HEALTH_EVENT_ELIMINATE_INTERVAL, .tv_nsec = 0 }; + m_eliminateEventsTimer = new SelectableTimer(interv); + auto executor = new ExecutableTimer(m_eliminateEventsTimer, this, "ASIC_SDK_HEALTH_EVENT_ELIMINATE_TIMER"); + Orch::addExecutor(executor); + m_eliminateEventsTimer->start(); + } + catch (...) + { + // This can happen only on mock test. If it happens on a real switch, we should log an error message + SWSS_LOG_ERROR("Unable to load the Lua script to eliminate events\n"); + } + } +} + void SwitchOrch::initAclGroupsBindToSwitch() { // Create an ACL group per stage, INGRESS, EGRESS and PRE_INGRESS @@ -401,6 +604,8 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) MacAddress mac_addr; bool invalid_attr = false; + bool ret = false; + bool unsupported_attr = false; switch (attr.id) { case SAI_SWITCH_ATTR_FDB_UNICAST_MISS_PACKET_ACTION: @@ -438,6 +643,29 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) memcpy(attr.value.mac, mac_addr.getMac(), sizeof(sai_mac_t)); break; + case SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_OFFSET: + ret = querySwitchCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_OFFSET); + if (ret == false) + { + unsupported_attr = true; + } + else + { + attr.value.u8 = to_uint(value); + } + break; + case SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_OFFSET: + ret = querySwitchCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_OFFSET); + if (ret == false) + { + unsupported_attr = true; + } + else + { + attr.value.u8 = to_uint(value); + } + break; + default: invalid_attr = true; break; @@ -445,8 +673,15 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) if (invalid_attr) { /* break from kfvFieldsValues for loop */ + SWSS_LOG_ERROR("Invalid Attribute %s", attribute.c_str()); + // Will not continue to set the rest of the attributes break; } + if (unsupported_attr){ + SWSS_LOG_ERROR("Unsupported Attribute %s", attribute.c_str()); + // Continue to set the rest of the attributes, even if current attribute is unsupported + continue; + } sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); if (status != SAI_STATUS_SUCCESS) @@ -726,153 +961,747 @@ void SwitchOrch::doCfgSwitchHashTableTask(Consumer &consumer) } } -void SwitchOrch::doTask(Consumer &consumer) +bool SwitchOrch::setSwitchTrimmingSizeSai(const SwitchTrimming &trim) const { - SWSS_LOG_ENTER(); + sai_attribute_t attr; - const auto &tableName = consumer.getTableName(); + attr.id = SAI_SWITCH_ATTR_PACKET_TRIM_SIZE; + attr.value.u32 = trim.size.value; - if (tableName == APP_SWITCH_TABLE_NAME) - { - doAppSwitchTableTask(consumer); - } - else if (tableName == CFG_ASIC_SENSORS_TABLE_NAME) - { - doCfgSensorsTableTask(consumer); - } - else if (tableName == CFG_SWITCH_HASH_TABLE_NAME) - { - doCfgSwitchHashTableTask(consumer); - } - else - { - SWSS_LOG_ERROR("Unknown table : %s", tableName.c_str()); - } + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + return status == SAI_STATUS_SUCCESS; } -void SwitchOrch::doTask(NotificationConsumer& consumer) +bool SwitchOrch::setSwitchTrimmingDscpModeSai(const SwitchTrimming &trim) const { - SWSS_LOG_ENTER(); + sai_attribute_t attr; - std::string op; - std::string data; - std::vector values; + attr.id = SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE; + attr.value.s32 = trim.dscp.mode.value; - consumer.pop(op, data, values); + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + return status == SAI_STATUS_SUCCESS; +} - if (&consumer != m_restartCheckNotificationConsumer) - { - return; - } +bool SwitchOrch::setSwitchTrimmingDscpSai(const SwitchTrimming &trim) const +{ + sai_attribute_t attr; - m_warmRestartCheck.checkRestartReadyState = false; - m_warmRestartCheck.noFreeze = false; - m_warmRestartCheck.skipPendingTaskCheck = false; + attr.id = SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE; + attr.value.u8 = trim.dscp.value; - SWSS_LOG_NOTICE("RESTARTCHECK notification for %s ", op.c_str()); - if (op == "orchagent") - { - string s = op; + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + return status == SAI_STATUS_SUCCESS; +} - m_warmRestartCheck.checkRestartReadyState = true; - for (auto &i : values) - { - s += "|" + fvField(i) + ":" + fvValue(i); +bool SwitchOrch::setSwitchTrimmingTcSai(const SwitchTrimming &trim) const +{ + sai_attribute_t attr; - if (fvField(i) == "NoFreeze" && fvValue(i) == "true") - { - m_warmRestartCheck.noFreeze = true; - } - if (fvField(i) == "SkipPendingTaskCheck" && fvValue(i) == "true") - { - m_warmRestartCheck.skipPendingTaskCheck = true; - } - } - SWSS_LOG_NOTICE("%s", s.c_str()); - } + attr.id = SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE; + attr.value.u8 = trim.tc.value; + + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + return status == SAI_STATUS_SUCCESS; } -void SwitchOrch::restartCheckReply(const string &op, const string &data, std::vector &values) +bool SwitchOrch::setSwitchTrimmingQueueModeSai(const SwitchTrimming &trim) const { - NotificationProducer restartRequestReply(m_db, "RESTARTCHECKREPLY"); - restartRequestReply.send(op, data, values); - checkRestartReadyDone(); + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE; + attr.value.s32 = trim.queue.mode.value; + + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + return status == SAI_STATUS_SUCCESS; } -bool SwitchOrch::setAgingFDB(uint32_t sec) +bool SwitchOrch::setSwitchTrimmingQueueIndexSai(const SwitchTrimming &trim) const { sai_attribute_t attr; - attr.id = SAI_SWITCH_ATTR_FDB_AGING_TIME; - attr.value.u32 = sec; + + attr.id = SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX; + attr.value.u8 = trim.queue.index.value; + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to set switch %" PRIx64 " fdb_aging_time attribute: %d", gSwitchId, status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_SWITCH, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } - } - SWSS_LOG_NOTICE("Set switch %" PRIx64 " fdb_aging_time %u sec", gSwitchId, sec); - return true; + return status == SAI_STATUS_SUCCESS; } -void SwitchOrch::doTask(SelectableTimer &timer) +bool SwitchOrch::setSwitchTrimming(const SwitchTrimming &trim) { SWSS_LOG_ENTER(); - if (&timer == m_sensorsPollerTimer) + auto tObj = trimHlpr.getConfig(); + + auto dscpBak = false; + auto tcBak = false; + auto queueBak = false; + + auto tcUpdate = false; + auto tcSync = false; + + auto cfgUpd = false; + + if (!trimCap.isSwitchTrimmingSupported()) { - if (m_sensorsPollerIntervalChanged) + SWSS_LOG_WARN("Switch trimming configuration is not supported: skipping ..."); + return true; + } + + if (trim.size.is_set) + { + if (!tObj.size.is_set || (tObj.size.value != trim.size.value)) { - m_sensorsPollerTimer->reset(); - m_sensorsPollerIntervalChanged = false; - } + if (!setSwitchTrimmingSizeSai(trim)) + { + SWSS_LOG_ERROR("Failed to set switch trimming size in SAI"); + return false; + } - if (!m_sensorsPollerEnabled) + cfgUpd = true; + } + } + else + { + if (tObj.size.is_set) { - m_sensorsPollerTimer->stop(); - return; + SWSS_LOG_ERROR("Failed to remove switch trimming size configuration: operation is not supported"); + return false; } + } - sai_attribute_t attr; - sai_status_t status; - std::vector values; - - if (m_numTempSensors) + if (trim.dscp.mode.is_set) + { + if (!tObj.dscp.mode.is_set || (tObj.dscp.mode.value != trim.dscp.mode.value)) { - std::vector temp_list(m_numTempSensors); - - memset(&attr, 0, sizeof(attr)); - attr.id = SAI_SWITCH_ATTR_TEMP_LIST; - attr.value.s32list.count = m_numTempSensors; - attr.value.s32list.list = temp_list.data(); - - status = sai_switch_api->get_switch_attribute(gSwitchId , 1, &attr); - if (status == SAI_STATUS_SUCCESS) + if (!trimCap.validateTrimDscpModeCap(trim.dscp.mode.value)) { - for (size_t i = 0; i < attr.value.s32list.count ; i++) { - const std::string &fieldName = "temperature_" + std::to_string(i); - values.emplace_back(fieldName, std::to_string(temp_list[i])); - } - m_asicSensorsTable->set("",values); + SWSS_LOG_ERROR("Failed to validate switch trimming DSCP mode: capability is not supported"); + return false; } - else + + if (!setSwitchTrimmingDscpModeSai(trim)) { - SWSS_LOG_ERROR("ASIC sensors : failed to get SAI_SWITCH_ATTR_TEMP_LIST: %d", status); + SWSS_LOG_ERROR("Failed to set switch trimming DSCP mode in SAI"); + return false; } - } - if (m_sensorsMaxTempSupported) - { - memset(&attr, 0, sizeof(attr)); - attr.id = SAI_SWITCH_ATTR_MAX_TEMP; + if (trimHlpr.isSymDscpMode(tObj)) + { + dscpBak = true; + } - status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status == SAI_STATUS_SUCCESS) + if (!trimHlpr.isSymDscpMode(trim)) { - const std::string &fieldName = "maximum_temperature"; + if (!tObj.tc.cache.is_set) + { + tcUpdate = true; + } + else + { + tObj.tc.value = tObj.tc.cache.value; + } + } + + cfgUpd = true; + } + } + else + { + if (tObj.dscp.mode.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch trimming DSCP configuration: operation is not supported"); + return false; + } + } + + if (trim.dscp.is_set) + { + if (!tObj.dscp.is_set || (tObj.dscp.value != trim.dscp.value)) + { + if (!setSwitchTrimmingDscpSai(trim)) + { + SWSS_LOG_ERROR("Failed to set switch trimming DSCP value in SAI"); + return false; + } + + cfgUpd = true; + } + } + + if (trim.tc.is_set) + { + if (!tObj.tc.is_set || (tObj.tc.value != trim.tc.value) || tcUpdate) + { + if (!trimHlpr.isSymDscpMode(trim)) + { + if (!trimCap.validateTrimTcCap(trim.tc.value)) + { + SWSS_LOG_ERROR("Failed to validate switch trimming TC value: capability is not supported"); + return false; + } + + if (!setSwitchTrimmingTcSai(trim)) + { + SWSS_LOG_ERROR("Failed to set switch trimming TC value in SAI"); + return false; + } + + tcSync = true; + } + else + { + SWSS_LOG_WARN("Skip setting switch trimming TC value for symmetric DSCP mode"); + } + + cfgUpd = true; + } + + // Cache synchronization and backup are mutually exclusive + if (!tcSync) + { + tcBak = true; + } + } + else + { + if (tObj.tc.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch trimming TC configuration: operation is not supported"); + return false; + } + } + + if (trim.queue.mode.is_set) + { + if (!tObj.queue.mode.is_set || (tObj.queue.mode.value != trim.queue.mode.value)) + { + if (!trimCap.validateTrimQueueModeCap(trim.queue.mode.value)) + { + SWSS_LOG_ERROR("Failed to validate switch trimming queue mode: capability is not supported"); + return false; + } + + if (!setSwitchTrimmingQueueModeSai(trim)) + { + SWSS_LOG_ERROR("Failed to set switch trimming queue mode in SAI"); + return false; + } + + if (trimHlpr.isStaticQueueMode(tObj)) + { + queueBak = true; + } + + cfgUpd = true; + } + } + else + { + if (tObj.queue.mode.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch trimming queue configuration: operation is not supported"); + return false; + } + } + + if (trim.queue.index.is_set) + { + if (!tObj.queue.index.is_set || (tObj.queue.index.value != trim.queue.index.value)) + { + if (!trimCap.validateQueueIndexCap(trim.queue.index.value)) + { + SWSS_LOG_ERROR("Failed to validate switch trimming queue index: capability is not supported"); + return false; + } + + if (!setSwitchTrimmingQueueIndexSai(trim)) + { + SWSS_LOG_ERROR("Failed to set switch trimming queue index in SAI"); + return false; + } + + cfgUpd = true; + } + } + + // Don't update internal cache when config remains unchanged + if (!cfgUpd) + { + SWSS_LOG_NOTICE("Switch trimming in SAI is up-to-date"); + return true; + } + + if (dscpBak || tcBak || queueBak || tcSync) // Custom configuration update + { + auto cfg = trim; + + if (dscpBak) // Override dscp configuration during transition from symmetric -> asymmetric + { + cfg.dscp = tObj.dscp; + cfg.dscp.mode = trim.dscp.mode; + } + + if (tcBak) // Override tc configuration to pass synchronization cache + { + cfg.tc.cache = tObj.tc.cache; + } + + if (queueBak) // override queue configuration during transition from static -> dynamic + { + cfg.queue.index = tObj.queue.index; + } + + if (tcSync) // Update tc synchronization cache + { + cfg.tc.cache.value = trim.tc.value; + cfg.tc.cache.is_set = true; + } + + trimHlpr.setConfig(cfg); + } + else // Regular configuration update + { + trimHlpr.setConfig(trim); + } + + SWSS_LOG_NOTICE("Set switch trimming in SAI"); + + return true; +} + +void SwitchOrch::doCfgSwitchTrimmingTableTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto &map = consumer.m_toSync; + auto it = map.begin(); + + while (it != map.end()) + { + auto keyOpFieldsValues = it->second; + auto key = kfvKey(keyOpFieldsValues); + auto op = kfvOp(keyOpFieldsValues); + + SWSS_LOG_INFO("KEY: %s, OP: %s", key.c_str(), op.c_str()); + + if (key.empty()) + { + SWSS_LOG_ERROR("Failed to parse switch trimming key: empty string"); + it = map.erase(it); + continue; + } + + SwitchTrimming trim; + + if (op == SET_COMMAND) + { + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) + { + auto fieldName = fvField(cit); + auto fieldValue = fvValue(cit); + + SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); + + trim.fieldValueMap[fieldName] = fieldValue; + } + + if (trimHlpr.parseTrimConfig(trim)) + { + if (!setSwitchTrimming(trim)) + { + SWSS_LOG_ERROR("Failed to set switch trimming: ASIC and CONFIG DB are diverged"); + } + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_ERROR("Failed to remove switch trimming: operation is not supported: ASIC and CONFIG DB are diverged"); + } + else + { + SWSS_LOG_ERROR("Unknown operation(%s)", op.c_str()); + } + + it = map.erase(it); + } +} + +void SwitchOrch::registerAsicSdkHealthEventCategories(sai_switch_attr_t saiSeverity, const string &severityString, const string &suppressed_category_list, bool isInitializing) +{ + sai_status_t status; + set interested_categories_set = switch_asic_sdk_health_event_category_universal_set; + + SWSS_LOG_INFO("Register ASIC/SDK health event for severity %s(%d) with categories [%s] suppressed", severityString.c_str(), saiSeverity, suppressed_category_list.c_str()); + + if (!suppressed_category_list.empty()) + { + auto &&categories = tokenize(suppressed_category_list, ','); + for (auto category : categories) + { + try + { + interested_categories_set.erase(switch_asic_sdk_health_event_category_map.at(category)); + } + catch (std::out_of_range &e) + { + SWSS_LOG_ERROR("Unknown ASIC/SDK health category %s to suppress", category.c_str()); + continue; + } + } + } + + if (isInitializing && interested_categories_set.empty()) + { + SWSS_LOG_INFO("All categories are suppressed for severity %s", severityString.c_str()); + return; + } + + vector sai_categories(interested_categories_set.begin(), interested_categories_set.end()); + sai_attribute_t attr; + + attr.id = saiSeverity; + attr.value.s32list.count = (uint32_t)sai_categories.size(); + attr.value.s32list.list = sai_categories.data(); + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register ASIC/SDK health event categories for severity %s, status: %s", severityString.c_str(), sai_serialize_status(status).c_str()); + } +} + +void SwitchOrch::doCfgSuppressAsicSdkHealthEventTableTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto &map = consumer.m_toSync; + auto it = map.begin(); + + while (it != map.end()) + { + auto keyOpFieldsValues = it->second; + auto key = kfvKey(keyOpFieldsValues); + auto op = kfvOp(keyOpFieldsValues); + + SWSS_LOG_INFO("KEY: %s, OP: %s", key.c_str(), op.c_str()); + + if (key.empty()) + { + SWSS_LOG_ERROR("Failed to parse switch hash key: empty string"); + it = map.erase(it); + continue; + } + + sai_switch_attr_t saiSeverity; + try + { + saiSeverity = switch_asic_sdk_health_event_severity_to_switch_attribute_map.at(key); + } + catch (std::out_of_range &e) + { + SWSS_LOG_ERROR("Unknown severity %s in SUPPRESS_ASIC_SDK_HEALTH_EVENT table", key.c_str()); + it = map.erase(it); + continue; + } + + if (op == SET_COMMAND) + { + bool categoriesConfigured = false; + bool continueMainLoop = false; + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) + { + auto fieldName = fvField(cit); + auto fieldValue = fvValue(cit); + + SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); + + if (m_supportedAsicSdkHealthEventAttributes.find(saiSeverity) == m_supportedAsicSdkHealthEventAttributes.end()) + { + SWSS_LOG_NOTICE("Unsupport to register categories on severity %d", saiSeverity); + it = map.erase(it); + continueMainLoop = true; + break; + } + + if (fieldName == "categories") + { + registerAsicSdkHealthEventCategories(saiSeverity, key, fieldValue); + categoriesConfigured = true; + } + } + + if (continueMainLoop) + { + continue; + } + + if (!categoriesConfigured) + { + registerAsicSdkHealthEventCategories(saiSeverity, key); + } + } + else if (op == DEL_COMMAND) + { + registerAsicSdkHealthEventCategories(saiSeverity, key); + } + else + { + SWSS_LOG_ERROR("Unknown operation(%s)", op.c_str()); + } + + it = map.erase(it); + } +} + +void SwitchOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + const auto &tableName = consumer.getTableName(); + + if (tableName == APP_SWITCH_TABLE_NAME) + { + doAppSwitchTableTask(consumer); + } + else if (tableName == CFG_ASIC_SENSORS_TABLE_NAME) + { + doCfgSensorsTableTask(consumer); + } + else if (tableName == CFG_SWITCH_HASH_TABLE_NAME) + { + doCfgSwitchHashTableTask(consumer); + } + else if (tableName == CFG_SWITCH_TRIMMING_TABLE_NAME) + { + doCfgSwitchTrimmingTableTask(consumer); + } + else if (tableName == CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME) + { + doCfgSuppressAsicSdkHealthEventTableTask(consumer); + } + else + { + SWSS_LOG_ERROR("Unknown table : %s", tableName.c_str()); + } +} + +void SwitchOrch::doTask(NotificationConsumer& consumer) +{ + SWSS_LOG_ENTER(); + + std::string op; + std::string data; + std::vector values; + + consumer.pop(op, data, values); + + if (&consumer != m_restartCheckNotificationConsumer) + { + return; + } + + m_warmRestartCheck.checkRestartReadyState = false; + m_warmRestartCheck.noFreeze = false; + m_warmRestartCheck.skipPendingTaskCheck = false; + + SWSS_LOG_NOTICE("RESTARTCHECK notification for %s ", op.c_str()); + if (op == "orchagent") + { + string s = op; + + m_warmRestartCheck.checkRestartReadyState = true; + for (auto &i : values) + { + s += "|" + fvField(i) + ":" + fvValue(i); + + if (fvField(i) == "NoFreeze" && fvValue(i) == "true") + { + m_warmRestartCheck.noFreeze = true; + } + if (fvField(i) == "SkipPendingTaskCheck" && fvValue(i) == "true") + { + m_warmRestartCheck.skipPendingTaskCheck = true; + } + } + SWSS_LOG_NOTICE("%s", s.c_str()); + } +} + +void SwitchOrch::restartCheckReply(const string &op, const string &data, std::vector &values) +{ + NotificationProducer restartRequestReply(m_db, "RESTARTCHECKREPLY"); + restartRequestReply.send(op, data, values); + checkRestartReadyDone(); +} + +void SwitchOrch::onSwitchAsicSdkHealthEvent(sai_object_id_t switch_id, + sai_switch_asic_sdk_health_severity_t severity, + sai_timespec_t timestamp, + sai_switch_asic_sdk_health_category_t category, + sai_switch_health_data_t data, + const sai_u8_list_t &description) +{ + std::vector values; + const string &severity_str = switch_asic_sdk_health_event_severity_reverse_map.at(severity); + const string &category_str = switch_asic_sdk_health_event_category_reverse_map.at(category); + string description_str; + std::time_t t = (std::time_t)timestamp.tv_sec; + const std::time_t now = std::time(0); + const double year_in_seconds = 86400 * 365; + stringstream time_ss; + + /* + * In case vendor SAI passed a very large timestamp, put_time can cause segment fault which can not be caught by try/catch infra + * We check the difference between the timestamp from SAI and the current time and force to use current time if the gap is too large + * By doing so, we can avoid the segment fault + */ + if (difftime(t, now) > year_in_seconds) + { + SWSS_LOG_ERROR("Invalid timestamp second %" PRIx64 " in received ASIC/SDK health event, reset to current time", timestamp.tv_sec); + t = now; + } + + time_ss << std::put_time(std::localtime(&t), "%Y-%m-%d %H:%M:%S"); + + switch (data.data_type) + { + case SAI_HEALTH_DATA_TYPE_GENERAL: + { + vector description_with_terminator(description.list, description.list + description.count); + // Add the terminate character + description_with_terminator.push_back(0); + description_str = string(reinterpret_cast(description_with_terminator.data())); + // Remove unprintable characters but keep CR and NL + if (description_str.end() != + description_str.erase(std::remove_if( + description_str.begin(), + description_str.end(), + [](unsigned char x) { + return (x != 0x0d) && (x != 0x0a) && !std::isprint(x); + }), + description_str.end())) + { + SWSS_LOG_NOTICE("Unprintable characters in description of ASIC/SDK health event"); + } + break; + } + default: + SWSS_LOG_ERROR("Unknown data type %d when receiving ASIC/SDK health event", data.data_type); + // Do not return. The ASIC/SDK health event will still be recorded but without the description + break; + } + + event_params_t params = { + { "sai_timestamp", time_ss.str() }, + { "severity", severity_str }, + { "category", category_str }, + { "description", description_str }}; + + string asic_name_str; + if (!gMyAsicName.empty()) + { + asic_name_str = "asic " + gMyAsicName + ","; + params["asic_name"] = gMyAsicName; + } + + if (severity == SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_FATAL) + { + SWSS_LOG_ERROR("[%s] ASIC/SDK health event occurred at %s, %scategory %s: %s", severity_str.c_str(), time_ss.str().c_str(), asic_name_str.c_str(), category_str.c_str(), description_str.c_str()); + } + else + { + SWSS_LOG_NOTICE("[%s] ASIC/SDK health event occurred at %s, %scategory %s: %s", severity_str.c_str(), time_ss.str().c_str(), asic_name_str.c_str(), category_str.c_str(), description_str.c_str()); + } + + values.emplace_back("severity", severity_str); + values.emplace_back("category", category_str); + values.emplace_back("description", description_str); + + m_asicSdkHealthEventTable->set(time_ss.str(),values); + + event_publish(g_events_handle, "asic-sdk-health-event", ¶ms); + + if (severity == SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_FATAL) + { + m_fatalEventCount++; + } +} + +bool SwitchOrch::setAgingFDB(uint32_t sec) +{ + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_FDB_AGING_TIME; + attr.value.u32 = sec; + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set switch %" PRIx64 " fdb_aging_time attribute: %d", gSwitchId, status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_SWITCH, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + SWSS_LOG_NOTICE("Set switch %" PRIx64 " fdb_aging_time %u sec", gSwitchId, sec); + return true; +} + +void SwitchOrch::doTask(SelectableTimer &timer) +{ + SWSS_LOG_ENTER(); + + if (&timer == m_sensorsPollerTimer) + { + if (m_sensorsPollerIntervalChanged) + { + m_sensorsPollerTimer->reset(); + m_sensorsPollerIntervalChanged = false; + } + + if (!m_sensorsPollerEnabled) + { + m_sensorsPollerTimer->stop(); + return; + } + + sai_attribute_t attr; + sai_status_t status; + std::vector values; + + if (m_numTempSensors) + { + std::vector temp_list(m_numTempSensors); + + memset(&attr, 0, sizeof(attr)); + attr.id = SAI_SWITCH_ATTR_TEMP_LIST; + attr.value.s32list.count = m_numTempSensors; + attr.value.s32list.list = temp_list.data(); + + status = sai_switch_api->get_switch_attribute(gSwitchId , 1, &attr); + if (status == SAI_STATUS_SUCCESS) + { + for (size_t i = 0; i < attr.value.s32list.count ; i++) { + const std::string &fieldName = "temperature_" + std::to_string(i); + values.emplace_back(fieldName, std::to_string(temp_list[i])); + } + m_asicSensorsTable->set("",values); + } + else + { + SWSS_LOG_ERROR("ASIC sensors : failed to get SAI_SWITCH_ATTR_TEMP_LIST: %d", status); + } + } + + if (m_sensorsMaxTempSupported) + { + memset(&attr, 0, sizeof(attr)); + attr.id = SAI_SWITCH_ATTR_MAX_TEMP; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status == SAI_STATUS_SUCCESS) + { + const std::string &fieldName = "maximum_temperature"; values.emplace_back(fieldName, std::to_string(attr.value.s32)); m_asicSensorsTable->set("",values); } @@ -912,6 +1741,14 @@ void SwitchOrch::doTask(SelectableTimer &timer) } } } + else if (&timer == m_eliminateEventsTimer) + { + auto ret = swss::runRedisScript(*m_stateDb, m_eliminateEventsSha, {}, {}); + for (auto str: ret) + { + SWSS_LOG_INFO("Eliminate ASIC/SDK health %s", str.c_str()); + } + } } void SwitchOrch::initSensorsTable() @@ -989,6 +1826,11 @@ void SwitchOrch::set_switch_capability(const std::vector& value m_switchTable.set("switch", values); } +void SwitchOrch::get_switch_capability(const std::string& capability, std::string& val) +{ + m_switchTable.hget("switch", capability, val); +} + void SwitchOrch::querySwitchPortEgressSampleCapability() { vector fvVector; @@ -1018,6 +1860,63 @@ void SwitchOrch::querySwitchPortEgressSampleCapability() set_switch_capability(fvVector); } +void SwitchOrch::querySwitchPortMirrorCapability() +{ + vector fvVector; + sai_status_t status = SAI_STATUS_SUCCESS; + sai_attr_capability_t capability; + + // Check if SAI is capable of handling Port ingress mirror session + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_INGRESS_MIRROR_SESSION, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Could not query port ingress mirror capability %d", status); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_INGRESS_MIRROR_CAPABLE, "true"); + m_portIngressMirrorSupported = true; + } + else + { + if (capability.set_implemented) + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_INGRESS_MIRROR_CAPABLE, "true"); + m_portIngressMirrorSupported = true; + } + else + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_INGRESS_MIRROR_CAPABLE, "false"); + m_portIngressMirrorSupported = false; + } + SWSS_LOG_NOTICE("port ingress mirror capability %d", capability.set_implemented); + } + + // Check if SAI is capable of handling Port egress mirror session + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_EGRESS_MIRROR_SESSION, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Could not query port egress mirror capability %d", status); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_MIRROR_CAPABLE, "true"); + m_portEgressMirrorSupported = true; + } + else + { + if (capability.set_implemented) + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_MIRROR_CAPABLE, "true"); + m_portEgressMirrorSupported = true; + } + else + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_MIRROR_CAPABLE, "false"); + m_portEgressMirrorSupported = false; + } + SWSS_LOG_NOTICE("port egress mirror capability %d", capability.set_implemented); + } + + set_switch_capability(fvVector); +} + void SwitchOrch::querySwitchTpidCapability() { SWSS_LOG_ENTER(); @@ -1103,6 +2002,27 @@ void SwitchOrch::querySwitchHashDefaults() } } +void SwitchOrch::setSwitchIcmpOffloadCapability() +{ + SWSS_LOG_ENTER(); + + vector fvVector; + // icmp echo offload does not support capability attribute, + // we depend on its notification capability + bool supported = querySwitchCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ICMP_ECHO_SESSION_STATE_CHANGE_NOTIFY); + if (supported == false) + { + SWSS_LOG_NOTICE("Icmp Echo Offload not supported"); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ICMP_OFFLOAD_CAPABLE, "false"); + } + else + { + SWSS_LOG_NOTICE("Icmp Echo Offload supported"); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_ICMP_OFFLOAD_CAPABLE, "true"); + } + set_switch_capability(fvVector); +} + bool SwitchOrch::querySwitchCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id) { SWSS_LOG_ENTER(); @@ -1130,3 +2050,51 @@ bool SwitchOrch::querySwitchCapability(sai_object_type_t sai_object, sai_attr_id } } } + +// Bind ACL table (with bind type switch) to switch +bool SwitchOrch::bindAclTableToSwitch(acl_stage_type_t stage, sai_object_id_t table_id) +{ + sai_attribute_t attr; + if ( stage == ACL_STAGE_INGRESS ) { + attr.id = SAI_SWITCH_ATTR_INGRESS_ACL; + } else { + attr.id = SAI_SWITCH_ATTR_EGRESS_ACL; + } + attr.value.oid = table_id; + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + string stage_str = (stage == ACL_STAGE_INGRESS) ? "ingress" : "egress"; + if (status == SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Bind %s acl table %" PRIx64" to switch", stage_str.c_str(), table_id); + return true; + } + else + { + SWSS_LOG_ERROR("Failed to bind %s acl table %" PRIx64" to switch", stage_str.c_str(), table_id); + return false; + } +} + +// Unbind ACL table from swtich +bool SwitchOrch::unbindAclTableFromSwitch(acl_stage_type_t stage,sai_object_id_t table_id) +{ + sai_attribute_t attr; + if ( stage == ACL_STAGE_INGRESS ) { + attr.id = SAI_SWITCH_ATTR_INGRESS_ACL; + } else { + attr.id = SAI_SWITCH_ATTR_EGRESS_ACL; + } + attr.value.oid = SAI_NULL_OBJECT_ID; + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + string stage_str = (stage == ACL_STAGE_INGRESS) ? "ingress" : "egress"; + if (status == SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Unbind %s acl table %" PRIx64" to switch", stage_str.c_str(), table_id); + return true; + } + else + { + SWSS_LOG_ERROR("Failed to unbind %s acl table %" PRIx64" to switch", stage_str.c_str(), table_id); + return false; + } +} diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 7135bcdc395..d55bd72f8b1 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -3,8 +3,11 @@ #include "acltable.h" #include "orch.h" #include "timer.h" +#include "flex_counter/flex_counter_manager.h" #include "switch/switch_capabilities.h" #include "switch/switch_helper.h" +#include "switch/trimming/capabilities.h" +#include "switch/trimming/helper.h" #define DEFAULT_ASIC_SENSORS_POLLER_INTERVAL 60 #define ASIC_SENSORS_POLLER_STATUS "ASIC_SENSORS_POLLER_STATUS" @@ -15,6 +18,18 @@ #define SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" #define SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE "PFC_DLR_INIT_CAPABLE" #define SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE "PORT_EGRESS_SAMPLE_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_PATH_TRACING_CAPABLE "PATH_TRACING_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_ICMP_OFFLOAD_CAPABLE "ICMP_OFFLOAD_CAPABLE" + +#define ASIC_SDK_HEALTH_EVENT_ELIMINATE_INTERVAL 3600 +#define SWITCH_CAPABILITY_TABLE_ASIC_SDK_HEALTH_EVENT_CAPABLE "ASIC_SDK_HEALTH_EVENT" +#define SWITCH_CAPABILITY_TABLE_REG_FATAL_ASIC_SDK_HEALTH_CATEGORY "REG_FATAL_ASIC_SDK_HEALTH_CATEGORY" +#define SWITCH_CAPABILITY_TABLE_REG_WARNING_ASIC_SDK_HEALTH_CATEGORY "REG_WARNING_ASIC_SDK_HEALTH_CATEGORY" +#define SWITCH_CAPABILITY_TABLE_REG_NOTICE_ASIC_SDK_HEALTH_CATEGORY "REG_NOTICE_ASIC_SDK_HEALTH_CATEGORY" +#define SWITCH_CAPABILITY_TABLE_PORT_INGRESS_MIRROR_CAPABLE "PORT_INGRESS_MIRROR_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_PORT_EGRESS_MIRROR_CAPABLE "PORT_EGRESS_MIRROR_CAPABLE" + +#define SWITCH_STAT_COUNTER_FLEX_COUNTER_GROUP "SWITCH_STAT_COUNTER" struct WarmRestartCheck { @@ -34,6 +49,7 @@ class SwitchOrch : public Orch void restartCheckReply(const std::string &op, const std::string &data, std::vector &values); bool setAgingFDB(uint32_t sec); void set_switch_capability(const std::vector& values); + void get_switch_capability(const std::string& capability, std::string& val); bool querySwitchCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id); bool checkPfcDlrInitEnable() { return m_PfcDlrInitEnable; } void set_switch_pfc_dlr_init_capability(); @@ -46,15 +62,43 @@ class SwitchOrch : public Orch bool checkOrderedEcmpEnable() { return m_orderedEcmpEnable; } + void onSwitchAsicSdkHealthEvent(sai_object_id_t switch_id, + sai_switch_asic_sdk_health_severity_t severity, + sai_timespec_t timestamp, + sai_switch_asic_sdk_health_category_t category, + sai_switch_health_data_t data, + const sai_u8_list_t &description); + + inline bool isFatalEventReceived() const + { + return (m_fatalEventCount != 0); + } + + bool bindAclTableToSwitch(acl_stage_type_t stage, sai_object_id_t table_id); + bool unbindAclTableFromSwitch(acl_stage_type_t stage, sai_object_id_t table_id); + + // Statistics + void generateSwitchCounterIdList(); + + // Mirror capability interface for MirrorOrch + bool isPortIngressMirrorSupported() const { return m_portIngressMirrorSupported; } + bool isPortEgressMirrorSupported() const { return m_portEgressMirrorSupported; } + private: void doTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); void doCfgSwitchHashTableTask(Consumer &consumer); + void doCfgSwitchTrimmingTableTask(Consumer &consumer); void doCfgSensorsTableTask(Consumer &consumer); + void doCfgSuppressAsicSdkHealthEventTableTask(Consumer &consumer); void doAppSwitchTableTask(Consumer &consumer); void initSensorsTable(); void querySwitchTpidCapability(); void querySwitchPortEgressSampleCapability(); + void querySwitchPortMirrorCapability(); + + // Statistics + void generateSwitchCounterNameMap() const; // Switch hash bool setSwitchHashFieldListSai(const SwitchHash &hash, bool isEcmpHash) const; @@ -63,6 +107,16 @@ class SwitchOrch : public Orch bool getSwitchHashOidSai(sai_object_id_t &oid, bool isEcmpHash) const; void querySwitchHashDefaults(); + void setSwitchIcmpOffloadCapability(); + + // Switch trimming + bool setSwitchTrimmingSizeSai(const SwitchTrimming &trim) const; + bool setSwitchTrimmingDscpModeSai(const SwitchTrimming &trim) const; + bool setSwitchTrimmingDscpSai(const SwitchTrimming &trim) const; + bool setSwitchTrimmingTcSai(const SwitchTrimming &trim) const; + bool setSwitchTrimmingQueueModeSai(const SwitchTrimming &trim) const; + bool setSwitchTrimmingQueueIndexSai(const SwitchTrimming &trim) const; + bool setSwitchTrimming(const SwitchTrimming &trim); sai_status_t setSwitchTunnelVxlanParams(swss::FieldValueTuple &val); void setSwitchNonSaiAttributes(swss::FieldValueTuple &val); @@ -79,6 +133,8 @@ class SwitchOrch : public Orch swss::NotificationConsumer* m_restartCheckNotificationConsumer; void doTask(swss::NotificationConsumer& consumer); + void doAsicSdkHealthEventNotificationConsumerTask(swss::NotificationConsumer& consumer); + void doRestartCheckNotificationConsumerTask(swss::NotificationConsumer& consumer); swss::DBConnector *m_db; swss::Table m_switchTable; std::map m_aclGroups; @@ -99,6 +155,21 @@ class SwitchOrch : public Orch bool m_orderedEcmpEnable = false; bool m_PfcDlrInitEnable = false; + // Port mirror capabilities + bool m_portIngressMirrorSupported = false; + bool m_portEgressMirrorSupported = false; + + // ASIC SDK health event + std::shared_ptr m_stateDbForNotification = nullptr; + std::shared_ptr m_asicSdkHealthEventTable = nullptr; + std::set m_supportedAsicSdkHealthEventAttributes; + std::string m_eliminateEventsSha; + swss::SelectableTimer* m_eliminateEventsTimer = nullptr; + uint32_t m_fatalEventCount = 0; + + void initAsicSdkHealthEventNotification(); + void registerAsicSdkHealthEventCategories(sai_switch_attr_t saiSeverity, const std::string &severityString, const std::string &suppressed_category_list="", bool isInitializing=false); + // Switch hash SAI defaults struct { struct { @@ -109,13 +180,19 @@ class SwitchOrch : public Orch } lagHash; } m_switchHashDefaults; + // Statistics + FlexCounterManager m_counterManager; + bool m_isSwitchCounterIdListGenerated = false; + // Information contained in the request from // external program for orchagent pre-shutdown state check WarmRestartCheck m_warmRestartCheck = {false, false, false}; // Switch OA capabilities SwitchCapabilities swCap; + SwitchTrimmingCapabilities trimCap; // Switch OA helper SwitchHelper swHlpr; + SwitchTrimmingHelper trimHlpr; }; diff --git a/orchagent/swssnet.h b/orchagent/swssnet.h index 82b5b6f94f5..8084b7fb4ed 100644 --- a/orchagent/swssnet.h +++ b/orchagent/swssnet.h @@ -21,6 +21,7 @@ inline static sai_ip_address_t& copy(sai_ip_address_t& dst, const IpAddress& src switch(sip.family) { case AF_INET: + memset((void*)&dst.addr, 0, sizeof(dst.addr)); dst.addr_family = SAI_IP_ADDR_FAMILY_IPV4; dst.addr.ip4 = sip.ip_addr.ipv4_addr; break; @@ -41,6 +42,7 @@ inline static sai_ip_prefix_t& copy(sai_ip_prefix_t& dst, const IpPrefix& src) switch(ia.family) { case AF_INET: + memset((void*)&dst, 0, sizeof(dst)); dst.addr_family = SAI_IP_ADDR_FAMILY_IPV4; dst.addr.ip4 = ia.ip_addr.ipv4_addr; dst.mask.ip4 = ma.ip_addr.ipv4_addr; @@ -62,6 +64,7 @@ inline static sai_ip_prefix_t& copy(sai_ip_prefix_t& dst, const IpAddress& src) switch(sip.family) { case AF_INET: + memset((void*)&dst, 0, sizeof(dst)); dst.addr_family = SAI_IP_ADDR_FAMILY_IPV4; dst.addr.ip4 = sip.ip_addr.ipv4_addr; dst.mask.ip4 = 0xFFFFFFFF; diff --git a/orchagent/trap_rates.lua b/orchagent/trap_rates.lua index 69b9c5cd3f7..a20f62b9d08 100644 --- a/orchagent/trap_rates.lua +++ b/orchagent/trap_rates.lua @@ -36,7 +36,8 @@ for i = 1, n do logit(initialized) -- Get new COUNTERS values - local in_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_COUNTER_STAT_PACKETS') + local in_pkts_str = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_COUNTER_STAT_PACKETS') + local in_pkts = tonumber(in_pkts_str) or 0 if initialized == 'DONE' or initialized == 'COUNTERS_LAST' then -- Get old COUNTERS values diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 065e78a0c07..79b74e0bfa2 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -6,8 +6,15 @@ #include "logger.h" #include "swssnet.h" #include "qosorch.h" +#include "subscriberstatetable.h" + +using namespace std; +using namespace swss; #define OVERLAY_RIF_DEFAULT_MTU 9100 +#define APPEND_IF_NOT_EMPTY(vec, obj, attr) \ + if (!obj.attr.empty()) \ + vec.push_back({#attr, obj.attr}) \ extern sai_tunnel_api_t* sai_tunnel_api; extern sai_router_interface_api_t* sai_router_intfs_api; @@ -20,12 +27,28 @@ extern PortsOrch* gPortsOrch; extern CrmOrch* gCrmOrch; extern QosOrch* gQosOrch; -TunnelDecapOrch::TunnelDecapOrch(DBConnector *db, string tableName) : Orch(db, tableName) +TunnelDecapOrch::TunnelDecapOrch( + DBConnector *appDb, DBConnector *stateDb, + DBConnector *configDb, const vector &tableNames) + : Orch(appDb, tableNames), + stateTunnelDecapTable(make_unique
(stateDb, STATE_TUNNEL_DECAP_TABLE_NAME)), + stateTunnelDecapTermTable(make_unique
(stateDb, STATE_TUNNEL_DECAP_TERM_TABLE_NAME)) { SWSS_LOG_ENTER(); + + auto cfgSubnetDecapSubTable = new SubscriberStateTable(configDb, CFG_SUBNET_DECAP_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, 0); + deque entries; + cfgSubnetDecapSubTable->pops(entries); + // init subnet decap config + for (auto &entry : entries) + { + doSubnetDecapTask(entry); + } + + Orch::addExecutor(new Consumer(cfgSubnetDecapSubTable, this, CFG_SUBNET_DECAP_TABLE_NAME)); } -void TunnelDecapOrch::doTask(Consumer& consumer) +void TunnelDecapOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -33,6 +56,32 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { return; } + + string table_name = consumer.getTableName(); + if (table_name == APP_TUNNEL_DECAP_TABLE_NAME) + { + doDecapTunnelTask(consumer); + } + else if (table_name == APP_TUNNEL_DECAP_TERM_TABLE_NAME) + { + doDecapTunnelTermTask(consumer); + } + else if (table_name == CFG_SUBNET_DECAP_TABLE_NAME) + { + doSubnetDecapTask(consumer); + } + else + { + SWSS_LOG_ERROR("Invalid table %s", table_name.c_str()); + } + + return; +} + +void TunnelDecapOrch::doDecapTunnelTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + string table_name = consumer.getTableName(); auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) @@ -42,7 +91,6 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string key = kfvKey(t); string op = kfvOp(t); - IpAddresses ip_addresses; IpAddress src_ip; IpAddress* p_src_ip = nullptr; string tunnel_type; @@ -83,23 +131,6 @@ void TunnelDecapOrch::doTask(Consumer& consumer) break; } } - else if (fvField(i) == "dst_ip") - { - try - { - ip_addresses = IpAddresses(fvValue(i)); - } - catch (const std::invalid_argument &e) - { - SWSS_LOG_ERROR("%s", e.what()); - valid = false; - break; - } - if (exists) - { - setIpAttribute(key, ip_addresses, tunnelTable.find(key)->second.tunnel_id); - } - } else if (fvField(i) == "src_ip") { try @@ -129,6 +160,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { + // Apply to SAI; only touch cache/flag on success setTunnelAttribute(fvField(i), dscp_mode, tunnel_id); tunnelTable[key].dscp_mode = dscp_mode; } @@ -176,6 +208,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { + // Apply to SAI; only touch cache/flag on success setTunnelAttribute(fvField(i), ttl_mode, tunnel_id); } } @@ -190,6 +223,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { + // Apply to SAI; only touch cache/flag on success setTunnelAttribute(fvField(i), dscp_to_tc_map_id, tunnel_id); } } @@ -204,6 +238,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { + // Apply to SAI; only touch cache/flag on success setTunnelAttribute(fvField(i), tc_to_pg_map_id, tunnel_id); } } @@ -237,6 +272,19 @@ void TunnelDecapOrch::doTask(Consumer& consumer) tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; } } + else + { + SWSS_LOG_ERROR("unknown decap tunnel table attribute '%s'.", fvField(i).c_str()); + valid = false; + break; + } + } + + if (exists) + { + // Publish to STATE_DB if any mirrored field changed + setDecapTunnelStatus(key); + SWSS_LOG_NOTICE("Fields for TUNNEL_DECAP_TABLE entry '%s' have been synchronised in STATE_DB", key.c_str()); } if (task_status == task_process_status::task_need_retry) @@ -249,44 +297,413 @@ void TunnelDecapOrch::doTask(Consumer& consumer) if (valid && !exists) { - if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, - dscp_to_tc_map_id, tc_to_pg_map_id)) + if (addDecapTunnel(key, tunnel_type, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, + dscp_to_tc_map_id, tc_to_pg_map_id)) { // Record only tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; - SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); + SWSS_LOG_NOTICE("Tunnel %s added to ASIC_DB.", key.c_str()); + + // process unhandled decap terms + processUnhandledDecapTunnelTerms(key); } else { - SWSS_LOG_ERROR("Failed to add tunnels to ASIC_DB."); + SWSS_LOG_ERROR("Failed to add tunnel %s to ASIC_DB.", key.c_str()); } } } - - if (op == DEL_COMMAND) + else if (op == DEL_COMMAND) { if (exists) { - removeDecapTunnel(table_name, key); + decreaseTunnelRefCount(key); + RemoveTunnelIfNotReferenced(key); } else { - SWSS_LOG_ERROR("Tunnel cannot be removed since it doesn't exist."); + SWSS_LOG_ERROR("Tunnel %s cannot be removed since it doesn't exist.", key.c_str()); } } + else + { + SWSS_LOG_ERROR("Unknown operation type %s.", op.c_str()); + } it = consumer.m_toSync.erase(it); } } +void TunnelDecapOrch::doDecapTunnelTermTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + static const map DecapTermTypes = { + {"P2P", TUNNEL_TERM_TYPE_P2P}, + {"P2MP", TUNNEL_TERM_TYPE_P2MP}, + {"MP2MP", TUNNEL_TERM_TYPE_MP2MP}}; + + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple &t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + string tunnel_name; + string dst_ip_str; + string src_ip_str; + IpPrefix dst_ip; + IpPrefix src_ip; + TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; + string subnet_type; + bool valid = true; + + size_t found = key.find(DEFAULT_KEY_SEPARATOR); + if (found == string::npos) + { + SWSS_LOG_ERROR("%s: invalid tunnel decap term key %s.", key.c_str(), key.c_str()); + valid = false; + } + else + { + tunnel_name = key.substr(0, found); + dst_ip_str = key.substr(found + 1); + try + { + dst_ip = IpPrefix(dst_ip_str); + } + catch (const std::invalid_argument &e) + { + SWSS_LOG_ERROR("%s: invalid destination IP prefix %s.", key.c_str(), e.what()); + valid = false; + } + } + + if (!valid) + { + it = consumer.m_toSync.erase(it); + continue; + } + + bool tunnel_exists = (tunnelTable.find(tunnel_name) != tunnelTable.end()); + bool is_subnet_decap_term = (tunnel_name == subnetDecapConfig.tunnel || + tunnel_name == subnetDecapConfig.tunnel_v6); + bool is_v4_term = dst_ip.isV4(); + + if (op == SET_COMMAND) + { + for (auto &fv : kfvFieldsValues(t)) + { + if (fvField(fv) == "src_ip") + { + src_ip_str = fvValue(fv); + try + { + src_ip = IpPrefix(src_ip_str); + } + catch (const std::invalid_argument &e) + { + SWSS_LOG_ERROR("%s: invalid source IP prefix %s.", key.c_str(), src_ip_str.c_str()); + valid = false; + break; + } + } + else if (fvField(fv) == "term_type") + { + auto it = DecapTermTypes.find(fvValue(fv)); + if (it == DecapTermTypes.end()) + { + SWSS_LOG_ERROR("%s: invalid tunnel decap term type %s.", key.c_str(), fvValue(fv).c_str()); + valid = false; + break; + } + term_type = it->second; + } + else if (fvField(fv) == "subnet_type") + { + subnet_type = fvValue(fv); + if (subnet_type != "vlan" && subnet_type != "vip") + { + SWSS_LOG_ERROR("%s: invalid subnet type: %s.", key.c_str(), subnet_type.c_str()); + valid = false; + break; + } + } + else + { + SWSS_LOG_ERROR("%s: unknown decap term table attribute '%s'", key.c_str(), fvField(fv).c_str()); + valid = false; + break; + } + } + + if (valid) + { + if (is_subnet_decap_term && term_type != TUNNEL_TERM_TYPE_MP2MP) + { + SWSS_LOG_ERROR("%s: only MP2MP tunnel decap term is allowed for subnet decap tunnel.", key.c_str()); + valid = false; + } + else if (!subnet_type.empty() && term_type != TUNNEL_TERM_TYPE_MP2MP) + { + SWSS_LOG_ERROR("%s: only MP2MP is allowed for subnet decap term.", key.c_str()); + valid = false; + } + else if (term_type == TUNNEL_TERM_TYPE_P2P && src_ip_str.empty()) + { + SWSS_LOG_ERROR("%s: no source IP is provided.", key.c_str()); + valid = false; + } + else if (term_type == TUNNEL_TERM_TYPE_MP2MP && !is_subnet_decap_term && src_ip_str.empty()) + { + SWSS_LOG_ERROR("%s: no source IP is provided.", key.c_str()); + valid = false; + } + } + + if (valid) + { + // if subnet decap is enabled, take source IP from the subnet decap config + // for subnet decap tunnnel term + if (subnetDecapConfig.enable) + { + if (is_subnet_decap_term) + { + if (is_v4_term) + { + if (!subnetDecapConfig.src_ip.empty()) + { + src_ip_str = subnetDecapConfig.src_ip; + } + else + { + SWSS_LOG_ERROR("%s: source IP is not configured for subnet decap term, ignored.", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + } + else + { + if (!subnetDecapConfig.src_ip_v6.empty()) + { + src_ip_str = subnetDecapConfig.src_ip_v6; + } + else + { + SWSS_LOG_ERROR("%s: source IPv6 is not configured for subnet decap term, ignored.", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + } + } + } + else if (is_subnet_decap_term) + { + SWSS_LOG_ERROR("%s: subnet decap is disabled, ignored.", key.c_str()); + it = consumer.m_toSync.erase(it); + continue; + } + + if (tunnel_exists) + { + if (!addDecapTunnelTermEntry(tunnel_name, src_ip_str, dst_ip_str, term_type, subnet_type)) + { + SWSS_LOG_ERROR("%s: failed to add tunnel decap term to ASIC_DB.", key.c_str()); + } + } + else + { + SWSS_LOG_NOTICE("%s: tunnel doesn't exist, added to unhandled list.", key.c_str()); + addUnhandledDecapTunnelTerm(tunnel_name, src_ip_str, dst_ip_str, term_type, subnet_type); + } + } + } + else if (op == DEL_COMMAND) + { + if (tunnel_exists) + { + if (removeDecapTunnelTermEntry(tunnel_name, dst_ip_str)) + { + RemoveTunnelIfNotReferenced(tunnel_name); + } + else + { + SWSS_LOG_ERROR("Failed to remove tunnel decap term %s from ASIC_DB.", key.c_str()); + } + } + else + { + SWSS_LOG_NOTICE("Tunnel for decap term %s doesn't exist, removed from unhandled list.", key.c_str()); + removeUnhandledDecapTunnelTerm(tunnel_name, dst_ip_str); + } + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s.", op.c_str()); + } + + it = consumer.m_toSync.erase(it); + } +} + +void TunnelDecapOrch::doSubnetDecapTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple &t = it->second; + doSubnetDecapTask(t); + it = consumer.m_toSync.erase(it); + } +} + +void TunnelDecapOrch::doSubnetDecapTask(const KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + + string key = kfvKey(tuple); + string op = kfvOp(tuple); + + bool valid = true; + string src_ip_str; + string src_ip_v6_str; + IpPrefix src_ip{""}; + IpPrefix src_ip_v6{""}; + bool enable = false; + + if (op == SET_COMMAND) + { + for (auto &fv : kfvFieldsValues(tuple)) + { + if (fvField(fv) == "src_ip") + { + src_ip_str = fvValue(fv); + try + { + src_ip = swss::IpPrefix(src_ip_str); + } + catch (const std::invalid_argument &e) + { + SWSS_LOG_ERROR("Invalid source IP prefix %s.", src_ip_str.c_str()); + valid = false; + break; + } + if (!src_ip.isV4()) + { + SWSS_LOG_ERROR("Invalid source IP prefix %s.", src_ip_str.c_str()); + valid = false; + break; + } + } + else if (fvField(fv) == "src_ip_v6") + { + src_ip_v6_str = fvValue(fv); + try + { + src_ip_v6 = swss::IpPrefix(src_ip_v6_str); + } + catch (const std::invalid_argument &e) + { + SWSS_LOG_ERROR("Invalid source IPv6 prefix %s.", src_ip_v6_str.c_str()); + valid = false; + break; + } + if (src_ip_v6.isV4()) + { + SWSS_LOG_ERROR("Invalid source IPv6 prefix %s.", src_ip_v6_str.c_str()); + valid = false; + break; + } + } + else if (fvField(fv) == "status") + { + enable = (fvValue(fv) == "enable"); + } + else + { + SWSS_LOG_ERROR("unknown subnet decap table attribute '%s'.", fvField(fv).c_str()); + valid = false; + break; + } + } + + if (src_ip_str.empty() && src_ip_v6_str.empty()) + { + SWSS_LOG_ERROR("Both src_ip and src_ip_v6 of subnet decap are not set."); + valid = false; + } + + if (valid) + { + subnetDecapConfig.enable = enable; + ostringstream oss; + oss << "Updated subnet decap config, enable: " << enable; + if (!src_ip_str.empty()) + { + src_ip_str = src_ip.to_string(); + oss << " , src_ip: " << src_ip_str; + if (subnetDecapConfig.src_ip.empty()) + { + subnetDecapConfig.src_ip = src_ip_str; + } + else if (subnetDecapConfig.src_ip != src_ip_str) + { + if (subnetDecapConfig.enable) + { + // update source IP of existing IPv4 decap terms + setIpAttribute(subnetDecapConfig.tunnel, src_ip_str); + // update source IP of unhandled IPv4 decap terms + updateUnhandledDecapTunnelTerms(subnetDecapConfig.tunnel, src_ip_str); + } + subnetDecapConfig.src_ip = src_ip_str; + } + } + if (!src_ip_v6_str.empty()) + { + src_ip_v6_str = src_ip_v6.to_string(); + oss << " , src_ip_v6: " << src_ip_v6_str; + if (subnetDecapConfig.src_ip_v6.empty()) + { + subnetDecapConfig.src_ip_v6 = src_ip_v6_str; + } + else if (subnetDecapConfig.src_ip_v6 != src_ip_v6_str) + { + if (subnetDecapConfig.enable) + { + // update source IP of existing IPv6 decap terms + setIpAttribute(subnetDecapConfig.tunnel_v6, src_ip_v6_str); + // update source IP of unhandled IPv6 decap terms + updateUnhandledDecapTunnelTerms(subnetDecapConfig.tunnel_v6, src_ip_v6_str); + } + subnetDecapConfig.src_ip_v6 = src_ip_v6_str; + } + } + oss << "."; + SWSS_LOG_NOTICE("%s", oss.str().c_str()); + } + } + else if (op == DEL_COMMAND) + { + subnetDecapConfig.enable = false; + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s.", op.c_str()); + } +} + /** * Function Description: * @brief adds a decap tunnel to ASIC_DB * * Arguments: * @param[in] type - type of tunnel - * @param[in] dst_ip - destination ip address to decap * @param[in] p_src_ip - source ip address for encap (nullptr to skip this) * @param[in] dscp - dscp mode (uniform/pipe) * @param[in] ecn - ecn mode (copy_from_outer/standard) @@ -300,7 +717,6 @@ void TunnelDecapOrch::doTask(Consumer& consumer) bool TunnelDecapOrch::addDecapTunnel( string key, string type, - IpAddresses dst_ip, IpAddress* p_src_ip, string dscp, string ecn, @@ -313,12 +729,10 @@ bool TunnelDecapOrch::addDecapTunnel( SWSS_LOG_ENTER(); sai_status_t status; - IpAddress src_ip("0.0.0.0"); // adding tunnel attributes to array and writing to ASIC_DB sai_attribute_t attr; vector tunnel_attrs; sai_object_id_t overlayIfId; - TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; // create the overlay router interface to create a LOOPBACK type router interface (decap) vector overlay_intf_attrs; @@ -366,8 +780,6 @@ bool TunnelDecapOrch::addDecapTunnel( attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; copy(attr.value.ipaddr, p_src_ip->to_string()); tunnel_attrs.push_back(attr); - src_ip = *p_src_ip; - term_type = TUNNEL_TERM_TYPE_P2P; } // decap ecn mode (copy from outer/standard) @@ -445,13 +857,20 @@ bool TunnelDecapOrch::addDecapTunnel( } } - tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {}, dscp, SAI_NULL_OBJECT_ID, SAI_NULL_OBJECT_ID }; - - // create a decap tunnel entry for every source_ip - dest_ip pair - if (!addDecapTunnelTermEntries(key, src_ip, dst_ip, tunnel_id, term_type)) - { - return false; - } + tunnelTable[key] = { + tunnel_id, // tunnel_id + overlayIfId, // overlay_intf_id + 1, // ref count + {}, // tunnel_term_info + type, // tunnel_type + dscp, // dscp_mode + ecn, // ecn_mode + encap_ecn, // encap_ecn_mode + ttl, // ttl_mode + SAI_NULL_OBJECT_ID, // encap_tc_to_dscp_map_id + SAI_NULL_OBJECT_ID // encap_tc_to_queue_map_id + }; + setDecapTunnelStatus(key); return true; } @@ -461,20 +880,41 @@ bool TunnelDecapOrch::addDecapTunnel( * @brief adds a decap tunnel termination entry to ASIC_DB * * Arguments: - * @param[in] tunnelKey - key of the tunnel from APP_DB - * @param[in] src_ip - source ip address of decap tunnel - * @param[in] dst_ips - destination ip addresses to decap - * @param[in] tunnel_id - the id of the tunnel + * @param[in] tunnel_name - key of the tunnel from APP_DB + * @param[in] src_ip_str - source ip prefix of the decap term entry + * @param[in] dst_ip_str - destination ip prefix of the decap term entry * @param[in] term_type - P2P or P2MP. Other types (MP2P and MP2MP) not supported yet + * @param[in] subnet_type - the subnet type * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ips, sai_object_id_t tunnel_id, TunnelTermType tunnel_type) +bool TunnelDecapOrch::addDecapTunnelTermEntry( + std::string tunnel_name, + std::string src_ip_str, + std::string dst_ip_str, + TunnelTermType term_type, + std::string subnet_type) { SWSS_LOG_ENTER(); + auto tunnel_it = tunnelTable.find(tunnel_name); + if (tunnel_it == tunnelTable.end()) + { + SWSS_LOG_ERROR("Tunnel %s does not exist.", tunnel_name.c_str()); + return false; + } + auto &tunnel = tunnel_it->second; + sai_attribute_t attr; + IpPrefix src_ip{src_ip_str}; + IpPrefix dst_ip{dst_ip_str}; + + if (tunnel.tunnel_term_info.find(dst_ip) != tunnel.tunnel_term_info.end()) + { + SWSS_LOG_NOTICE("Tunnel decap term entry %s already exists.", dst_ip_str.c_str()); + return true; + } // adding tunnel table entry attributes to array and writing to ASIC_DB vector tunnel_table_entry_attrs; @@ -483,14 +923,18 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; - if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + if (term_type == TUNNEL_TERM_TYPE_P2P) { attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P; } - else + else if (term_type == TUNNEL_TERM_TYPE_P2MP) { attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; } + else if (term_type == TUNNEL_TERM_TYPE_MP2MP) + { + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2MP; + } tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; @@ -498,74 +942,63 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddres tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID; - attr.value.oid = tunnel_id; + attr.value.oid = tunnel.tunnel_id; tunnel_table_entry_attrs.push_back(attr); - if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + if (term_type == TUNNEL_TERM_TYPE_P2P || term_type == TUNNEL_TERM_TYPE_MP2MP) { - // Set src ip for P2P only + if (src_ip.isV4() != dst_ip.isV4()) + { + SWSS_LOG_ERROR("Src IP %s doesn't match IP version of dst IP %s.", src_ip_str.c_str(), dst_ip_str.c_str()); + return false; + } + // Set src ip for P2P or MP2MP attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP; - copy(attr.value.ipaddr, src_ip); + copy(attr.value.ipaddr, src_ip.getIp()); tunnel_table_entry_attrs.push_back(attr); } - TunnelEntry *tunnel_info = &tunnelTable.find(tunnelKey)->second; + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP; + copy(attr.value.ipaddr, dst_ip.getIp()); + tunnel_table_entry_attrs.push_back(attr); - // loop through the IP list and create a new tunnel table entry for every IP (in network byte order) - set tunnel_ips = dst_ips.getIpAddresses(); - for (auto it = tunnel_ips.begin(); it != tunnel_ips.end(); ++it) + if (term_type == TUNNEL_TERM_TYPE_MP2MP) { - const IpAddress& ia = *it; - string dst_ip = ia.to_string(); - // The key will be src_ip-dst_ip (like 10.1.1.1-20.2.2.2) if src_ip is not 0, - // or the key will contain dst_ip only - string key; - if (!src_ip.isZero()) - { - key = src_ip.to_string() + '-' + dst_ip; - } - else - { - key = dst_ip; - } + // Set src/dst ip mask for MP2MP + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP_MASK; + copy(attr.value.ipaddr, src_ip.getMask()); + tunnel_table_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK; + copy(attr.value.ipaddr, dst_ip.getMask()); + tunnel_table_entry_attrs.push_back(attr); + } - // check if the there's an entry already for the key pair - if (existingIps.find(key) != existingIps.end()) + // create the tunnel table entry + sai_object_id_t tunnel_term_table_entry_id; + sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry(&tunnel_term_table_entry_id, gSwitchId, (uint32_t)tunnel_table_entry_attrs.size(), tunnel_table_entry_attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create tunnel decap term entry %s.", dst_ip.to_string().c_str()); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_TUNNEL, status); + if (handle_status != task_success) { - SWSS_LOG_NOTICE("%s already exists. Did not create entry.", key.c_str()); + return parseHandleSaiStatusFailure(handle_status); } - else - { - attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP; - copy(attr.value.ipaddr, ia); - tunnel_table_entry_attrs.push_back(attr); - - // create the tunnel table entry - sai_object_id_t tunnel_term_table_entry_id; - sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry(&tunnel_term_table_entry_id, gSwitchId, (uint32_t)tunnel_table_entry_attrs.size(), tunnel_table_entry_attrs.data()); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to create tunnel entry table for ip: %s", key.c_str()); - task_process_status handle_status = handleSaiCreateStatus(SAI_API_TUNNEL, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } - } - - // insert into ip to entry mapping - existingIps.insert(key); - - // insert entry id and ip into tunnel mapping - tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, src_ip.to_string(), dst_ip, tunnel_type }); + } - // pop the last element for the next loop - tunnel_table_entry_attrs.pop_back(); + tunnel.tunnel_term_info[dst_ip] = { + tunnel_term_table_entry_id, // tunnel_term_id + src_ip_str, // src_ip + dst_ip_str, // dst_ip + term_type, // tunnel_type + subnet_type // subnet_type + }; + increaseTunnelRefCount(tunnel_name); + setDecapTunnelTermStatus(tunnel_name, dst_ip_str, src_ip_str, term_type, subnet_type); - SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", dst_ip.c_str()); - } + SWSS_LOG_NOTICE("Created tunnel decap term entry %s.", dst_ip_str.c_str()); - } return true; } @@ -678,46 +1111,53 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, sai_object_id_t value, sa * @brief sets ips for a particular tunnel. deletes ips that are old and adds new ones * * Arguments: - * @param[in] key - key of the tunnel from APP_DB - * @param[in] new_ip_addresses - new destination ip addresses to decap (comes from APP_DB) - * @param[in] tunnel_id - the id of the tunnel + * @param[in] tunnel_name - tunnel name from APP_DB + * @param[in] src_ip_str - new source ip address for the decap terms * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, sai_object_id_t tunnel_id) +bool TunnelDecapOrch::setIpAttribute(string tunnel_name, string src_ip_str) { - TunnelEntry *tunnel_info = &tunnelTable.find(key)->second; + SWSS_LOG_ENTER(); - // make a copy of tunnel_term_info to loop through - vector tunnel_term_info_copy(tunnel_info->tunnel_term_info); + if (src_ip_str.empty()) + { + return false; + } - tunnel_info->tunnel_term_info.clear(); - tunnel_info->dst_ip_addrs = new_ip_addresses; + SWSS_LOG_NOTICE("Setting source IP for decap terms of tunnel %s to %s", tunnel_name.c_str(), src_ip_str.c_str()); - // loop through original ips and remove ips not in the new ip_addresses - for (auto it = tunnel_term_info_copy.begin(); it != tunnel_term_info_copy.end(); ++it) + auto tunnel_it = tunnelTable.find(tunnel_name); + if (tunnel_it == tunnelTable.end()) { - TunnelTermEntry tunnel_entry_info = *it; - string ip = tunnel_entry_info.dst_ip; - if (!new_ip_addresses.contains(ip)) - { - if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, ip)) - { - return false; - } - } - else + SWSS_LOG_INFO("Tunnel %s does not exist", tunnel_name.c_str()); + return true; + } + + TunnelEntry *tunnel_info = &tunnel_it->second; + map decap_terms_copy(tunnel_info->tunnel_term_info.begin(), tunnel_info->tunnel_term_info.end()); + + for (auto it = decap_terms_copy.begin(); it != decap_terms_copy.end(); ++it) + { + TunnelTermEntry &term_entry = it->second; + if (!removeDecapTunnelTermEntry(tunnel_name, term_entry.dst_ip)) { - // add the data into the tunnel_term_info - tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, "0.0.0.0", ip, TUNNEL_TERM_TYPE_P2MP }); + return false; } } - // add all the new ip addresses - if(!addDecapTunnelTermEntries(key, IpAddress(0), new_ip_addresses, tunnel_id, TUNNEL_TERM_TYPE_P2MP)) + for (auto it = decap_terms_copy.begin(); it != decap_terms_copy.end(); ++it) { - return false; + if (tunnel_info->tunnel_term_info.find(it->first) == tunnel_info->tunnel_term_info.end()) + { + TunnelTermEntry &term_entry = it->second; + // add the decap term with new src ip + if (!addDecapTunnelTermEntry(tunnel_name, src_ip_str, term_entry.dst_ip, term_entry.term_type, term_entry.subnet_type)) + { + return false; + } + } } return true; @@ -739,28 +1179,12 @@ bool TunnelDecapOrch::removeDecapTunnel(string table_name, string key) sai_status_t status; TunnelEntry *tunnel_info = &tunnelTable.find(key)->second; - // loop through the tunnel entry ids related to the tunnel and remove them before removing the tunnel - for (auto it = tunnel_info->tunnel_term_info.begin(); it != tunnel_info->tunnel_term_info.end(); ++it) + if (tunnel_info->tunnel_term_info.size() > 0) { - TunnelTermEntry tunnel_entry_info = *it; - string term_key; - swss::IpAddress src_ip(tunnel_entry_info.src_ip); - if (!src_ip.isZero()) - { - term_key = src_ip.to_string() + '-' + tunnel_entry_info.dst_ip; - } - else - { - term_key = tunnel_entry_info.dst_ip; - } - if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, term_key)) - { - return false; - } + SWSS_LOG_ERROR("Failed to remove tunnel %s that has decap terms.", key.c_str()); + return false; } - tunnel_info->tunnel_term_info = {}; - status = sai_tunnel_api->remove_tunnel(tunnel_info->tunnel_id); if (status != SAI_STATUS_SUCCESS) { @@ -786,6 +1210,7 @@ bool TunnelDecapOrch::removeDecapTunnel(string table_name, string key) tunnelTable.erase(key); gQosOrch->removeTunnelReference(table_name, key); + removeDecapTunnelStatus(key); return true; } @@ -794,19 +1219,36 @@ bool TunnelDecapOrch::removeDecapTunnel(string table_name, string key) * @brief remove decap tunnel termination entry * * Arguments: - * @param[in] key - key of the tunnel from APP_DB + * @param[in] tunnel_name - tunnel name + * @param[in] dst_ip - destination ip address of the decap term entry * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, string key) +bool TunnelDecapOrch::removeDecapTunnelTermEntry(std::string tunnel_name, std::string dst_ip_str) { sai_status_t status; - status = sai_tunnel_api->remove_tunnel_term_table_entry(tunnel_term_id); + auto tunnel_it = tunnelTable.find(tunnel_name); + if (tunnel_it == tunnelTable.end()) + { + SWSS_LOG_ERROR("Tunnel %s does not exist.", tunnel_name.c_str()); + return false; + } + + IpPrefix dst_ip{dst_ip_str}; + auto term_it = tunnel_it->second.tunnel_term_info.find(dst_ip); + if (term_it == tunnel_it->second.tunnel_term_info.end()) + { + SWSS_LOG_ERROR("Tunnel decap term entry %s does not exist.", dst_ip_str.c_str()); + return false; + } + auto &decap_term = term_it->second; + + status = sai_tunnel_api->remove_tunnel_term_table_entry(decap_term.tunnel_term_id); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to remove tunnel table entry: %" PRIu64, tunnel_term_id); + SWSS_LOG_ERROR("Failed to remove tunnel table entry: %" PRIu64, decap_term.tunnel_term_id); task_process_status handle_status = handleSaiRemoveStatus(SAI_API_TUNNEL, status); if (handle_status != task_success) { @@ -814,9 +1256,10 @@ bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, } } - // making sure to remove all instances of the ip address - existingIps.erase(key); - SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s", key.c_str()); + tunnel_it->second.tunnel_term_info.erase(term_it); + decreaseTunnelRefCount(tunnel_name); + removeDecapTunnelTermStatus(tunnel_name, dst_ip_str); + SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s.", dst_ip_str.c_str()); return true; } @@ -976,13 +1419,21 @@ bool TunnelDecapOrch::removeNextHopTunnel(std::string tunnelKey, IpAddress& ipAd IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) { + IpAddresses dst_ips{}; + if (tunnelTable.find(tunnelKey) == tunnelTable.end()) { SWSS_LOG_INFO("Tunnel not found %s", tunnelKey.c_str()); - return IpAddresses(); + return dst_ips; + } + + auto &tunnel = tunnelTable[tunnelKey]; + for (auto it = tunnel.tunnel_term_info.begin(); it != tunnel.tunnel_term_info.end(); ++it) + { + dst_ips.add(it->first.getIp()); } - return tunnelTable[tunnelKey].dst_ip_addrs; + return dst_ips; } std::string TunnelDecapOrch::getDscpMode(const std::string &tunnelKey) const @@ -1019,3 +1470,107 @@ bool TunnelDecapOrch::getQosMapId(const std::string &tunnelKey, const std::strin } return true; } + +void TunnelDecapOrch::updateUnhandledDecapTunnelTerms(const string &tunnel_name, const string &src_ip_str) +{ + SWSS_LOG_ENTER(); + + if (src_ip_str.empty()) + { + return; + } + + SWSS_LOG_INFO("Updating unhandled decap tunnel terms for tunnel %s with source IP %s", + tunnel_name.c_str(), src_ip_str.c_str()); + + auto tunnel_it = unhandledDecapTerms.find(tunnel_name); + if (tunnel_it != unhandledDecapTerms.end()) + { + for (auto term_it = tunnel_it->second.begin(); term_it != tunnel_it->second.end(); ++term_it) + { + auto &term = term_it->second; + term.src_ip = src_ip_str; + } + } +} + +void TunnelDecapOrch::processUnhandledDecapTunnelTerms(const string &tunnel_name) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_INFO("Processing unhandled decap tunnel terms for tunnel %s", + tunnel_name.c_str()); + + auto tunnel_it = unhandledDecapTerms.find(tunnel_name); + if (tunnel_it != unhandledDecapTerms.end()) + { + for (auto term_it = tunnel_it->second.begin(); term_it != tunnel_it->second.end();) + { + auto &term = term_it->second; + if (addDecapTunnelTermEntry(tunnel_name, term.src_ip, term.dst_ip, term.term_type, term.subnet_type)) + { + term_it = tunnel_it->second.erase(term_it); + } + else + { + ++term_it; + } + } + } +} + +inline void TunnelDecapOrch::setDecapTunnelStatus(const std::string &tunnel_name) +{ + auto &tunnel = tunnelTable.at(tunnel_name); + + vector fv; + APPEND_IF_NOT_EMPTY(fv, tunnel, tunnel_type); + APPEND_IF_NOT_EMPTY(fv, tunnel, dscp_mode); + APPEND_IF_NOT_EMPTY(fv, tunnel, ecn_mode); + APPEND_IF_NOT_EMPTY(fv, tunnel, encap_ecn_mode); + APPEND_IF_NOT_EMPTY(fv, tunnel, ttl_mode); + stateTunnelDecapTable->set(tunnel_name, fv); +} + +inline void TunnelDecapOrch::removeDecapTunnelStatus(const std::string &tunnel_name) +{ + stateTunnelDecapTable->del(tunnel_name); +} + +inline void TunnelDecapOrch::setDecapTunnelTermStatus( + const std::string &tunnel_name, const std::string &dst_ip_str, const std::string &src_ip_str, + TunnelTermType term_type, const std::string &subnet_type) +{ + const static map DecapTermTypeStrLookupTable = { + {TUNNEL_TERM_TYPE_P2P, "P2P"}, + {TUNNEL_TERM_TYPE_P2MP, "P2MP"}, + {TUNNEL_TERM_TYPE_MP2MP, "MP2MP"}}; + + string tunnel_term_key = tunnel_name + state_db_key_delimiter + dst_ip_str; + string term_type_str = DecapTermTypeStrLookupTable.at(term_type); + vector fv = {{ "term_type", term_type_str }}; + if (!src_ip_str.empty()) + { + fv.emplace_back("src_ip", src_ip_str); + } + if (!subnet_type.empty()) + { + fv.emplace_back("subnet_type", subnet_type); + } + + stateTunnelDecapTermTable->set(tunnel_term_key, fv); +} + +inline void TunnelDecapOrch::removeDecapTunnelTermStatus(const std::string &tunnel_name, const std::string &dst_ip_str) +{ + string tunnel_term_key = tunnel_name + state_db_key_delimiter + dst_ip_str; + stateTunnelDecapTermTable->del(tunnel_term_key); +} + +inline void TunnelDecapOrch::RemoveTunnelIfNotReferenced(const string &tunnel_name) +{ + if (getTunnelRefCount(tunnel_name) == 0) + { + removeDecapTunnel(APP_TUNNEL_DECAP_TABLE_NAME, tunnel_name); + SWSS_LOG_NOTICE("Tunnel %s removed from ASIC_DB.", tunnel_name.c_str()); + } +} diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index 18cf4f88566..0814a3b668b 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -13,7 +13,8 @@ enum TunnelTermType { TUNNEL_TERM_TYPE_P2P, - TUNNEL_TERM_TYPE_P2MP + TUNNEL_TERM_TYPE_P2MP, + TUNNEL_TERM_TYPE_MP2MP }; /* Constants */ @@ -26,17 +27,31 @@ struct TunnelTermEntry std::string src_ip; std::string dst_ip; TunnelTermType term_type; + std::string subnet_type; }; struct TunnelEntry { - sai_object_id_t tunnel_id; // tunnel id - sai_object_id_t overlay_intf_id; // overlay interface id - swss::IpAddresses dst_ip_addrs; // destination ip addresses - std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) - std::string dscp_mode; // dscp_mode, will be used in muxorch - sai_object_id_t encap_tc_to_dscp_map_id; // TC_TO_DSCP map id, will be used in muxorch - sai_object_id_t encap_tc_to_queue_map_id; // TC_TO_QUEUE map id, will be used in muxorch + sai_object_id_t tunnel_id; // tunnel id + sai_object_id_t overlay_intf_id; // overlay interface id + int ref_count; // reference count + std::map tunnel_term_info; // decap terms + std::string tunnel_type; // tunnel type, IPINIP only + std::string dscp_mode; // dscp_mode, will be used in muxorch + std::string ecn_mode; // ECN mode + std::string encap_ecn_mode; // encap ECN mode + std::string ttl_mode; // TTL mode + sai_object_id_t encap_tc_to_dscp_map_id; // TC_TO_DSCP map id, will be used in muxorch + sai_object_id_t encap_tc_to_queue_map_id; // TC_TO_QUEUE map id, will be used in muxorch +}; + +struct SubnetDecapConfig +{ + bool enable; + std::string src_ip; + std::string src_ip_v6; + std::string tunnel; + std::string tunnel_v6; }; struct NexthopTunnel @@ -48,49 +63,104 @@ struct NexthopTunnel /* TunnelTable: key string, tunnel object id */ typedef std::map TunnelTable; -/* - ExistingIps: ips that currently have term entries, - Key in ExistingIps is src_ip-dst_ip -*/ -typedef std::unordered_set ExistingIps; - /* Nexthop IP to refcount map */ typedef std::map Nexthop; /* Tunnel to nexthop maps */ typedef std::map TunnelNhs; +/* unhandled decap term table */ +typedef std::map> UnhandledDecapTermTable; + class TunnelDecapOrch : public Orch { public: - TunnelDecapOrch(swss::DBConnector *db, std::string tableName); + TunnelDecapOrch(swss::DBConnector *appDb, swss::DBConnector *stateDb, + swss::DBConnector *configDb, const std::vector &tableNames); sai_object_id_t createNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); swss::IpAddresses getDstIpAddresses(std::string tunnelKey); std::string getDscpMode(const std::string &tunnelKey) const; bool getQosMapId(const std::string &tunnelKey, const std::string &qos_table_type, sai_object_id_t &oid) const; + const SubnetDecapConfig &getSubnetDecapConfig() const + { + return subnetDecapConfig; + } + private: TunnelTable tunnelTable; - ExistingIps existingIps; TunnelNhs tunnelNhs; - - bool addDecapTunnel(std::string key, std::string type, swss::IpAddresses dst_ip, swss::IpAddress* p_src_ip, + UnhandledDecapTermTable unhandledDecapTerms; + std::unique_ptr stateTunnelDecapTable = nullptr; + std::unique_ptr stateTunnelDecapTermTable = nullptr; + SubnetDecapConfig subnetDecapConfig = { + false, + "", + "", + "IPINIP_SUBNET", + "IPINIP_SUBNET_V6" + }; + + bool addDecapTunnel(std::string key, std::string type, swss::IpAddress* p_src_ip, std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl, sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id); bool removeDecapTunnel(std::string table_name, std::string key); - bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id, TunnelTermType type); - bool removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, std::string ip); + bool addDecapTunnelTermEntry(std::string tunnel_name, std::string src_ip_str, + std::string dst_ip_str, TunnelTermType term_type, std::string subnet_type); + bool removeDecapTunnelTermEntry(std::string tunnel_name, std::string dst_ip_str); + + void addUnhandledDecapTunnelTerm(const std::string &tunnel_name, const std::string &src_ip_str, + const std::string &dst_ip_str, TunnelTermType term_type, + const std::string &subnet_type) + { + swss::IpPrefix dst_ip(dst_ip_str); + unhandledDecapTerms[tunnel_name][dst_ip] = {SAI_NULL_OBJECT_ID, src_ip_str, dst_ip_str, term_type, subnet_type}; + } + void removeUnhandledDecapTunnelTerm(const std::string &tunnel_name, const std::string &dst_ip_str) + { + swss::IpPrefix dst_ip(dst_ip_str); + auto tunnel_it = unhandledDecapTerms.find(tunnel_name); + if (tunnel_it != unhandledDecapTerms.end()) + { + tunnel_it->second.erase(dst_ip); + } + } + void updateUnhandledDecapTunnelTerms(const std::string &tunnel_name, const std::string &src_ip_str); + void processUnhandledDecapTunnelTerms(const std::string &tunnel_name); bool setTunnelAttribute(std::string field, std::string value, sai_object_id_t existing_tunnel_id); bool setTunnelAttribute(std::string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id); - bool setIpAttribute(std::string key, swss::IpAddresses new_ip_addresses, sai_object_id_t tunnel_id); + bool setIpAttribute(std::string tunnel_name, std::string src_ip_str); sai_object_id_t getNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); int incNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); int decNextHopRef(std::string tunnelKey, swss::IpAddress& ipAddr); void doTask(Consumer& consumer); + void doDecapTunnelTask(Consumer &consumer); + void doDecapTunnelTermTask(Consumer &consumer); + void doSubnetDecapTask(Consumer &consumer); + void doSubnetDecapTask(const swss::KeyOpFieldsValuesTuple &tuple); + + void setDecapTunnelStatus(const std::string &tunnel_name); + void removeDecapTunnelStatus(const std::string &tunnel_name); + void setDecapTunnelTermStatus(const std::string &tunnel_name, const std::string &dst_ip_str, + const std::string &src_ip_str, TunnelTermType term_type, const std::string &subnet_type); + void removeDecapTunnelTermStatus(const std::string &tunnel_name, const std::string &dst_ip_str); + void RemoveTunnelIfNotReferenced(const std::string &tunnel_name); + int getTunnelRefCount(const std::string &tunnel_name) + { + return tunnelTable[tunnel_name].ref_count; + } + void increaseTunnelRefCount(const std::string &tunnel_name) + { + ++tunnelTable[tunnel_name].ref_count; + } + void decreaseTunnelRefCount(const std::string &tunnel_name) + { + --tunnelTable[tunnel_name].ref_count; + } }; #endif diff --git a/orchagent/tunneltermhelper.cpp b/orchagent/tunneltermhelper.cpp new file mode 100644 index 00000000000..6ef97f99dba --- /dev/null +++ b/orchagent/tunneltermhelper.cpp @@ -0,0 +1,103 @@ +#include "tunneltermhelper.h" +#include "swss/ipaddress.h" +#include "swss/ipprefix.h" +#include "directory.h" + +extern Directory gDirectory; +extern PortsOrch *gPortsOrch; +extern IntfsOrch *gIntfsOrch; + +using namespace std; +using namespace swss; + +TunnelTermHelper::TunnelTermHelper(DBConnector *cfgDb) + : ports_orch_(nullptr), intfs_orch_(nullptr) +{ + SWSS_LOG_ENTER(); + + port_table_ = make_unique
(cfgDb, CFG_PORT_TABLE_NAME); +} + +void TunnelTermHelper::initialize() +{ + SWSS_LOG_ENTER(); + + ports_orch_ = gDirectory.get(); + intfs_orch_ = gDirectory.get(); + + assert(ports_orch_); + assert(intfs_orch_); +} + +std::vector TunnelTermHelper::getBindPoints() +{ + std::vector bind_points; + std::set internal_ports = findInternalPorts(); + auto all_ports = ports_orch_->getAllPorts(); + + std::set legitSet; + + /* Add physical and LAG prots */ + for (auto& it : all_ports) + { + Port& port = it.second; + if (port.m_type == Port::PHY || port.m_type == Port::LAG) + { + legitSet.insert(it.first); + } + } + + /* Remove LAG members */ + for (auto& it : all_ports) + { + Port& port = it.second; + if (port.m_type == Port::LAG) + { + for (auto member : port.m_members) + { + legitSet.erase(member); + } + } + } + + for (auto& port : legitSet) + { + if (internal_ports.find(port) == internal_ports.end()) + { + bind_points.push_back(port); + } + } + + return bind_points; +} + +std::set TunnelTermHelper::findInternalPorts() +{ + std::set internal_ports; + std::vector all_ports; + port_table_->getKeys(all_ports); + + for (auto& port : all_ports) + { + std::string role; + if (port_table_->hget(port, PORT_ROLE, role)) + { + if (role == PORT_ROLE_DPC) + { + internal_ports.insert(port); + } + } + } + + return internal_ports; +} + +std::string TunnelTermHelper::getNbrAlias(const swss::IpAddress& ip) +{ + return intfs_orch_->getRouterIntfsAlias(ip); +} + +std::string TunnelTermHelper::getRuleName(const string& vnet_name, const swss::IpPrefix& vip) +{ + return std::string(VNET_TUNNEL_TERM_ACL_TABLE) + ":" + vnet_name + "_" + vip.to_string() + "_" + VNET_TUNNEL_TERM_ACL_RULE_NAME_SUFFIX; +} \ No newline at end of file diff --git a/orchagent/tunneltermhelper.h b/orchagent/tunneltermhelper.h new file mode 100644 index 00000000000..51f7365135f --- /dev/null +++ b/orchagent/tunneltermhelper.h @@ -0,0 +1,35 @@ +#ifndef _TUNNELTERMHELPER_H +#define _TUNNELTERMHELPER_H + +#include +#include +#include +#include +#include "dbconnector.h" +#include "portsorch.h" +#include "intfsorch.h" + +#define VNET_TUNNEL_TERM_ACL_TABLE_TYPE "VNET_LOCAL_ENDPOINT_REDIRECT" +#define VNET_TUNNEL_TERM_ACL_TABLE "VNET_LOCAL_ENDPOINT" +#define VNET_TUNNEL_TERM_ACL_BASE_PRIORITY 9998 +#define VNET_TUNNEL_TERM_ACL_RULE_NAME_SUFFIX "TUNN_TERM" + +class TunnelTermHelper +{ +public: + TunnelTermHelper(DBConnector *cfgDb); + + virtual void initialize(); + + std::vector getBindPoints(); + std::set findInternalPorts(); + std::string getNbrAlias(const swss::IpAddress& ip); + std::string getRuleName(const std::string& vnet_name, const swss::IpPrefix& vip); + +private: + unique_ptr port_table_; + PortsOrch *ports_orch_; + IntfsOrch *intfs_orch_; +}; + +#endif // _TUNNELTERMHELPER_H diff --git a/orchagent/twamporch.cpp b/orchagent/twamporch.cpp new file mode 100644 index 00000000000..cd4fe0b666b --- /dev/null +++ b/orchagent/twamporch.cpp @@ -0,0 +1,1053 @@ +#include "twamporch.h" +#include "vrforch.h" +#include "crmorch.h" +#include "logger.h" +#include "swssnet.h" +#include "converter.h" +#include "sai_serialize.h" +#include "tokenize.h" +#include "notifier.h" +#include "notifications.h" + +#include + +using namespace std; +using namespace swss; + +/* TWAMP infor */ +#define TWAMP_SESSION_MODE "MODE" +#define TWAMP_SESSION_ROLE "ROLE" +#define TWAMP_SESSION_VRF_NAME "VRF_NAME" +#define TWAMP_SESSION_HW_LOOKUP "HW_LOOKUP" + +/* TWAMP-test packet */ +#define TWAMP_SESSION_SRC_IP "SRC_IP" +#define TWAMP_SESSION_SRC_UDP_PORT "SRC_UDP_PORT" +#define TWAMP_SESSION_DST_IP "DST_IP" +#define TWAMP_SESSION_DST_UDP_PORT "DST_UDP_PORT" +#define TWAMP_SESSION_DSCP "DSCP" +#define TWAMP_SESSION_TTL "TTL" +#define TWAMP_SESSION_PACKET_TIMESTAMP_FORMAT "TIMESTAMP_FORMAT" +#define TWAMP_SESSION_PACKET_PADDING_SIZE "PADDING_SIZE" + +/* Session-Sender */ +#define TWAMP_SESSION_TX_PACKET_COUNT "PACKET_COUNT" +#define TWAMP_SESSION_TX_MONITOR_TIME "MONITOR_TIME" +#define TWAMP_SESSION_TX_INTERVAL "TX_INTERVAL" +#define TWAMP_SESSION_TIMEOUT "TIMEOUT" +#define TWAMP_SESSION_STATISTICS_INTERVAL "STATISTICS_INTERVAL" +#define TWAMP_SESSION_ADMIN_STATE "ADMIN_STATE" + +/* TWAMP session status */ +#define TWAMP_SESSION_STATUS "status" +#define TWAMP_SESSION_STATUS_ACTIVE "active" +#define TWAMP_SESSION_STATUS_INACTIVE "inactive" + +#define TWAMP_SESSION_TX_MODE_PACKET_NUM "packet_num" +#define TWAMP_SESSION_TX_MODE_CONTINUOUS "continuous" + +#define TWAMP_SESSION_DSCP_MIN 0 +#define TWAMP_SESSION_DSCP_MAX 63 + +#define TWAMP_SESSION_TIMEOUT_MIN 1 +#define TWAMP_SESSION_TIMEOUT_MAX 10 + +static map twamp_role_map = +{ + { "SENDER", SAI_TWAMP_SESSION_ROLE_SENDER }, + { "REFLECTOR", SAI_TWAMP_SESSION_ROLE_REFLECTOR } +}; + +static map twamp_mode_map = +{ + { "FULL", SAI_TWAMP_MODE_FULL }, + { "LIGHT", SAI_TWAMP_MODE_LIGHT } +}; + +static map timestamp_format_map = +{ + { "NTP", SAI_TWAMP_TIMESTAMP_FORMAT_NTP }, + { "PTP", SAI_TWAMP_TIMESTAMP_FORMAT_PTP } +}; + +static map session_admin_state_map = +{ + { "ENABLED", true }, + { "DISABLED", false } +}; + +static map hw_lookup_map = +{ + { "TRUE", true }, + { "FALSE", false } +}; + +/* Global variables */ +extern sai_object_id_t gSwitchId; +extern sai_object_id_t gVirtualRouterId; +extern sai_switch_api_t *sai_switch_api; +extern sai_twamp_api_t *sai_twamp_api; +extern CrmOrch *gCrmOrch; + +const vector twamp_session_stat_ids = +{ + SAI_TWAMP_SESSION_STAT_RX_PACKETS, + SAI_TWAMP_SESSION_STAT_RX_BYTE, + SAI_TWAMP_SESSION_STAT_TX_PACKETS, + SAI_TWAMP_SESSION_STAT_TX_BYTE, + SAI_TWAMP_SESSION_STAT_DROP_PACKETS, + SAI_TWAMP_SESSION_STAT_MAX_LATENCY, + SAI_TWAMP_SESSION_STAT_MIN_LATENCY, + SAI_TWAMP_SESSION_STAT_AVG_LATENCY, + SAI_TWAMP_SESSION_STAT_MAX_JITTER, + SAI_TWAMP_SESSION_STAT_MIN_JITTER, + SAI_TWAMP_SESSION_STAT_AVG_JITTER +}; + + + +TwampOrch::TwampOrch(TableConnector confDbConnector, TableConnector stateDbConnector, SwitchOrch *switchOrch, PortsOrch *portOrch, VRFOrch *vrfOrch) : + Orch(confDbConnector.first, confDbConnector.second), + m_stateDbTwampTable(stateDbConnector.first, stateDbConnector.second), + m_switchOrch(switchOrch), + m_portsOrch(portOrch), + m_vrfOrch(vrfOrch) +{ + /* Set entries count to 0 */ + m_maxTwampSessionCount = m_twampSessionCount = 0; + + /* Get the Maximum supported TWAMP sessions */ + SWSS_LOG_INFO("Get the Maximum supported TWAMP sessions"); + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_MAX_TWAMP_SESSION; + sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Twamp session resource availability is not supported. Skipping ..."); + return; + } + else + { + m_maxTwampSessionCount = attr.value.u32; + } + + /* Set MAX entries to state DB */ + if (m_maxTwampSessionCount) + { + vector fvTuple; + fvTuple.emplace_back("MAX_TWAMP_SESSION_COUNT", to_string(m_maxTwampSessionCount)); + m_switchOrch->set_switch_capability(fvTuple); + } + else + { + SWSS_LOG_NOTICE("Twamp session resource availability is not supported. Skipping ..."); + return; + } + + /* Add TWAMP session event notification support */ + DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); + m_twampNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); + auto twampNotifier = new Notifier(m_twampNotificationConsumer, this, "TWAMP_NOTIFICATIONS"); + Orch::addExecutor(twampNotifier); + register_event_notif = false; + + /* Initialize DB connectors */ + m_asicDb = shared_ptr(new DBConnector("ASIC_DB", 0)); + m_countersDb = shared_ptr(new DBConnector("COUNTERS_DB", 0)); + + /* Initialize VIDTORID table */ + m_vidToRidTable = unique_ptr
(new Table(m_asicDb.get(), "VIDTORID")); + + /* Initialize counter tables */ + m_counterTwampSessionNameMapTable = unique_ptr
(new Table(m_countersDb.get(), COUNTERS_TWAMP_SESSION_NAME_MAP)); + m_countersTable = unique_ptr
(new Table(m_countersDb.get(), COUNTERS_TABLE)); +} + +bool TwampOrch::isSessionExists(const string& name) +{ + SWSS_LOG_ENTER(); + + return m_twampEntries.find(name) != m_twampEntries.end(); +} + +bool TwampOrch::getSessionName(const sai_object_id_t oid, string& name) +{ + SWSS_LOG_ENTER(); + + for (const auto& it: m_twampEntries) + { + if (it.second.session_id == oid) + { + name = it.first; + return true; + } + } + + return false; +} + +bool TwampOrch::validateUdpPort(uint16_t udp_port) +{ + if (udp_port == 862) + { + return true; + } + if (udp_port == 863) + { + return true; + } + if (udp_port >= 1025) + { + return true; + } + return false; +} + +void TwampOrch::increaseTwampSessionCount(void) +{ + m_twampSessionCount++; +} + +void TwampOrch::decreaseTwampSessionCount(void) +{ + m_twampSessionCount--; +} + +bool TwampOrch::checkTwampSessionCount(void) +{ + return m_twampSessionCount < m_maxTwampSessionCount; +} + +void TwampOrch::setSessionStatus(const string& name, const string& status) +{ + SWSS_LOG_ENTER(); + + vector fvVector; + fvVector.emplace_back(TWAMP_SESSION_STATUS, status); + m_stateDbTwampTable.set(name, fvVector); +} + +bool TwampOrch::getSessionStatus(const string &name, string& status) +{ + SWSS_LOG_ENTER(); + + if (m_stateDbTwampTable.hget(name, TWAMP_SESSION_STATUS, status)) + { + return true; + } + return false; +} + +void TwampOrch::removeSessionStatus(const string& name) +{ + SWSS_LOG_ENTER(); + + m_stateDbTwampTable.del(name); +} + +void TwampOrch::removeSessionCounter(const sai_object_id_t session_id) +{ + SWSS_LOG_ENTER(); + + string key_pattern = "COUNTERS:" + sai_serialize_object_id(session_id) + "*"; + auto keys = m_countersDb->keys(key_pattern); + for (auto& k : keys) + { + m_countersDb->del(k); + } +} + +void TwampOrch::initSessionStats(const string& name) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampStatistics.find(name); + if (it == m_twampStatistics.end()) + { + SWSS_LOG_ERROR("Failed to init non-existent twamp session %s stat info", name.c_str()); + return; + } + + TwampStats& total_stats = it->second; + + total_stats.rx_packets = 0; + total_stats.rx_bytes = 0; + total_stats.tx_packets = 0; + total_stats.tx_bytes = 0; + total_stats.drop_packets = 0; + total_stats.max_latency = 0; + total_stats.min_latency = 0; + total_stats.avg_latency = 0; + total_stats.max_jitter = 0; + total_stats.min_jitter = 0; + total_stats.avg_jitter = 0; + total_stats.avg_latency_total = 0; + total_stats.avg_jitter_total = 0; +} + +bool TwampOrch::registerTwampEventNotification(void) +{ + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_TWAMP_SESSION_EVENT_NOTIFY, + &capability); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_NOTICE("Unable to query the TWAMP event notification capability"); + return false; + } + + if (!capability.set_implemented) + { + SWSS_LOG_NOTICE("TWAMP register event notification not supported"); + return false; + } + + attr.id = SAI_SWITCH_ATTR_TWAMP_SESSION_EVENT_NOTIFY; + attr.value.ptr = (void *)on_twamp_session_event; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register TWAMP notification handler"); + return false; + } + + return true; +} + +bool TwampOrch::activateSession(const string& name, TwampEntry& entry) +{ + SWSS_LOG_ENTER(); + + sai_status_t status; + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_MODE; + attr.value.s32 = entry.mode; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_SESSION_ROLE; + attr.value.s32 = entry.role; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_HW_LOOKUP_VALID; + attr.value.booldata = entry.hw_lookup; + attrs.emplace_back(attr); + + if (entry.vrf_id) + { + attr.id = SAI_TWAMP_SESSION_ATTR_VIRTUAL_ROUTER; + attr.value.oid = entry.vrf_id; + attrs.emplace_back(attr); + } + + attr.id = SAI_TWAMP_SESSION_ATTR_SRC_IP; + copy(attr.value.ipaddr, entry.src_ip); + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_DST_IP; + copy(attr.value.ipaddr, entry.dst_ip); + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_UDP_SRC_PORT; + attr.value.u32 = entry.src_udp_port; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_UDP_DST_PORT; + attr.value.u32 = entry.dst_udp_port; + attrs.emplace_back(attr); + + if (entry.role == SAI_TWAMP_SESSION_ROLE_SENDER) + { + if (entry.tx_mode == TWAMP_SESSION_TX_MODE_PACKET_NUM) + { + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_PKT_TX_MODE; + attr.value.s32 = SAI_TWAMP_PKT_TX_MODE_PACKET_COUNT; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_TX_PKT_CNT; + attr.value.u32 = entry.packet_count; + attrs.emplace_back(attr); + } + else if (entry.tx_mode == TWAMP_SESSION_TX_MODE_CONTINUOUS) + { + if (entry.monitor_time) + { + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_PKT_TX_MODE; + attr.value.u32 = SAI_TWAMP_PKT_TX_MODE_PERIOD; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_TX_PKT_PERIOD; + attr.value.u32 = entry.monitor_time; + attrs.emplace_back(attr); + } + else + { + attr.id = SAI_TWAMP_SESSION_ATTR_TWAMP_PKT_TX_MODE; + attr.value.u32 = SAI_TWAMP_PKT_TX_MODE_CONTINUOUS; + attrs.emplace_back(attr); + } + } + + attr.id = SAI_TWAMP_SESSION_ATTR_TX_INTERVAL; + attr.value.u32 = entry.tx_interval; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_TIMEOUT; + attr.value.u32 = entry.timeout; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_STATISTICS_INTERVAL; + attr.value.u32 = entry.statistics_interval; + attrs.emplace_back(attr); + + attr.id = SAI_TWAMP_SESSION_ATTR_SESSION_ENABLE_TRANSMIT; + attr.value.booldata = entry.admin_state; + attrs.emplace_back(attr); + } + + setSessionStatus(name, TWAMP_SESSION_STATUS_INACTIVE); + + status = sai_twamp_api->create_twamp_session(&entry.session_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create twamp session %s, status %d", name.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_TWAMP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + /* increase VRF reference count */ + m_vrfOrch->increaseVrfRefCount(entry.vrf_id); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_TWAMP_ENTRY); + + increaseTwampSessionCount(); + + if (entry.role == SAI_TWAMP_SESSION_ROLE_REFLECTOR) + { + setSessionStatus(name, TWAMP_SESSION_STATUS_ACTIVE); + } + + return true; +} + +bool TwampOrch::deactivateSession(const string& name, TwampEntry& entry) +{ + SWSS_LOG_ENTER(); + sai_status_t status; + + status = sai_twamp_api->remove_twamp_session(entry.session_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove twamp session %s, status %d", name.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_TWAMP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + /* decrease VRF reference count */ + m_vrfOrch->decreaseVrfRefCount(entry.vrf_id); + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_TWAMP_ENTRY); + + decreaseTwampSessionCount(); + + setSessionStatus(name, TWAMP_SESSION_STATUS_INACTIVE); + + return true; +} + +bool TwampOrch::setSessionTransmitEn(TwampEntry& entry, string admin_state) +{ + SWSS_LOG_ENTER(); + + if (entry.role != SAI_TWAMP_SESSION_ROLE_SENDER) + { + return false; + } + + auto found = session_admin_state_map.find(admin_state); + if (found == session_admin_state_map.end()) + { + SWSS_LOG_ERROR("Incorrect transmit value: %s", admin_state.c_str()); + return false; + } + + sai_attribute_t attr; + attr.id = SAI_TWAMP_SESSION_ATTR_SESSION_ENABLE_TRANSMIT; + attr.value.booldata = found->second; + sai_status_t status = sai_twamp_api->set_twamp_session_attribute(entry.session_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set twamp session %" PRIx64 " %s transmit, status %d", + entry.session_id, admin_state.c_str(), status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_TWAMP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +task_process_status TwampOrch::createEntry(const string& key, const vector& data) +{ + SWSS_LOG_ENTER(); + + if (!register_event_notif) + { + if (!registerTwampEventNotification()) + { + SWSS_LOG_ERROR("TWAMP session for %s cannot be created", key.c_str()); + return task_process_status::task_failed; + } + register_event_notif = true; + } + + if (!checkTwampSessionCount()) + { + SWSS_LOG_NOTICE("Failed to create twamp session %s: resources are not available", key.c_str()); + return task_process_status::task_failed; + } + + TwampEntry entry; + for (auto i : data) + { + try { + string attr_name = to_upper(fvField(i)); + string attr_value = fvValue(i); + + if (attr_name == TWAMP_SESSION_MODE) + { + string value = to_upper(attr_value); + if (twamp_mode_map.find(value) == twamp_mode_map.end()) + { + SWSS_LOG_ERROR("Failed to parse valid mode %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.mode = twamp_mode_map[value]; + } + else if (attr_name == TWAMP_SESSION_ROLE) + { + string value = to_upper(attr_value); + if (twamp_role_map.find(value) == twamp_role_map.end()) + { + SWSS_LOG_ERROR("Failed to parse valid role %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.role = twamp_role_map[value]; + } + else if (attr_name == TWAMP_SESSION_SRC_IP) + { + entry.src_ip = attr_value; + } + else if (attr_name == TWAMP_SESSION_DST_IP) + { + entry.dst_ip = attr_value; + } + else if (attr_name == TWAMP_SESSION_SRC_UDP_PORT) + { + uint16_t value = to_uint(attr_value); + if (!validateUdpPort(value)) + { + SWSS_LOG_ERROR("Failed to parse valid souce udp port %d", value); + return task_process_status::task_invalid_entry; + } + entry.src_udp_port = value; + } + else if (attr_name == TWAMP_SESSION_DST_UDP_PORT) + { + uint16_t value = to_uint(attr_value); + if (!validateUdpPort(value)) + { + SWSS_LOG_ERROR("Failed to parse valid destination udp port %d", value); + return task_process_status::task_invalid_entry; + } + entry.dst_udp_port = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_VRF_NAME) + { + if (attr_value == "default") + { + entry.vrf_id = gVirtualRouterId; + } + else + { + if (!m_vrfOrch->isVRFexists(attr_value)) + { + SWSS_LOG_WARN("Vrf '%s' hasn't been created yet", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.vrf_id = m_vrfOrch->getVRFid(attr_value); + } + } + else if (attr_name == TWAMP_SESSION_DSCP) + { + entry.dscp = to_uint(attr_value, TWAMP_SESSION_DSCP_MIN, TWAMP_SESSION_DSCP_MAX); + } + else if (attr_name == TWAMP_SESSION_TTL) + { + entry.ttl = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_PACKET_TIMESTAMP_FORMAT) + { + string value = to_upper(attr_value); + if (timestamp_format_map.find(value) == timestamp_format_map.end()) + { + SWSS_LOG_ERROR("Failed to parse timestamp format value: %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.timestamp_format = timestamp_format_map[value]; + } + else if (attr_name == TWAMP_SESSION_PACKET_PADDING_SIZE) + { + entry.padding_size = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_TX_PACKET_COUNT) + { + if (entry.tx_mode == TWAMP_SESSION_TX_MODE_CONTINUOUS) + { + SWSS_LOG_ERROR("Configured packet count %s is conflict with monitor time", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + + entry.packet_count = to_uint(attr_value); + entry.tx_mode = TWAMP_SESSION_TX_MODE_PACKET_NUM; + } + else if (attr_name == TWAMP_SESSION_TX_MONITOR_TIME) + { + if (entry.tx_mode == TWAMP_SESSION_TX_MODE_PACKET_NUM) + { + SWSS_LOG_ERROR("Configured monitor time %s is conflict with packet count", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + + entry.monitor_time = to_uint(attr_value); + entry.tx_mode = TWAMP_SESSION_TX_MODE_CONTINUOUS; + } + else if (attr_name == TWAMP_SESSION_TX_INTERVAL) + { + entry.tx_interval = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_STATISTICS_INTERVAL) + { + entry.statistics_interval = to_uint(attr_value); + } + else if (attr_name == TWAMP_SESSION_TIMEOUT) + { + entry.timeout = to_uint(attr_value, TWAMP_SESSION_TIMEOUT_MIN, TWAMP_SESSION_TIMEOUT_MAX); + } + else if (attr_name == TWAMP_SESSION_ADMIN_STATE) + { + string value = to_upper(attr_value); + if (session_admin_state_map.find(value) == session_admin_state_map.end()) + { + SWSS_LOG_ERROR("Failed to parse transmit mode value: %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.admin_state = session_admin_state_map[value]; + } + else if (attr_name == TWAMP_SESSION_HW_LOOKUP) + { + string value = to_upper(attr_value); + if (hw_lookup_map.find(value) == hw_lookup_map.end()) + { + SWSS_LOG_ERROR("Failed to parse hw lookup value: %s", attr_value.c_str()); + return task_process_status::task_invalid_entry; + } + entry.hw_lookup = hw_lookup_map[value]; + } + else + { + SWSS_LOG_ERROR("Failed to parse session %s configuration. Unknown attribute %s", key.c_str(), attr_name.c_str()); + return task_process_status::task_invalid_entry; + } + } + catch (const exception& e) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s error: %s.", key.c_str(), fvField(i).c_str(), e.what()); + return task_process_status::task_invalid_entry; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s. Unknown error has been occurred", key.c_str(), fvField(i).c_str()); + return task_process_status::task_failed; + } + } + + m_twampEntries.emplace(key, entry); + + if (entry.role == SAI_TWAMP_SESSION_ROLE_SENDER) + { + TwampStats hw_stats; + memset(&hw_stats, 0, sizeof(TwampStats)); + m_twampStatistics.emplace(key, hw_stats); + initSessionStats(key); + } + + auto &session = m_twampEntries.find(key)->second; + if (!activateSession(key, session)) + { + SWSS_LOG_ERROR("Failed to create twamp session %s", key.c_str()); + return task_process_status::task_failed; + } + + return task_process_status::task_success; +} + +task_process_status TwampOrch::updateEntry(const string& key, const vector& data) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampEntries.find(key); + if (it == m_twampEntries.end()) + { + SWSS_LOG_NOTICE("Failed to set twamp session, session %s not exists", key.c_str()); + return task_process_status::task_invalid_entry; + } + TwampEntry& entry = it->second; + + for (auto i : data) + { + try { + const auto &attr_field = to_upper(fvField(i)); + const auto &attr_value = fvValue(i); + + if (attr_field == TWAMP_SESSION_ADMIN_STATE) + { + string value = to_upper(attr_value); + if (setSessionTransmitEn(entry, value)) + { + entry.admin_state = session_admin_state_map[value]; + if (entry.admin_state) + { + string running_status; + getSessionStatus(key, running_status); + if (running_status == TWAMP_SESSION_STATUS_INACTIVE) + { + removeSessionCounter(entry.session_id); + initSessionStats(key); + } + setSessionStatus(key, TWAMP_SESSION_STATUS_ACTIVE); + SWSS_LOG_NOTICE("Activated twamp session %s", key.c_str()); + } + else + { + setSessionStatus(key, TWAMP_SESSION_STATUS_INACTIVE); + SWSS_LOG_NOTICE("Deactivated twamp session %s", key.c_str()); + } + } + else + { + SWSS_LOG_ERROR("Failed to set twamp session %s transmit %s", key.c_str(), attr_value.c_str()); + } + } + else + { + SWSS_LOG_DEBUG("Ignore to parse session %s configuration attribute %s", key.c_str(), fvField(i).c_str()); + } + } + catch (const exception& e) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s error: %s.", key.c_str(), fvField(i).c_str(), e.what()); + return task_process_status::task_invalid_entry; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to parse session %s attribute %s. Unknown error has been occurred", key.c_str(), fvField(i).c_str()); + return task_process_status::task_failed; + } + } + + return task_process_status::task_success; +} + +task_process_status TwampOrch::deleteEntry(const string& key) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampEntries.find(key); + if (it == m_twampEntries.end()) + { + SWSS_LOG_ERROR("Failed to remove non-existent twamp session %s", key.c_str()); + return task_process_status::task_invalid_entry; + } + + TwampEntry& entry = it->second; + + if (!deactivateSession(key, entry)) + { + SWSS_LOG_ERROR("Failed to remove twamp session %s", key.c_str()); + return task_process_status::task_failed; + } + + /* remove TWAMP session in STATE_DB */ + removeSessionStatus(key); + + /* remove TWAMP session maps in COUNTERS_DB */ + m_counterTwampSessionNameMapTable->hdel("", key); + + /* remove TWAMP session in COUNTER_DB */ + removeSessionCounter(entry.session_id); + + /* remove soft table in orchagent */ + m_twampEntries.erase(key); + m_twampStatistics.erase(key); + + SWSS_LOG_NOTICE("Removed twamp session %s", key.c_str()); + + return task_process_status::task_success; +} + +void TwampOrch::doTask(Consumer& consumer) +{ + SWSS_LOG_ENTER(); + + if (!m_portsOrch->allPortsReady()) + { + return; + } + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + auto data = kfvFieldsValues(t); + task_process_status task_status = task_process_status::task_failed; + + if (op == SET_COMMAND) + { + if (!isSessionExists(key)) + { + task_status = createEntry(key, data); + } + else + { + task_status = updateEntry(key, data); + } + } + else if (op == DEL_COMMAND) + { + task_status = deleteEntry(key); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + } + + /* Specifically retry the task when asked */ + if (task_status == task_process_status::task_need_retry) + { + it++; + } + else + { + it = consumer.m_toSync.erase(it); + } + } +} + +bool TwampOrch::addCounterNameMap(const string& name, const sai_object_id_t session_id) +{ + SWSS_LOG_ENTER(); + + string value; + const auto id = sai_serialize_object_id(session_id); + + if (m_vidToRidTable->hget("", id, value)) + { + vector fields; + fields.emplace_back(name, id); + m_counterTwampSessionNameMapTable->set("", fields); + + return true; + } + else + { + SWSS_LOG_NOTICE("TWAMP session counter %s already exists.", name.c_str()); + return true; + } + + return false; +} + +void TwampOrch::saveSessionStatsLatest(const sai_object_id_t session_id, const uint32_t index, const vector& stats) +{ + SWSS_LOG_ENTER(); + + vector values; + + for (const auto& it: twamp_session_stat_ids) + { + values.emplace_back(sai_serialize_twamp_session_stat(it), to_string(stats[it])); + } + + m_countersTable->set(sai_serialize_object_id(session_id) + ":INDEX:" + to_string(index), values); + + return; +} + +void TwampOrch::calculateCounters(const string& name, const uint32_t index, const vector& stats) +{ + SWSS_LOG_ENTER(); + + auto it = m_twampStatistics.find(name); + if (it == m_twampStatistics.end()) + { + SWSS_LOG_ERROR("Failed to caculate non-existent twamp session %s", name.c_str()); + return; + } + + TwampStats& total_stats = it->second; + /* packets */ + total_stats.rx_packets += stats[SAI_TWAMP_SESSION_STAT_RX_PACKETS]; + total_stats.rx_bytes += stats[SAI_TWAMP_SESSION_STAT_RX_BYTE]; + total_stats.tx_packets += stats[SAI_TWAMP_SESSION_STAT_TX_PACKETS]; + total_stats.tx_bytes += stats[SAI_TWAMP_SESSION_STAT_TX_BYTE]; + total_stats.drop_packets += stats[SAI_TWAMP_SESSION_STAT_DROP_PACKETS]; + + /* latency */ + total_stats.max_latency = (stats[SAI_TWAMP_SESSION_STAT_MAX_LATENCY] > total_stats.max_latency) ? + stats[SAI_TWAMP_SESSION_STAT_MAX_LATENCY] : total_stats.max_latency; + total_stats.min_latency = (index == 1) ? stats[SAI_TWAMP_SESSION_STAT_MIN_LATENCY] : + ((stats[SAI_TWAMP_SESSION_STAT_MIN_LATENCY] < total_stats.min_latency) ? + stats[SAI_TWAMP_SESSION_STAT_MIN_LATENCY] : total_stats.min_latency); + total_stats.avg_latency_total += stats[SAI_TWAMP_SESSION_STAT_AVG_LATENCY]; + total_stats.avg_latency = total_stats.avg_latency_total / index; + + /* jitter */ + total_stats.max_jitter = (stats[SAI_TWAMP_SESSION_STAT_MAX_JITTER] > total_stats.max_jitter) ? + stats[SAI_TWAMP_SESSION_STAT_MAX_JITTER] : total_stats.max_jitter; + total_stats.min_jitter = (index == 1) ? stats[SAI_TWAMP_SESSION_STAT_MIN_JITTER] : + ((stats[SAI_TWAMP_SESSION_STAT_MIN_JITTER] < total_stats.min_jitter) ? + stats[SAI_TWAMP_SESSION_STAT_MIN_JITTER] : total_stats.min_jitter); + total_stats.avg_jitter_total += stats[SAI_TWAMP_SESSION_STAT_AVG_JITTER]; + total_stats.avg_jitter = total_stats.avg_jitter_total / index; +} + +void TwampOrch::saveCountersTotal(const string& name, const sai_object_id_t session_id) +{ + SWSS_LOG_ENTER(); + + vector values; + + auto it = m_twampStatistics.find(name); + if (it == m_twampStatistics.end()) + { + SWSS_LOG_ERROR("Failed to caculate non-existent twamp session %s", + name.c_str()); + return; + } + + TwampStats& total_stats = it->second; + + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_RX_PACKETS), to_string(total_stats.rx_packets)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_RX_BYTE), to_string(total_stats.rx_bytes)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_TX_PACKETS), to_string(total_stats.tx_packets)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_TX_BYTE), to_string(total_stats.tx_bytes)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_DROP_PACKETS), to_string(total_stats.drop_packets)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MAX_LATENCY), to_string(total_stats.max_latency)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MIN_LATENCY), to_string(total_stats.min_latency)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_AVG_LATENCY), to_string(total_stats.avg_latency)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MAX_JITTER), to_string(total_stats.max_jitter)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_MIN_JITTER), to_string(total_stats.min_jitter)); + values.emplace_back(sai_serialize_twamp_session_stat(SAI_TWAMP_SESSION_STAT_AVG_JITTER), to_string(total_stats.avg_jitter)); + + m_countersTable->set(sai_serialize_object_id(session_id), values); +} + +void TwampOrch::doTask(NotificationConsumer& consumer) +{ + SWSS_LOG_ENTER(); + + if (!m_portsOrch->allPortsReady()) + { + return; + } + + std::string op; + std::string data; + std::vector values; + + consumer.pop(op, data, values); + + if (&consumer != m_twampNotificationConsumer) + { + return; + } + + if (op == "twamp_session_event") + { + uint32_t count = 0; + sai_twamp_session_event_notification_data_t *twamp_session = nullptr; + + sai_deserialize_twamp_session_event_ntf(data, count, &twamp_session); + + for (uint32_t i = 0; i < count; i++) + { + string name; + sai_object_id_t session_id = twamp_session[i].twamp_session_id; + sai_twamp_session_state_t session_state = twamp_session[i].session_state; + uint32_t stats_index = twamp_session[i].session_stats.index; + + if (!getSessionName(session_id, name)) + { + continue; + } + + /* update state db */ + if (session_state == SAI_TWAMP_SESSION_STATE_ACTIVE) + { + setSessionStatus(name, TWAMP_SESSION_STATUS_ACTIVE); + } + else + { + setSessionStatus(name, TWAMP_SESSION_STATUS_INACTIVE); + } + + /* save counter db */ + if (twamp_session[i].session_stats.number_of_counters) + { + if (0 == stats_index) + { + continue; + } + else if (1 == stats_index) + { + addCounterNameMap(name, session_id); + } + + vector hw_stats; + hw_stats.resize(twamp_session_stat_ids.size()); + for (uint32_t j = 0; j < twamp_session[i].session_stats.number_of_counters; j++) + { + uint32_t counters_id = twamp_session[i].session_stats.counters_ids[j]; + auto it = find(twamp_session_stat_ids.begin(), twamp_session_stat_ids.end(), counters_id); + if (it != twamp_session_stat_ids.end()) + { + hw_stats[counters_id] = twamp_session[i].session_stats.counters[j]; + } + } + + saveSessionStatsLatest(session_id, stats_index, hw_stats); + calculateCounters(name, stats_index, hw_stats); + saveCountersTotal(name, session_id); + } + } + + sai_deserialize_free_twamp_session_event_ntf(count, twamp_session); + } +} diff --git a/orchagent/twamporch.h b/orchagent/twamporch.h new file mode 100644 index 00000000000..09134f6be4a --- /dev/null +++ b/orchagent/twamporch.h @@ -0,0 +1,136 @@ +#ifndef SWSS_TWAMPORCH_H +#define SWSS_TWAMPORCH_H + +#include "orch.h" +#include "observer.h" +#include "switchorch.h" +#include "portsorch.h" +#include "vrforch.h" +#include "ipaddress.h" +#include "table.h" +#include + +struct TwampStats +{ + uint64_t rx_packets; + uint64_t rx_bytes; + uint64_t tx_packets; + uint64_t tx_bytes; + uint64_t drop_packets; + uint64_t max_latency; + uint64_t min_latency; + uint64_t avg_latency; + uint64_t max_jitter; + uint64_t min_jitter; + uint64_t avg_jitter; + uint64_t avg_latency_total; + uint64_t avg_jitter_total; +}; + +struct TwampEntry +{ + uint8_t mode; /* twamp mode: full, light */ + uint8_t role; /* sender, reflector */ + bool admin_state; /* test packet state. enabled, disabled */ + bool hw_lookup; + + sai_object_id_t vrf_id; + IpAddress src_ip; + IpAddress dst_ip; + uint16_t src_udp_port; + uint16_t dst_udp_port; + uint16_t padding_size; + uint8_t dscp; + uint8_t ttl; + uint8_t timestamp_format; + + /* sender attr */ + string tx_mode; + uint32_t packet_count; + uint32_t monitor_time; /* second */ + uint32_t tx_interval; /* millisecond */ + uint32_t statistics_interval; /* millisecond */ + uint8_t timeout; /* second */ + + sai_object_id_t session_id; + + TwampEntry() + { + session_id = 0; + admin_state = false; + hw_lookup = true; + vrf_id = 0; + packet_count = 0; + monitor_time = 0; + tx_interval = 0; + statistics_interval = 0; + timeout = 0; + }; +}; + +typedef map TwampEntryTable; +typedef map TwampStatsTable; + +class TwampOrch : public Orch +{ +public: + TwampOrch(TableConnector confDbConnector, TableConnector stateDbConnector, + SwitchOrch *switchOrch, PortsOrch *portOrch, VRFOrch *vrfOrch); + + ~TwampOrch() + { + // do nothing + } + + bool isSessionExists(const string&); + bool getSessionName(const sai_object_id_t oid, string& name); + +private: + SwitchOrch *m_switchOrch; + PortsOrch *m_portsOrch; + VRFOrch *m_vrfOrch; + NotificationConsumer* m_twampNotificationConsumer; + bool register_event_notif; + + unsigned int m_twampSessionCount; + unsigned int m_maxTwampSessionCount; + + TwampEntryTable m_twampEntries; + TwampStatsTable m_twampStatistics; + + shared_ptr m_asicDb; + shared_ptr m_countersDb; + unique_ptr
m_counterTwampSessionNameMapTable; + unique_ptr
m_countersTable; + unique_ptr
m_vidToRidTable; + Table m_stateDbTwampTable; + + bool validateUdpPort(uint16_t udp_port); + void increaseTwampSessionCount(void); + void decreaseTwampSessionCount(void); + bool checkTwampSessionCount(void); + + void setSessionStatus(const string&, const string&); + bool getSessionStatus(const string&, string&); + void removeSessionStatus(const string&); + void removeSessionCounter(const sai_object_id_t); + void initSessionStats(const string&); + + bool registerTwampEventNotification(void); + bool activateSession(const string&, TwampEntry&); + bool deactivateSession(const string&, TwampEntry&); + bool setSessionTransmitEn(TwampEntry&, string test_start); + + task_process_status createEntry(const string&, const vector&); + task_process_status updateEntry(const string&, const vector&); + task_process_status deleteEntry(const string&); + void doTask(Consumer& consumer); + + bool addCounterNameMap(const string&, const sai_object_id_t session_id); + void saveSessionStatsLatest(const sai_object_id_t session_id, const uint32_t index, const vector& stats); + void calculateCounters(const string&, const uint32_t index, const vector& stats); + void saveCountersTotal(const string&, const sai_object_id_t session_id); + void doTask(NotificationConsumer& consumer); +}; + +#endif /* SWSS_TWAMPORCH_H */ diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index 4b4e91b978d..a17651ef163 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include "sai.h" #include "saiextensions.h" @@ -21,6 +22,7 @@ #include "neighorch.h" #include "crmorch.h" #include "routeorch.h" +#include "tunneldecaporch.h" #include "flowcounterrouteorch.h" extern sai_virtual_router_api_t* sai_virtual_router_api; @@ -43,6 +45,7 @@ extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; extern BfdOrch *gBfdOrch; extern SwitchOrch *gSwitchOrch; +extern TunnelDecapOrch *gTunneldecapOrch; /* * VRF Modeling and VNetVrf class definitions */ @@ -159,13 +162,12 @@ bool VNetVrfObject::hasRoute(IpPrefix& ipPrefix) bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, NextHopGroupKey& nexthops) { - if (nexthops.is_overlay_nexthop()) - { - tunnels_[ipPrefix] = nexthops; - } - else + + tunnels_[ipPrefix] = nexthops; + + if (!nexthops.is_overlay_nexthop()) { - SWSS_LOG_ERROR("Input %s is not overlay nexthop group", nexthops.to_string().c_str()); + SWSS_LOG_NOTICE("Input %s is not overlay nexthop group", nexthops.to_string().c_str()); return false; } @@ -243,7 +245,7 @@ void VNetVrfObject::decreaseNextHopRefCount(const nextHop& nh) } } -bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, nextHop& nh) +bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, nextHop& nh, bool increaseRefCount) { if (hasRoute(ipPrefix)) { @@ -251,12 +253,15 @@ bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, nextHop& nh) return false; } - increaseNextHopRefCount(nh); + if (increaseRefCount) + { + increaseNextHopRefCount(nh); + } routes_[ipPrefix] = nh; return true; } -bool VNetVrfObject::removeRoute(IpPrefix& ipPrefix) +bool VNetVrfObject::removeRoute(IpPrefix& ipPrefix, bool decreaseRefCount) { if (!hasRoute(ipPrefix)) { @@ -274,7 +279,10 @@ bool VNetVrfObject::removeRoute(IpPrefix& ipPrefix) else { nextHop nh = routes_[ipPrefix]; - decreaseNextHopRefCount(nh); + if (decreaseRefCount) + { + decreaseNextHopRefCount(nh); + } routes_.erase(ipPrefix); } return true; @@ -334,7 +342,7 @@ VNetVrfObject::~VNetVrfObject() set vr_ent = getVRids(); for (auto it : vr_ent) { - if (it != gVirtualRouterId) + if (it != gVirtualRouterId) { sai_status_t status = sai_virtual_router_api->remove_virtual_router(it); if (status != SAI_STATUS_SUCCESS) @@ -717,13 +725,15 @@ static bool update_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx, sai_obj } VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOrch *vnetOrch) - : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME) + : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME), + app_tunnel_decap_term_producer_(db, APP_TUNNEL_DECAP_TERM_TABLE_NAME) { SWSS_LOG_ENTER(); handler_map_.insert(handler_pair(APP_VNET_RT_TABLE_NAME, &VNetRouteOrch::handleRoutes)); handler_map_.insert(handler_pair(APP_VNET_RT_TUNNEL_TABLE_NAME, &VNetRouteOrch::handleTunnel)); + config_db_ = shared_ptr(new DBConnector("CONFIG_DB", 0)); state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); app_db_ = shared_ptr(new DBConnector("APPL_DB", 0)); @@ -731,6 +741,8 @@ VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOr state_vnet_rt_adv_table_ = unique_ptr
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); monitor_session_producer_ = unique_ptr
(new Table(app_db_.get(), APP_VNET_MONITOR_TABLE_NAME)); + vnet_tunnel_term_acl_ = make_shared(config_db_.get(), app_db_.get()); + gBfdOrch->attach(this); } @@ -745,7 +757,7 @@ sai_object_id_t VNetRouteOrch::getNextHopGroupId(const string& vnet, const NextH return syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; } -bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj, const string& monitoring) +bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj, const string& monitoring, const bool isLocalEp) { SWSS_LOG_ENTER(); @@ -770,7 +782,12 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n { continue; } - sai_object_id_t next_hop_id = vrf_obj->getTunnelNextHop(it); + if (isLocalEp && !gNeighOrch->hasNextHop(it)) + { + SWSS_LOG_NOTICE("Next hop %s not found in neighorch, skipping.", it.to_string().c_str()); + continue; + } + sai_object_id_t next_hop_id = isLocalEp? gNeighOrch->getNextHopId(it):vrf_obj->getTunnelNextHop(it); next_hop_ids.push_back(next_hop_id); nhopgroup_members_set[next_hop_id] = it; } @@ -884,7 +901,13 @@ bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey return false; } - vrf_obj->removeTunnelNextHop(nexthop); + /* For local endpoint, we don't remove the next hop from NeighOrch, + * as it is not created by VNetRouteOrch. + */ + if (!isLocalEndpoint(vnet, nexthop.ip_address)) + { + vrf_obj->removeTunnelNextHop(nexthop); + } gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); nhop = next_hop_group_entry->second.active_members.erase(nhop); @@ -910,26 +933,44 @@ bool VNetRouteOrch::createNextHopGroup(const string& vnet, VNetVrfObject *vrf_obj, const string& monitoring) { - + SWSS_LOG_INFO("Creating nexthop group from nexthops(%s)\n", nexthops.to_string().c_str()); if (nexthops.getSize() == 0) { return true; } else if (nexthops.getSize() == 1) { - NextHopKey nexthop(nexthops.to_string(), true); + NextHopKey nexthop = *nexthops.getNextHops().begin(); + bool isLocalEp = isLocalEndpoint(vnet, nexthop.ip_address); + if (isLocalEp && !gNeighOrch->hasNextHop(nexthop)) + { + SWSS_LOG_NOTICE("Next hop %s not found in neighorch, skipping.", nexthop.to_string().c_str()); + return false; + } NextHopGroupInfo next_hop_group_entry; - next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); - next_hop_group_entry.ref_count = 0; + if (isLocalEp) + { + gNeighOrch->increaseNextHopRefCount(nexthop); + next_hop_group_entry.next_hop_group_id = gNeighOrch->getNextHopId(nexthop); + next_hop_group_entry.ref_count = gNeighOrch->getNextHopRefCount(nexthop); + } + else + { + next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); + next_hop_group_entry.ref_count = 0; + } + if (monitoring == "custom" || nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) { + SWSS_LOG_INFO("Adding nexthop: %s to the active group", nexthop.ip_address.to_string().c_str()); next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; } syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; } else { - if (!addNextHopGroup(vnet, nexthops, vrf_obj, monitoring)) + const bool isLocalEp = isLocalEndpoint(vnet, nexthops.getNextHops().begin()->ip_address); + if (!addNextHopGroup(vnet, nexthops, vrf_obj, monitoring, isLocalEp)) { SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); return false; @@ -974,6 +1015,8 @@ bool VNetRouteOrch::selectNextHopGroup(const string& vnet, NextHopGroupKey& nexthops_primary, NextHopGroupKey& nexthops_secondary, const string& monitoring, + const int32_t rx_monitor_timer, + const int32_t tx_monitor_timer, IpPrefix& ipPrefix, VNetVrfObject *vrf_obj, NextHopGroupKey& nexthops_selected, @@ -986,23 +1029,24 @@ bool VNetRouteOrch::selectNextHopGroup(const string& vnet, // This is followed by an attempt to create a NHG which can be subset of nexthops_primary // depending on the endpoint monitor state. If no NHG from primary is created, we attempt // the same for secondary. + if(nexthops_secondary.getSize() != 0 && monitoring == "custom") { auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); if (it_route == syncd_tunnel_routes_[vnet].end()) { - setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); - setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, ipPrefix); + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, rx_monitor_timer, tx_monitor_timer, ipPrefix); + setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, rx_monitor_timer, tx_monitor_timer, ipPrefix); } else { if (it_route->second.primary != nexthops_primary) { - setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, rx_monitor_timer, tx_monitor_timer, ipPrefix); } if (it_route->second.secondary != nexthops_secondary) { - setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, ipPrefix); + setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, rx_monitor_timer, tx_monitor_timer, ipPrefix); } nexthops_selected = it_route->second.nhg_key; return true; @@ -1061,7 +1105,7 @@ bool VNetRouteOrch::selectNextHopGroup(const string& vnet, else if (!hasNextHopGroup(vnet, nexthops_primary)) { SWSS_LOG_INFO("Creating next hop group %s", nexthops_primary.to_string().c_str()); - setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, rx_monitor_timer, tx_monitor_timer, ipPrefix); if (!createNextHopGroup(vnet, nexthops_primary, vrf_obj, monitoring)) { delEndpointMonitor(vnet, nexthops_primary, ipPrefix); @@ -1075,7 +1119,10 @@ bool VNetRouteOrch::selectNextHopGroup(const string& vnet, template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, - const string& monitoring, NextHopGroupKey& nexthops_secondary, + const string& monitoring, + const int32_t rx_monitor_timer, + const int32_t tx_monitor_timer, + NextHopGroupKey& nexthops_secondary, const IpPrefix& adv_prefix, const map& monitors) { @@ -1116,7 +1163,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { sai_object_id_t nh_id = SAI_NULL_OBJECT_ID; NextHopGroupKey active_nhg("", true); - if (!selectNextHopGroup(vnet, nexthops, nexthops_secondary, monitoring, ipPrefix, vrf_obj, active_nhg, monitors)) + if (!selectNextHopGroup(vnet, nexthops, nexthops_secondary, monitoring, rx_monitor_timer, tx_monitor_timer, ipPrefix, vrf_obj, active_nhg, monitors)) { return true; } @@ -1144,6 +1191,22 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP } else { + auto prefixToRemove = ipPrefix; + if (adv_prefix.to_string() != ipPrefix.to_string()) + { + prefixToRemove = adv_prefix; + } + auto prefixSubnet = prefixToRemove.getSubnet(); + if(gRouteOrch && gRouteOrch->isRouteExists(prefixSubnet)) + { + if (!gRouteOrch->removeRoutePrefix(prefixSubnet)) + { + SWSS_LOG_ERROR("Could not remove existing bgp route for prefix: %s\n", prefixSubnet.to_string().c_str()); + return false; + } + SWSS_LOG_INFO("Successfully removed existing bgp route for prefix: %s\n", + prefixSubnet.to_string().c_str()); + } if (it_route == syncd_tunnel_routes_[vnet].end()) { route_status = add_route(vr_id, pfx, nh_id); @@ -1214,8 +1277,11 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP syncd_nexthop_groups_[vnet].erase(nhg); if(nhg.getSize() == 1) { - NextHopKey nexthop(nhg.to_string(), true); - vrf_obj->removeTunnelNextHop(nexthop); + NextHopKey nexthop = *nhg.getNextHops().begin(); + if (!isLocalEndpoint(vnet, nexthop.ip_address)) + { + vrf_obj->removeTunnelNextHop(nexthop); + } } } if (monitoring != "custom") @@ -1292,6 +1358,8 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); return false; } + SWSS_LOG_INFO("Successfully deleted the route for prefix: %s", ipPrefix.to_string().c_str()); + } } @@ -1308,8 +1376,11 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP // In case of Priority routes we can end up in a situation where the active NHG has 0 nexthops. if(nhg.getSize() == 1) { - NextHopKey nexthop(nhg.to_string(), true); - vrf_obj->removeTunnelNextHop(nexthop); + NextHopKey nexthop = *nhg.getNextHops().begin(); + if (!isLocalEndpoint(vnet, nexthop.ip_address)) + { + vrf_obj->removeTunnelNextHop(nexthop); + } } } if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end()) @@ -1432,6 +1503,119 @@ bool VNetRouteOrch::updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, return true; } +inline void VNetRouteOrch::createSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + if (!config.enable || subnet_decap_terms_created_.find(ipPrefix) != subnet_decap_terms_created_.end()) + { + return; + } + SWSS_LOG_NOTICE("Add subnet decap term for %s", ipPrefix.to_string().c_str()); + static const vector data = { + {"term_type", "MP2MP"}, + {"subnet_type", "vip"} + }; + string tunnel_name = ipPrefix.isV4() ? config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + app_tunnel_decap_term_producer_.set(key, data); + subnet_decap_terms_created_.insert(ipPrefix); +} + +inline void VNetRouteOrch::removeSubnetDecapTerm(const IpPrefix &ipPrefix) +{ + const SubnetDecapConfig &config = gTunneldecapOrch->getSubnetDecapConfig(); + auto it = subnet_decap_terms_created_.find(ipPrefix); + if (it == subnet_decap_terms_created_.end()) + { + return; + } + SWSS_LOG_NOTICE("Remove subnet decap term for %s", ipPrefix.to_string().c_str()); + string tunnel_name = ipPrefix.isV4() ? config.tunnel : config.tunnel_v6; + string key = tunnel_name + ":" + ipPrefix.to_string(); + app_tunnel_decap_term_producer_.del(key); + subnet_decap_terms_created_.erase(it); +} + +bool VNetRouteOrch::setAndDeleteRoutesWithRouteOrch(const sai_object_id_t vr_id, const IpPrefix& ipPrefix, + const NextHopGroupKey& nhg, const string& op) +{ + auto& bulkNhgReducedRefCnt = gRouteOrch->getBulkNhgReducedRefCnt(); + + // Get vnet name from vrf id + std::string vnet_name; + if (!vnet_orch_->getVnetNameByVrfId(vr_id, vnet_name)) + { + SWSS_LOG_INFO("Failed to get VNET name for vrf id '0x%" PRIx64, vr_id); + return false; + } + + // Set up route bulk context + string key = vnet_name + ":" + ipPrefix.to_string(); + RouteBulkContext ctx(key, (op == SET_COMMAND)); + ctx.vrf_id = vr_id; + ctx.ip_prefix = ipPrefix; + ctx.nhg = nhg; + + if (op == SET_COMMAND) + { + // Add route via route orch + if (gRouteOrch->addRoute(ctx, nhg)) + { + return true; + } + + // Flush the route bulker, so routes will be written to syncd and ASIC + gRouteOrch->flushRouteBulker(); + bulkNhgReducedRefCnt.clear(); + + // Post add route via route orch + if (gRouteOrch->addRoutePost(ctx, nhg)) + { + SWSS_LOG_NOTICE("Route %s added via routeorch for vnet %s", ipPrefix.to_string().c_str(), vnet_name.c_str()); + } + else + { + SWSS_LOG_ERROR("Route %s add failed in routeorch for vnet %s", ipPrefix.to_string().c_str(), vnet_name.c_str()); + return false; + } + } + else if (op == DEL_COMMAND) + { + // Remove route via route orch + if (gRouteOrch->removeRoute(ctx)) + { + return true; + } + + // Flush the route bulker, so routes will be written to syncd and ASIC + gRouteOrch->flushRouteBulker(); + bulkNhgReducedRefCnt.clear(); + + // Post remove route via route orch + if (gRouteOrch->removeRoutePost(ctx)) + { + SWSS_LOG_NOTICE("Route %s removed via routeorch for vnet %s", ipPrefix.to_string().c_str(), vnet_name.c_str()); + } + else + { + SWSS_LOG_ERROR("Route %s remove failed in routeorch for vnet %s", ipPrefix.to_string().c_str(), vnet_name.c_str()); + return false; + } + } + + // Remove next hop groups with 0 ref count + for (auto& it : bulkNhgReducedRefCnt) + { + if (gRouteOrch->getNextHopGroupRefCount(it.first) == 0) + { + gRouteOrch->removeNextHopGroup(it.first); + SWSS_LOG_INFO("Next hop group %s has 0 references, removed via routeorch", it.first.to_string().c_str()); + } + } + + return true; +} + template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, nextHop& nh, string& op) @@ -1502,31 +1686,28 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP sai_ip_prefix_t pfx; copy(pfx, ipPrefix); sai_object_id_t nh_id=SAI_NULL_OBJECT_ID; + string nhg_str; if (is_subnet) { nh_id = port.m_rif_id; } - else if (nh.ips.getSize() == 1) + else { - NextHopKey nexthop(nh.ips.to_string(), nh.ifname); - if (gNeighOrch->hasNextHop(nexthop)) + // Populate next hop group string + auto ifnames = tokenize(nh.ifname, ','); + int idx = 0; + for (auto it : nh.ips.getIpAddresses()) { - nh_id = gNeighOrch->getNextHopId(nexthop); - } - else - { - SWSS_LOG_INFO("Failed to get next hop %s for %s", - nexthop.to_string().c_str(), ipPrefix.to_string().c_str()); - return false; + if (!nhg_str.empty()) + { + nhg_str += ","; + } + + nhg_str += it.to_string() + "@" + ifnames[idx]; + idx++; } } - else - { - // FIXME - Handle ECMP routes - SWSS_LOG_WARN("VNET ECMP NHs not implemented for '%s'", ipPrefix.to_string().c_str()); - return true; - } for (auto vr_id : vr_set) { @@ -1534,25 +1715,37 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { continue; } - if (op == SET_COMMAND && !add_route(vr_id, pfx, nh_id)) + + if (is_subnet) { - SWSS_LOG_INFO("Route add failed for %s", ipPrefix.to_string().c_str()); - break; + if (op == SET_COMMAND && !add_route(vr_id, pfx, nh_id)) + { + SWSS_LOG_INFO("Route add failed for %s", ipPrefix.to_string().c_str()); + break; + } + else if (op == DEL_COMMAND && !del_route(vr_id, pfx)) + { + SWSS_LOG_INFO("Route del failed for %s", ipPrefix.to_string().c_str()); + break; + } } - else if (op == DEL_COMMAND && !del_route(vr_id, pfx)) + else { - SWSS_LOG_INFO("Route del failed for %s", ipPrefix.to_string().c_str()); - break; + NextHopGroupKey nhg(nhg_str); + if (!setAndDeleteRoutesWithRouteOrch(vr_id, ipPrefix, nhg, op)) + { + return false; + } } } if (op == SET_COMMAND) { - vrf_obj->addRoute(ipPrefix, nh); + vrf_obj->addRoute(ipPrefix, nh, is_subnet); } else { - vrf_obj->removeRoute(ipPrefix); + vrf_obj->removeRoute(ipPrefix, is_subnet); } return true; @@ -1793,7 +1986,7 @@ void VNetRouteOrch::delRoute(const IpPrefix& ipPrefix) syncd_routes_.erase(route_itr); } -void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr) +void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, const int32_t rx_monitor_timer, const int32_t tx_monitor_timer) { SWSS_LOG_ENTER(); @@ -1812,11 +2005,31 @@ void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpo auto tun_name = vnet_orch_->getTunnelName(vnet); VxlanTunnelOrch* vxlan_orch = gDirectory.get(); auto tunnel_obj = vxlan_orch->getVxlanTunnel(tun_name); + /* + Even for local endpoints, we will use tunnel source IP as local_addr of BFD session. + */ IpAddress src_ip = tunnel_obj->getSrcIP(); FieldValueTuple fvTuple("local_addr", src_ip.to_string()); data.push_back(fvTuple); data.emplace_back("multihop", "true"); + // The BFD sessions established by the Vnet routes with monitoring need to be brought down + // when the device goes into TSA. The following parameter ensures that these session are + // brought down while transitioning to TSA and brought back up when transitioning to TSB. + data.emplace_back("shutdown_bfd_during_tsa", "true"); + + if (rx_monitor_timer >= 0) + { + FieldValueTuple fv_rx("rx_interval", to_string(rx_monitor_timer)); + data.push_back(fv_rx); + } + + if (tx_monitor_timer >= 0) + { + FieldValueTuple fv_tx("tx_interval", to_string(tx_monitor_timer)); + data.push_back(fv_tx); + } + bfd_session_producer_.set(key, data); bfd_sessions_[monitor_addr].bfd_state = SAI_BFD_SESSION_STATE_DOWN; } @@ -1840,6 +2053,7 @@ void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpo { SWSS_LOG_ERROR("BFD session for endpoint %s does not exist", endpoint_addr.to_string().c_str()); } + SWSS_LOG_INFO("Removing nexthop info for endpoint: %s\n", endpoint_addr.to_string().c_str()); nexthop_info_[vnet].erase(endpoint_addr); string key = "default:default:" + monitor_addr.to_string(); @@ -1920,7 +2134,7 @@ void VNetRouteOrch::removeMonitoringSession(const string& vnet, const NextHopKey monitor_info_[vnet][ipPrefix].erase(monitor_addr); } -void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, const string& monitoring, IpPrefix& ipPrefix) +void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, const string& monitoring, const int32_t rx_monitor_timer, const int32_t tx_monitor_timer, IpPrefix& ipPrefix) { SWSS_LOG_ENTER(); @@ -1949,7 +2163,7 @@ void VNetRouteOrch::setEndpointMonitor(const string& vnet, const mapgetAdvertisePrefix(vnet)) { if (route_state == "active") @@ -2084,12 +2303,21 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH removeRouteAdvertisement(prefix_to_use); } } + if (route_state == "active") + { + createSubnetDecapTerm(prefix_to_use); + } + else if (route_state == "inactive") + { + removeSubnetDecapTerm(prefix_to_use); + } } void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); state_vnet_rt_tunnel_table_->del(state_db_key); + SWSS_LOG_NOTICE("Advertisement of prefix: %s stopped.\n", ipPrefix.to_string().c_str()); if(prefix_to_adv_prefix_.find(ipPrefix) !=prefix_to_adv_prefix_.end()) { @@ -2097,11 +2325,13 @@ void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) if(adv_prefix_refcount_[adv_pfx] == 1) { removeRouteAdvertisement(adv_pfx); + removeSubnetDecapTerm(adv_pfx); } } else { removeRouteAdvertisement(ipPrefix); + removeSubnetDecapTerm(ipPrefix); } } @@ -2220,7 +2450,12 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) { continue; } + // when we add the first nexthop to the route, we dont create a nexthop group, we call the updateTunnelRoute with NHG with one member. + // when adding the 2nd, 3rd ... members we create each NH using this create_next_hop_group_member call but give it the reference of next_hop_group_id. + // this way we dont have to update the route, the syncd does it by itself. we only call the updateTunnelRoute to add/remove when adding or removing the + // route fully. + bool failed = false; if (state == SAI_BFD_SESSION_STATE_UP) { sai_object_id_t next_hop_group_member_id = SAI_NULL_OBJECT_ID; @@ -2272,10 +2507,47 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) { for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) { + // remove the bgp learnt route first if any exists and then add the tunnel route. + auto ipPrefixsubnet = ip_pfx.getSubnet(); + auto prefixStr = ip_pfx.to_string(); + auto nhStr = nexthops.to_string(); + if (prefix_to_adv_prefix_.find(ip_pfx) != prefix_to_adv_prefix_.end()) + { + auto adv_prefix = prefix_to_adv_prefix_[ip_pfx]; + if(adv_prefix.to_string() != prefixStr) + { + ipPrefixsubnet = adv_prefix.getSubnet(); + } + } + if(gRouteOrch && gRouteOrch->isRouteExists(ipPrefixsubnet)) + { + if (!gRouteOrch->removeRoutePrefix(ipPrefixsubnet)) + { + SWSS_LOG_ERROR("Could not remove existing bgp route for prefix: %s\n", prefixStr.c_str()); + return; + } + SWSS_LOG_INFO("Successfully removed existing bgp route for prefix: %s\n", prefixStr.c_str()); + } string op = SET_COMMAND; - updateTunnelRoute(vnet, ip_pfx, nexthops, op); + SWSS_LOG_INFO("Adding Vnet route for prefix:%s with nexthop group: %s\n", prefixStr.c_str(), nhStr.c_str()); + + if (!updateTunnelRoute(vnet, ip_pfx, nexthops, op)) + { + SWSS_LOG_NOTICE("Failed to create tunnel route in hardware for prefix: %s\n", prefixStr.c_str()); + failed = true; + } + else + { + SWSS_LOG_INFO("Successfully created tunnel route in hardware for prefix: %s\n", prefixStr.c_str()); + } } } + if (failed) + { + // This is an unrecoverable error, Throw a LOG_ERROR and return + SWSS_LOG_ERROR("Inconsistent hardware State. Failed to create tunnel routes.\n"); + return; + } } else { @@ -2300,7 +2572,11 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) } } - vrf_obj->removeTunnelNextHop(endpoint); + if (!isLocalEndpoint(vnet, endpoint.ip_address)) + { + vrf_obj->removeTunnelNextHop(endpoint); + SWSS_LOG_INFO("Successfully removed nexthop: %s\n",endpoint.to_string().c_str() ); + } gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); } @@ -2316,6 +2592,7 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) { for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) { + SWSS_LOG_NOTICE("Removing Vnet route for prefix : %s due to no active nexthops.\n",ip_pfx.to_string().c_str()); string op = DEL_COMMAND; updateTunnelRoute(vnet, ip_pfx, nexthops, op); } @@ -2323,12 +2600,14 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) } } } - - // Post configured in State DB - for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + if (!failed) { - string profile = vrf_obj->getProfile(ip_pfx); - postRouteState(vnet, ip_pfx, nexthops, profile); + // Post configured in State DB + for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + string profile = vrf_obj->getProfile(ip_pfx); + postRouteState(vnet, ip_pfx, nexthops, profile); + } } } } @@ -2389,6 +2668,8 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) copy(pfx, prefix); NextHopGroupKey nhg_custom_primary = getActiveNHSet( vnet, primary, prefix); NextHopGroupKey nhg_custom_secondary = getActiveNHSet( vnet, secondary, prefix); + SWSS_LOG_INFO("Primary active(%s), Secondary active (%s), Current active(%s)\n", nhg_custom_primary.to_string().c_str(), + nhg_custom_secondary.to_string().c_str(), active_nhg.to_string().c_str()); if (nhg_custom_primary.getSize() > 0) { if (nhg_custom_primary != active_nhg ) @@ -2436,7 +2717,7 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) if (nhg_custom.getSize() == 0) { // nhg_custom is empty. we shall create a dummy empty NHG for book keeping. - SWSS_LOG_INFO(" Neither Primary or Secondary endpoints are up."); + SWSS_LOG_INFO(" Neither Primary or Secondary endpoints are up.\n"); if (!hasNextHopGroup(vnet, nhg_custom)) { NextHopGroupInfo next_hop_group_entry; @@ -2454,6 +2735,7 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) { if (active_nhg_size > 0) { + SWSS_LOG_INFO(" Removing the route for prefix: %s.",prefix.to_string().c_str()); // we need to remove the route del_route(vr_id, pfx); } @@ -2466,11 +2748,33 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) if (active_nhg_size > 0) { // we need to replace the nhg in the route + SWSS_LOG_INFO("Replacing nexthop group for prefix: %s, nexthop group: %s\n", + prefix.to_string().c_str(), nhg_custom.to_string().c_str()); route_status = update_route(vr_id, pfx, nh_id); } else { // we need to readd the route. + SWSS_LOG_NOTICE("Adding Custom monitored Route with prefix: %s and nexthop group: %s\n", + prefix.to_string().c_str(), nhg_custom.to_string().c_str()); + auto prefixToUse = prefix; + if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) + { + auto adv_prefix = prefix_to_adv_prefix_[prefix]; + if(adv_prefix.to_string() != prefix.to_string()) + { + prefixToUse = adv_prefix; + } + } + auto prefixsubnet = prefixToUse.getSubnet(); + if (gRouteOrch && gRouteOrch->isRouteExists(prefixsubnet)) + { + if (!gRouteOrch->removeRoutePrefix(prefixsubnet)) + { + SWSS_LOG_ERROR("Could not remove existing bgp route for prefix: %s\n", prefix.to_string().c_str()); + } + SWSS_LOG_INFO("Successfully removed existing bgp route for prefix: %s\n", prefix.to_string().c_str()); + } route_status = add_route(vr_id, pfx, nh_id); } if (!route_status) @@ -2509,14 +2813,19 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) if(active_nhg_size == 1) { NextHopKey nexthop(active_nhg.to_string(), true); - vrf_obj->removeTunnelNextHop(nexthop); + if (!isLocalEndpoint(vnet, nexthop.ip_address)) + { + vrf_obj->removeTunnelNextHop(nexthop); + } } } } else { + SWSS_LOG_INFO("Prefix %s no longer references nexthop group: %s\n",prefix.to_string().c_str(), active_nhg.to_string().c_str()); syncd_nexthop_groups_[vnet][active_nhg].tunnel_routes.erase(prefix); } + SWSS_LOG_INFO("Prefix %s now references nexthop group: %s\n",prefix.to_string().c_str(), nhg_custom.to_string().c_str()); syncd_nexthop_groups_[vnet][nhg_custom].tunnel_routes.insert(prefix); syncd_tunnel_routes_[vnet][prefix].nhg_key = nhg_custom; if (nhg_custom != active_nhg) @@ -2526,6 +2835,7 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) if (nhg_custom.getSize() == 0 && active_nhg_size > 0) { vrf_obj->removeRoute(prefix); + SWSS_LOG_NOTICE("Route prefix is no longer active: %s\n", prefix.to_string().c_str()); removeRouteState(vnet, prefix); if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) { @@ -2539,12 +2849,15 @@ void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) } else if (nhg_custom.getSize() > 0 && active_nhg_size == 0) { - auto adv_prefix = prefix_to_adv_prefix_[prefix]; - if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) { - adv_prefix_refcount_[adv_prefix] = 0; + auto adv_prefix = prefix_to_adv_prefix_[prefix]; + if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + { + adv_prefix_refcount_[adv_prefix] = 0; + } + adv_prefix_refcount_[adv_prefix] += 1; } - adv_prefix_refcount_[adv_prefix] += 1; string profile = vrf_obj->getProfile(prefix); postRouteState(vnet, prefix, nhg_custom, profile); } @@ -2566,10 +2879,14 @@ bool VNetRouteOrch::handleTunnel(const Request& request) vector monitor_list; string profile = ""; vector primary_list; + vector secondary_list; string monitoring; + int32_t rx_monitor_timer = -1; + int32_t tx_monitor_timer = -1; swss::IpPrefix adv_prefix; bool has_priority_ep = false; bool has_adv_pfx = false; + bool check_directly_connected = false; for (const auto& name: request.getAttrFieldNames()) { if (name == "endpoint") @@ -2607,6 +2924,18 @@ bool VNetRouteOrch::handleTunnel(const Request& request) adv_prefix = request.getAttrIpPrefix(name); has_adv_pfx = true; } + else if (name == "check_directly_connected") + { + check_directly_connected = request.getAttrBool(name); + } + else if (name == "rx_monitor_timer") + { + rx_monitor_timer = static_cast(request.getAttrUint(name)); + } + else if (name == "tx_monitor_timer") + { + tx_monitor_timer = static_cast(request.getAttrUint(name)); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -2641,6 +2970,8 @@ bool VNetRouteOrch::handleTunnel(const Request& request) auto ip_pfx = request.getKeyIpPrefix(1); auto op = request.getOperation(); + vnet_tunnel_route_check_directly_connected[vnet_name] = check_directly_connected; + SWSS_LOG_INFO("VNET-RT '%s' op '%s' for pfx %s", vnet_name.c_str(), op.c_str(), ip_pfx.to_string().c_str()); @@ -2650,13 +2981,49 @@ bool VNetRouteOrch::handleTunnel(const Request& request) SWSS_LOG_INFO("Handling Priority Tunnel with prefix %s", ip_pfx.to_string().c_str()); } - NextHopGroupKey nhg_primary("", true); - NextHopGroupKey nhg_secondary("", true); + if (primary_list.size() != ip_list.size()) + { + for (auto& ip : primary_list) + { + if (std::find(ip_list.begin(), ip_list.end(), ip) == ip_list.end()) + { + secondary_list.push_back(ip); + } + } + } + + /* + * A local endpoint is an endpoint that is directly connected, i.e., present in the neighbor table. + * This check ensures that for primary/backup endpoint groups, all endpoints must be either local or all remote. + * Partially local means that some endpoints are local and some are not. Mixing local and remote endpoints in a + * single group is not supported. + */ + if (check_directly_connected && (isPartiallyLocal(primary_list) || isPartiallyLocal(secondary_list) )) + { + SWSS_LOG_ERROR("Endpoints in Primary/backup should either all be local endpoints or no local endpoint at all."); + return false; + } + + NextHopGroupKey nhg_primary; + NextHopGroupKey nhg_secondary; + if (check_directly_connected) + { + nhg_primary = NextHopGroupKey("", primary_list.empty() || !isLocalEndpoint(vnet_name, primary_list[0])); + nhg_secondary = NextHopGroupKey("", secondary_list.empty() || !isLocalEndpoint(vnet_name, secondary_list[0])); + } + else + { + nhg_primary = NextHopGroupKey("", true); + nhg_secondary = NextHopGroupKey("", true); + } NextHopGroupKey nhg("", true); map monitors; for (size_t idx_ip = 0; idx_ip < ip_list.size(); idx_ip++) { IpAddress ip = ip_list[idx_ip]; + bool is_local = isLocalEndpoint(vnet_name, ip); + bool is_overlay = !is_local; + string alias = is_local ? gIntfsOrch->getRouterIntfsAlias(ip) : ""; MacAddress mac; uint32_t vni = 0; if (vni_list.size() == 1 && vni_list[0] != "") @@ -2673,7 +3040,13 @@ bool VNetRouteOrch::handleTunnel(const Request& request) mac = MacAddress(mac_list[idx_ip]); } - NextHopKey nh(ip, mac, vni, true); + if (is_local) + { + SWSS_LOG_INFO("Attempting to add TUNNEL TERM ACL for local endpoint %s", ip.to_string().c_str()); + vnet_tunnel_term_acl_->createAclRule(vnet_name, ip_pfx, ip); + } + + NextHopKey nh(ip, alias, mac, vni, is_overlay); if (!monitor_list.empty()) { monitors[nh] = monitor_list[idx_ip]; @@ -2698,7 +3071,7 @@ bool VNetRouteOrch::handleTunnel(const Request& request) } if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, (has_priority_ep == true) ? nhg_primary : nhg, op, profile, monitoring, nhg_secondary, adv_prefix, monitors); + return doRouteTask(vnet_name, ip_pfx, (has_priority_ep == true) ? nhg_primary : nhg, op, profile, monitoring, rx_monitor_timer, tx_monitor_timer, nhg_secondary, adv_prefix, monitors); } return true; @@ -2752,6 +3125,38 @@ bool VNetRouteOrch::delOperation(const Request& request) return true; } +bool VNetRouteOrch::isLocalEndpoint(const string&vnet, const IpAddress &ipAddr) +{ + auto it = vnet_tunnel_route_check_directly_connected.find(vnet); + if (it == vnet_tunnel_route_check_directly_connected.end() || !it->second) + { + return false; + } + + NeighborEntry n; + MacAddress m; + return gNeighOrch->getNeighborEntry(ipAddr, n, m); +} + +bool VNetRouteOrch::isPartiallyLocal(const std::vector& ip_list) +{ + bool all_true = std::all_of(ip_list.begin(), ip_list.end(), + [this](const swss::IpAddress& ip) { + NeighborEntry n; + MacAddress m; + return gNeighOrch->getNeighborEntry(ip, n, m); + }); + bool all_false = std::all_of(ip_list.begin(), ip_list.end(), + [this](const swss::IpAddress& ip) { + NeighborEntry n; + MacAddress m; + return !gNeighOrch->getNeighborEntry(ip, n, m); + }); + + return !(all_true || all_false); +} + + VNetCfgRouteOrch::VNetCfgRouteOrch(DBConnector *db, DBConnector *appDb, vector &tableNames) : Orch(db, tableNames), m_appVnetRouteTable(appDb, APP_VNET_RT_TABLE_NAME), @@ -2885,3 +3290,153 @@ bool MonitorOrch::delOperation(const Request& request) return true; } + +VNetTunnelTermAcl::VNetTunnelTermAcl(DBConnector *cfgDb, DBConnector *appDb) +{ + SWSS_LOG_ENTER(); + + acl_table_ = make_unique(appDb, APP_ACL_TABLE_TABLE_NAME); + acl_table_type_ = make_unique(appDb, APP_ACL_TABLE_TYPE_TABLE_NAME); + acl_rule_table_ = make_unique(appDb, APP_ACL_RULE_TABLE_NAME); + + ctx_ = make_shared(cfgDb); +} + +void VNetTunnelTermAcl::lazyInit() +{ + + SWSS_LOG_ENTER(); + + if (acl_table_initialized_) + { + return; + } + + ctx_->initialize(); + + vector match_list = { + MATCH_DST_IP, + MATCH_DST_IPV6, + MATCH_TUNNEL_TERM + }; + string matches = std::accumulate(std::next(match_list.begin()), match_list.end(), match_list[0], concat); + + vector bpoint_list = { + BIND_POINT_TYPE_PORT, + BIND_POINT_TYPE_PORTCHANNEL + }; + string bpoints = std::accumulate(std::next(bpoint_list.begin()), bpoint_list.end(), bpoint_list[0], concat); + + vector fvs = { + {ACL_TABLE_TYPE_MATCHES, matches}, + {ACL_TABLE_TYPE_ACTIONS, ACTION_REDIRECT_ACTION}, + {ACL_TABLE_TYPE_BPOINT_TYPES, bpoints} + }; + + acl_table_type_->set(VNET_TUNNEL_TERM_ACL_TABLE_TYPE, fvs); + + std::string ports_str = ""; + auto ports = ctx_->getBindPoints(); + if (!ports.empty()) + { + ports_str = std::accumulate(std::next(ports.begin()), ports.end(), ports[0], concat); + } + + vector fvs2 = { + {ACL_TABLE_DESCRIPTION, "Vnet Tunnel Termination ACL"}, + {ACL_TABLE_TYPE, VNET_TUNNEL_TERM_ACL_TABLE_TYPE}, + {ACL_TABLE_STAGE, STAGE_INGRESS}, + {ACL_TABLE_PORTS, ports_str} + }; + + acl_table_->set(VNET_TUNNEL_TERM_ACL_TABLE, fvs2); + + acl_table_initialized_ = true; +} + +bool VNetTunnelTermAcl::createAclRule(const string vnet_name, swss::IpPrefix& vip, swss::IpAddress nh_ip) +{ + SWSS_LOG_ENTER(); + + lazyInit(); + + VNetLocEpAclRule rule; + + if (getAclRule(vnet_name, vip, rule)) + { + /* If there are more than one local points for the same VIP, we will not create a new rule. */ + SWSS_LOG_NOTICE("ACL rule already exists for VNet %s with VIP %s", vnet_name.c_str(), vip.to_string().c_str()); + return true; + } + + std::string rule_name = ctx_->getRuleName(vnet_name, vip); + std::string alias = ctx_->getNbrAlias(nh_ip); + + if (alias.empty()) + { + SWSS_LOG_ERROR("Failed to get interface alias for IP %s", nh_ip.to_string().c_str()); + return false; + } + + vector fvs = { + {RULE_PRIORITY, to_string(VNET_TUNNEL_TERM_ACL_BASE_PRIORITY)}, + {MATCH_DST_IP, vip.to_string()}, + {MATCH_TUNNEL_TERM, "true"}, + /* This tunnel term acl is to handle a transient state in DPU failover, so the redirect can't point to a VIP.*/ + {ACTION_REDIRECT_ACTION, nh_ip.to_string()} + }; + + SWSS_LOG_NOTICE("Creating ACL rule %s for VNet %s with VIP %s to redirect to %s", + rule_name.c_str(), vnet_name.c_str(), vip.to_string().c_str(), nh_ip.to_string().c_str()); + + acl_rule_table_->set(rule_name, fvs); + + rule.rule_name = rule_name; + rule.vip = vip; + rule.nh_ip = nh_ip; + + vnet_loc_ep_acl_rule_map_[vnet_name].push_back(rule); + + return true; +} + +bool VNetTunnelTermAcl::removeAclRule(const string vnet_name, swss::IpPrefix& vip) +{ + SWSS_LOG_ENTER(); + + VNetLocEpAclRule rule; + + if (!getAclRule(vnet_name, vip, rule)) + { + SWSS_LOG_ERROR("No ACL rule found for VNet %s with VIP %s", vnet_name.c_str(), vip.to_string().c_str()); + return false; + } + + acl_rule_table_->del(rule.rule_name); + + vnet_loc_ep_acl_rule_map_[vnet_name].erase( + std::remove_if(vnet_loc_ep_acl_rule_map_[vnet_name].begin(), + vnet_loc_ep_acl_rule_map_[vnet_name].end(), + [&vip](const VNetLocEpAclRule& rule) { return rule.vip == vip; }), + vnet_loc_ep_acl_rule_map_[vnet_name].end()); + + return true; +} + +bool VNetTunnelTermAcl::getAclRule(const string vnet_name, const swss::IpPrefix& vip, VNetLocEpAclRule& rule_found) +{ + auto it = vnet_loc_ep_acl_rule_map_.find(vnet_name); + if (it != vnet_loc_ep_acl_rule_map_.end()) + { + for (const auto& rule : it->second) + { + if (rule.vip == vip) + { + rule_found = rule; + return true; + } + } + } + + return false; +} diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 0cffa115fdb..5cd38b89ac9 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -8,12 +8,14 @@ #include #include +#include "aclorch.h" #include "request_parser.h" #include "ipaddresses.h" #include "producerstatetable.h" #include "observer.h" #include "nexthopgroupkey.h" #include "bfdorch.h" +#include "tunneltermhelper.h" #define VNET_BITMAP_SIZE 32 #define VNET_TUNNEL_SIZE 40960 @@ -202,8 +204,8 @@ class VNetVrfObject : public VNetObject bool updateObj(vector&); bool addRoute(IpPrefix& ipPrefix, NextHopGroupKey& nexthops); - bool addRoute(IpPrefix& ipPrefix, nextHop& nh); - bool removeRoute(IpPrefix& ipPrefix); + bool addRoute(IpPrefix& ipPrefix, nextHop& nh, bool increaseRefCount = true); + bool removeRoute(IpPrefix& ipPrefix, bool decreaseRefCount = true); void addProfile(IpPrefix& ipPrefix, string& profile); void removeProfile(IpPrefix& ipPrefix); @@ -308,6 +310,9 @@ const request_description_t vnet_route_description = { { "primary", REQ_T_IP_LIST }, { "monitoring", REQ_T_STRING }, { "adv_prefix", REQ_T_IP_PREFIX }, + { "check_directly_connected", REQ_T_BOOL }, + { "rx_monitor_timer", REQ_T_UINT }, + { "tx_monitor_timer", REQ_T_UINT }, }, { } }; @@ -393,6 +398,7 @@ struct MonitorUpdate IpPrefix prefix; std::string vnet; }; + struct VNetTunnelRouteEntry { // The nhg_key is the key for the next hop group which is currently active in hardware. @@ -404,12 +410,43 @@ struct VNetTunnelRouteEntry NextHopGroupKey secondary; }; +struct VNetLocEpAclRule +{ + swss::IpPrefix vip; + swss::IpAddress nh_ip; + std::string rule_name; +}; + typedef std::map VNetNextHopGroupInfoTable; typedef std::map VNetTunnelRouteTable; typedef std::map BfdSessionTable; typedef std::map> MonitorSessionTable; typedef std::map VNetEndpointInfoTable; +class VNetTunnelTermAcl +{ +public: + VNetTunnelTermAcl(DBConnector *cfgDb, DBConnector *appDb); + + bool createAclRule(const string vnet_name, swss::IpPrefix& vip, swss::IpAddress nh_ip); + bool removeAclRule(const string vnet_name, swss::IpPrefix& vip); + std::function concat = + [](const std::string &a, const std::string &b) { return a + "," + b; }; + bool getAclRule(const string vnet_name, const swss::IpPrefix& vip, VNetLocEpAclRule& rule_found); + +protected: + + void lazyInit(); + + std::shared_ptr ctx_; + + bool acl_table_initialized_ = false; + unique_ptr acl_table_; + unique_ptr acl_table_type_; + unique_ptr acl_rule_table_; + std::map> vnet_loc_ep_acl_rule_map_; +}; + class VNetRouteOrch : public Orch2, public Subject, public Observer { public: @@ -438,22 +475,23 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer bool hasNextHopGroup(const string&, const NextHopGroupKey&); sai_object_id_t getNextHopGroupId(const string&, const NextHopGroupKey&); bool addNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj, - const string& monitoring); + const string& monitoring, const bool isLocalEp=false); bool removeNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); bool createNextHopGroup(const string&, NextHopGroupKey&, VNetVrfObject *vrf_obj, const string& monitoring); NextHopGroupKey getActiveNHSet(const string&, NextHopGroupKey&, const IpPrefix& ); - bool selectNextHopGroup(const string&, NextHopGroupKey&, NextHopGroupKey&, const string&, IpPrefix&, + bool selectNextHopGroup(const string&, NextHopGroupKey&, NextHopGroupKey&, const string&, const int32_t, const int32_t, IpPrefix&, VNetVrfObject *vrf_obj, NextHopGroupKey&, const std::map& monitors=std::map()); - void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); + void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, const int32_t rx_monitor_timer, const int32_t tx_monitor_timer); void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); void createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); void removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, - const string& monitoring, IpPrefix& ipPrefix); + const string& monitoring, const int32_t rx_monitor_timer, const int32_t tx_monitor_timer, + IpPrefix& ipPrefix); void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops, IpPrefix& ipPrefix); void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); @@ -463,15 +501,24 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void updateVnetTunnel(const BfdUpdate&); void updateVnetTunnelCustomMonitor(const MonitorUpdate& update); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); + void createSubnetDecapTerm(const IpPrefix &ipPrefix); + void removeSubnetDecapTerm(const IpPrefix &ipPrefix); + + bool setAndDeleteRoutesWithRouteOrch(const sai_object_id_t vr_id, const IpPrefix& ipPrefix, + const NextHopGroupKey& nhg, const string& op); template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, - const string& monitoring, NextHopGroupKey& nexthops_secondary, const IpPrefix& adv_prefix, + const string& monitoring, const int32_t rx_monitor_timer, const int32_t tx_monitor_timer, + NextHopGroupKey& nexthops_secondary, const IpPrefix& adv_prefix, const std::map& monitors=std::map()); template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, nextHop& nh, string& op); + bool isLocalEndpoint(const string&vnet, const IpAddress &ipAddr); + bool isPartiallyLocal(const std::vector& ip_list); + VNetOrch *vnet_orch_; VNetRouteRequest request_; handler_map handler_map_; @@ -480,17 +527,23 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer VNetNextHopObserverTable next_hop_observers_; std::map syncd_nexthop_groups_; std::map syncd_tunnel_routes_; + std::map vnet_tunnel_route_check_directly_connected; BfdSessionTable bfd_sessions_; std::map monitor_info_; std::map nexthop_info_; std::map prefix_to_adv_prefix_; std::map adv_prefix_refcount_; + std::set subnet_decap_terms_created_; ProducerStateTable bfd_session_producer_; + ProducerStateTable app_tunnel_decap_term_producer_; unique_ptr
monitor_session_producer_; + shared_ptr config_db_; shared_ptr state_db_; shared_ptr app_db_; unique_ptr
state_vnet_rt_tunnel_table_; unique_ptr
state_vnet_rt_adv_table_; + + shared_ptr vnet_tunnel_term_acl_; }; class VNetCfgRouteOrch : public Orch diff --git a/orchagent/vrforch.cpp b/orchagent/vrforch.cpp index 776cf1eb0f2..a47661412f1 100644 --- a/orchagent/vrforch.cpp +++ b/orchagent/vrforch.cpp @@ -147,6 +147,7 @@ bool VRFOrch::addOperation(const Request& request) return false; } + m_stateVrfObjectTable.hset(vrf_name, "state", "ok"); SWSS_LOG_NOTICE("VRF '%s' was updated", vrf_name.c_str()); } diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index 1983cf7286b..3e489b7fb1b 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -30,6 +30,7 @@ extern Directory gDirectory; extern PortsOrch* gPortsOrch; extern sai_object_id_t gUnderlayIfId; extern FlexManagerDirectory g_FlexManagerDirectory; +extern bool gTraditionalFlexCounter; #define FLEX_COUNTER_UPD_INTERVAL 1 @@ -1219,7 +1220,10 @@ VxlanTunnelOrch::VxlanTunnelOrch(DBConnector *statedb, DBConnector *db, const st m_tunnelNameTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_TUNNEL_NAME_MAP)); m_tunnelTypeTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_TUNNEL_TYPE_MAP)); - m_vidToRidTable = unique_ptr
(new Table(m_asic_db.get(), "VIDTORID")); + if (gTraditionalFlexCounter) + { + m_vidToRidTable = make_unique
(m_asic_db.get(), "VIDTORID"); + } auto intervT = timespec { .tv_sec = FLEX_COUNTER_UPD_INTERVAL , .tv_nsec = 0 }; m_FlexCounterUpdTimer = new SelectableTimer(intervT); @@ -1237,7 +1241,7 @@ void VxlanTunnelOrch::doTask(SelectableTimer &timer) string value; const auto id = sai_serialize_object_id(it->first); - if (m_vidToRidTable->hget("", id, value)) + if (!gTraditionalFlexCounter || m_vidToRidTable->hget("", id, value)) { SWSS_LOG_INFO("Registering %s, id %s", it->second.c_str(), id.c_str()); vector tunnelNameFvs; @@ -1325,10 +1329,6 @@ VxlanTunnelOrch::createNextHopTunnel(string tunnelName, IpAddress& ipAddr, return SAI_NULL_OBJECT_ID; } - SWSS_LOG_NOTICE("NH tunnel create for %s, ip %s, mac %s, vni %d", - tunnelName.c_str(), ipAddr.to_string().c_str(), - macAddress.to_string().c_str(), vni); - auto tunnel_obj = getVxlanTunnel(tunnelName); sai_object_id_t nh_id, tunnel_id = tunnel_obj->getTunnelId(); @@ -1338,6 +1338,10 @@ VxlanTunnelOrch::createNextHopTunnel(string tunnelName, IpAddress& ipAddr, return nh_id; } + SWSS_LOG_NOTICE("NH tunnel create for %s, ip %s, mac %s, vni %d", + tunnelName.c_str(), ipAddr.to_string().c_str(), + macAddress.to_string().c_str(), vni); + sai_ip_address_t host_ip; swss::copy(host_ip, ipAddr); @@ -2405,8 +2409,8 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) } // SAI Call to add tunnel to the VLAN flood domain - - string tagging_mode = "untagged"; + // NOTE: does 'untagged' make the most sense here? + string tagging_mode = "untagged"; gPortsOrch->addVlanMember(vlanPort, tunnelPort, tagging_mode); SWSS_LOG_INFO("remote_vtep=%s vni=%d vlanid=%d ", @@ -2565,7 +2569,7 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) } // SAI Call to add tunnel to the VLAN flood domain - + // NOTE: does 'untagged' make the most sense here? string tagging_mode = "untagged"; gPortsOrch->addVlanMember(vlanPort, tunnelPort, tagging_mode, end_point_ip); diff --git a/orchagent/vxlanorch.h b/orchagent/vxlanorch.h index 695f7441e05..f53dca05f38 100644 --- a/orchagent/vxlanorch.h +++ b/orchagent/vxlanorch.h @@ -46,6 +46,7 @@ typedef enum #define MAX_VLAN_ID 4095 #define MAX_VNI_ID 16777215 +#define DEFAULT_TUNNEL_ENCAP_TTL 255 typedef enum { @@ -196,7 +197,7 @@ class VxlanTunnel bool deleteMapperHw(uint8_t mapper_list, tunnel_map_use_t map_src); bool createMapperHw(uint8_t mapper_list, tunnel_map_use_t map_src); - bool createTunnelHw(uint8_t mapper_list, tunnel_map_use_t map_src, bool with_term = true, sai_uint8_t encap_ttl=0); + bool createTunnelHw(uint8_t mapper_list, tunnel_map_use_t map_src, bool with_term = true, sai_uint8_t encap_ttl=DEFAULT_TUNNEL_ENCAP_TTL); bool deleteTunnelHw(uint8_t mapper_list, tunnel_map_use_t map_src, bool with_term = true); void deletePendingSIPTunnel(); void increment_spurious_imr_add(const std::string remote_vtep); @@ -299,7 +300,7 @@ class VxlanTunnelOrch : public Orch2 bool createVxlanTunnelMap(string tunnelName, tunnel_map_type_t mapType, uint32_t vni, - sai_object_id_t encap, sai_object_id_t decap, uint8_t encap_ttl=0); + sai_object_id_t encap, sai_object_id_t decap, uint8_t encap_ttl=DEFAULT_TUNNEL_ENCAP_TTL); bool removeVxlanTunnelMap(string tunnelName, uint32_t vni); diff --git a/orchagent/zmqorch.cpp b/orchagent/zmqorch.cpp index a3aef7d81bc..d92321902c9 100644 --- a/orchagent/zmqorch.cpp +++ b/orchagent/zmqorch.cpp @@ -9,14 +9,11 @@ void ZmqConsumer::execute() { SWSS_LOG_ENTER(); - size_t update_size = 0; auto table = static_cast(getSelectable()); - do - { - std::deque entries; - table->pops(entries); - update_size = addToSync(entries); - } while (update_size != 0); + + std::deque entries; + table->pops(entries); + addToSync(entries); drain(); } @@ -37,9 +34,18 @@ ZmqOrch::ZmqOrch(DBConnector *db, const vector &tableNames, ZmqServer *z } } + +ZmqOrch::ZmqOrch(DBConnector *db, const vector &tableNames_with_pri, ZmqServer *zmqServer) +{ + for (const auto& it : tableNames_with_pri) + { + addConsumer(db, it.first, it.second, zmqServer); + } +} + void ZmqOrch::addConsumer(DBConnector *db, string tableName, int pri, ZmqServer *zmqServer) { - if (db->getDbId() == APPL_DB) + if (db->getDbId() == APPL_DB || db->getDbId() == DPU_APPL_DB) { if (zmqServer != nullptr) { diff --git a/orchagent/zmqorch.h b/orchagent/zmqorch.h index 1541996932b..264611efaa9 100644 --- a/orchagent/zmqorch.h +++ b/orchagent/zmqorch.h @@ -26,6 +26,7 @@ class ZmqOrch : public Orch { public: ZmqOrch(swss::DBConnector *db, const std::vector &tableNames, swss::ZmqServer *zmqServer); + ZmqOrch(swss::DBConnector *db, const std::vector &tableNames_with_pri, swss::ZmqServer *zmqServer); virtual void doTask(ConsumerBase &consumer) { }; void doTask(Consumer &consumer) override; diff --git a/portsyncd/Makefile.am b/portsyncd/Makefile.am index 3db61870594..b65e3b4a4ff 100644 --- a/portsyncd/Makefile.am +++ b/portsyncd/Makefile.am @@ -15,7 +15,7 @@ portsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) portsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED -portsyncd_LDADD += -lgcovpreload +portsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/portsyncd/linksync.cpp b/portsyncd/linksync.cpp index 66cdc4df5fc..4c00d366e75 100644 --- a/portsyncd/linksync.cpp +++ b/portsyncd/linksync.cpp @@ -27,7 +27,6 @@ using namespace swss; #define VLAN_DRV_NAME "bridge" #define TEAM_DRV_NAME "team" -const string MGMT_PREFIX = "eth"; const string INTFS_PREFIX = "Ethernet"; const string LAG_PREFIX = "PortChannel"; @@ -38,57 +37,11 @@ extern string g_switchType; LinkSync::LinkSync(DBConnector *appl_db, DBConnector *state_db) : m_portTableProducer(appl_db, APP_PORT_TABLE_NAME), m_portTable(appl_db, APP_PORT_TABLE_NAME), - m_statePortTable(state_db, STATE_PORT_TABLE_NAME), - m_stateMgmtPortTable(state_db, STATE_MGMT_PORT_TABLE_NAME) + m_statePortTable(state_db, STATE_PORT_TABLE_NAME) { std::shared_ptr if_ni(if_nameindex(), if_freenameindex); struct if_nameindex *idx_p; - for (idx_p = if_ni.get(); - idx_p != NULL && idx_p->if_index != 0 && idx_p->if_name != NULL; - idx_p++) - { - string key = idx_p->if_name; - - /* Explicitly store management ports oper status into the state database. - * This piece of information is used by SNMP. */ - if (!key.compare(0, MGMT_PREFIX.length(), MGMT_PREFIX)) - { - ostringstream cmd; - string res; - cmd << "cat /sys/class/net/" << shellquote(key) << "/operstate"; - try - { - EXEC_WITH_ERROR_THROW(cmd.str(), res); - } - catch (...) - { - SWSS_LOG_WARN("Failed to get %s oper status", key.c_str()); - continue; - } - - /* Remove the trailing newline */ - if (res.length() >= 1 && res.at(res.length() - 1) == '\n') - { - res.erase(res.length() - 1); - /* The value of operstate will be either up or down */ - if (res != "up" && res != "down") - { - SWSS_LOG_WARN("Unknown %s oper status %s", - key.c_str(), res.c_str()); - } - FieldValueTuple fv("oper_status", res); - vector fvs; - fvs.push_back(fv); - - m_stateMgmtPortTable.set(key, fvs); - SWSS_LOG_INFO("Store %s oper status %s to state DB", - key.c_str(), res.c_str()); - } - continue; - } - } - if (!WarmStart::isWarmStart()) { /* See the comments for g_portSet in portsyncd.cpp */ @@ -168,8 +121,7 @@ void LinkSync::onMsg(int nlmsg_type, struct nl_object *obj) string key = rtnl_link_get_name(link); if (key.compare(0, INTFS_PREFIX.length(), INTFS_PREFIX) && - key.compare(0, LAG_PREFIX.length(), LAG_PREFIX) && - key.compare(0, MGMT_PREFIX.length(), MGMT_PREFIX)) + key.compare(0, LAG_PREFIX.length(), LAG_PREFIX)) { return; } @@ -188,24 +140,13 @@ void LinkSync::onMsg(int nlmsg_type, struct nl_object *obj) if (type) { - SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d type:%s", - nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master, type); + SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d type:%s flags:%d", + nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master, type, flags); } else { - SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d", - nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master); - } - - if (!key.compare(0, MGMT_PREFIX.length(), MGMT_PREFIX)) - { - FieldValueTuple fv("oper_status", oper ? "up" : "down"); - vector fvs; - fvs.push_back(fv); - m_stateMgmtPortTable.set(key, fvs); - SWSS_LOG_INFO("Store %s oper status %s to state DB", - key.c_str(), oper ? "up" : "down"); - return; + SWSS_LOG_NOTICE("nlmsg type:%d key:%s admin:%d oper:%d addr:%s ifindex:%d master:%d flags:%d", + nlmsg_type, key.c_str(), admin, oper, addrStr, ifindex, master, flags); } /* teamd instances are dealt in teamsyncd */ diff --git a/portsyncd/linksync.h b/portsyncd/linksync.h index d72e1ba1242..5b31ed9b3c7 100644 --- a/portsyncd/linksync.h +++ b/portsyncd/linksync.h @@ -20,7 +20,7 @@ class LinkSync : public NetMsg private: ProducerStateTable m_portTableProducer; - Table m_portTable, m_statePortTable, m_stateMgmtPortTable; + Table m_portTable, m_statePortTable; std::map m_ifindexNameMap; std::map m_ifindexOldNameMap; diff --git a/swssconfig/Makefile.am b/swssconfig/Makefile.am index 3cfc0b9629a..70d4090fcad 100644 --- a/swssconfig/Makefile.am +++ b/swssconfig/Makefile.am @@ -1,4 +1,4 @@ -INCLUDES = -I $(top_srcdir) +INCLUDES = -I $(top_srcdir) -I$(top_srcdir)/lib bin_PROGRAMS = swssconfig swssplayer @@ -21,8 +21,8 @@ swssplayer_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) swssplayer_LDADD = $(LDFLAGS_ASAN) -lswsscommon if GCOV_ENABLED -swssconfig_LDADD += -lgcovpreload -swssplayer_LDADD += -lgcovpreload +swssconfig_SOURCES += ../gcovpreload/gcovpreload.cpp +swssplayer_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED @@ -30,3 +30,5 @@ swssconfig_SOURCES += $(top_srcdir)/lib/asan.cpp swssplayer_SOURCES += $(top_srcdir)/lib/asan.cpp endif +swssconfig_SOURCES += $(top_srcdir)/lib/orch_zmq_config.cpp +swssplayer_SOURCES += $(top_srcdir)/lib/orch_zmq_config.cpp diff --git a/swssconfig/swssconfig.cpp b/swssconfig/swssconfig.cpp index 8029f3a3d88..115988a239f 100644 --- a/swssconfig/swssconfig.cpp +++ b/swssconfig/swssconfig.cpp @@ -4,11 +4,15 @@ #include #include +#include #include #include "logger.h" #include "dbconnector.h" #include "producerstatetable.h" +#include "zmqclient.h" +#include "zmqproducerstatetable.h" +#include "orch_zmq_config.h" #include using namespace std; @@ -39,12 +43,35 @@ void dump_db_item(KeyOpFieldsValuesTuple &db_item) SWSS_LOG_DEBUG("]"); } -bool write_db_data(vector &db_items) +shared_ptr get_table(unordered_map> &table_map, RedisPipeline &pipeline, string table_name, set &zmq_tables, std::shared_ptr zmq_client) +{ + shared_ptr p_table= nullptr; + auto findResult = table_map.find(table_name); + if (findResult == table_map.end()) + { + if ((zmq_tables.find(table_name) != zmq_tables.end()) && (zmq_client != nullptr)) { + p_table = make_shared(&pipeline, table_name, *zmq_client, true); + } + else { + p_table = make_shared(&pipeline, table_name, true); + } + + table_map.emplace(table_name, p_table); + } + else + { + p_table = findResult->second; + } + + return p_table; +} + +bool write_db_data(vector &db_items, set &zmq_tables, std::shared_ptr zmq_client) { DBConnector db("APPL_DB", 0, false); RedisPipeline pipeline(&db); // dtor of RedisPipeline will automatically flush data - unordered_map table_map; - + unordered_map> table_map; + for (auto &db_item : db_items) { dump_db_item(db_item); @@ -58,12 +85,13 @@ bool write_db_data(vector &db_items) } string table_name = key.substr(0, pos); string key_name = key.substr(pos + 1); - auto ret = table_map.emplace(std::piecewise_construct, std::forward_as_tuple(table_name), std::forward_as_tuple(&pipeline, table_name, true)); + + auto p_table= get_table(table_map, pipeline, table_name, zmq_tables, zmq_client); if (kfvOp(db_item) == SET_COMMAND) - ret.first->second.set(key_name, kfvFieldsValues(db_item), SET_COMMAND); + p_table->set(key_name, kfvFieldsValues(db_item), SET_COMMAND); else if (kfvOp(db_item) == DEL_COMMAND) - ret.first->second.del(key_name, DEL_COMMAND); + p_table->del(key_name, DEL_COMMAND); else { SWSS_LOG_ERROR("Invalid operation: %s\n", kfvOp(db_item).c_str()); @@ -182,6 +210,13 @@ int main(int argc, char **argv) } } + auto zmq_tables = load_zmq_tables(); + std::shared_ptr zmq_client = nullptr; + if (zmq_tables.size() > 0) + { + zmq_client = create_zmq_client(ZMQ_LOCAL_ADDRESS); + } + for (auto i : files) { SWSS_LOG_NOTICE("Loading config from JSON file:%s...", i.c_str()); @@ -203,7 +238,7 @@ int main(int argc, char **argv) return EXIT_FAILURE; } - if (!write_db_data(db_items)) + if (!write_db_data(db_items, zmq_tables, zmq_client)) { SWSS_LOG_ERROR("Failed applying data from JSON file %s", i.c_str()); return EXIT_FAILURE; diff --git a/swssconfig/swssplayer.cpp b/swssconfig/swssplayer.cpp index d999ace999f..406da017d4b 100644 --- a/swssconfig/swssplayer.cpp +++ b/swssconfig/swssplayer.cpp @@ -3,6 +3,9 @@ #include #include +#include "zmqclient.h" +#include "zmqproducerstatetable.h" +#include "orch_zmq_config.h" #include #include @@ -34,7 +37,30 @@ vector processFieldsValuesTuple(string s) return result; } -void processTokens(vector tokens) +shared_ptr get_table(unordered_map>& table_map, string table_name, set zmq_tables, std::shared_ptr zmq_client) +{ + shared_ptr p_table= nullptr; + auto findResult = table_map.find(table_name); + if (findResult == table_map.end()) + { + if ((zmq_tables.find(table_name) != zmq_tables.end()) && (zmq_client != nullptr)) { + p_table = make_shared(&db, table_name, *zmq_client, true); + } + else { + p_table = make_shared(&db, table_name); + } + + table_map.emplace(table_name, p_table); + } + else + { + p_table = findResult->second; + } + + return p_table; +} + +void processTokens(vector tokens, unordered_map>& table_map, set zmq_tables, std::shared_ptr zmq_client) { auto key = tokens[1]; @@ -43,18 +69,18 @@ void processTokens(vector tokens) auto table_name = v_key[0]; auto key_name = v_key[1]; - ProducerStateTable producer(&db, table_name); + auto p_producer= get_table(table_map, table_name, zmq_tables, zmq_client); /* Process the operation */ auto op = tokens[2]; if (op == SET_COMMAND) { auto tuples = processFieldsValuesTuple(tokens[3]); - producer.set(key_name, tuples, SET_COMMAND); + p_producer->set(key_name, tuples, SET_COMMAND); } else if (op == DEL_COMMAND) { - producer.del(key_name, DEL_COMMAND); + p_producer->del(key_name, DEL_COMMAND); } } @@ -69,10 +95,18 @@ int main(int argc, char **argv) ifstream file(argv[1]); string line; + auto zmq_tables = load_zmq_tables(); + std::shared_ptr zmq_client = nullptr; + if (zmq_tables.size() > 0) + { + zmq_client = create_zmq_client(ZMQ_LOCAL_ADDRESS); + } + + unordered_map> table_map; while (getline(file, line)) { auto tokens = tokenize(line, '|', 3); - processTokens(tokens); + processTokens(tokens, table_map, zmq_tables, zmq_client); line_index++; } diff --git a/teamsyncd/Makefile.am b/teamsyncd/Makefile.am index a13573bf259..c72498d9e30 100644 --- a/teamsyncd/Makefile.am +++ b/teamsyncd/Makefile.am @@ -15,7 +15,7 @@ teamsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) teamsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lhiredis -lswsscommon -lteam if GCOV_ENABLED -teamsyncd_LDADD += -lgcovpreload +teamsyncd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/teamsyncd/teamsync.cpp b/teamsyncd/teamsync.cpp index 6d8c025911b..e8dcb1fc55a 100644 --- a/teamsyncd/teamsync.cpp +++ b/teamsyncd/teamsync.cpp @@ -162,9 +162,10 @@ void TeamSync::addLag(const string &lagName, int ifindex, bool admin_state, if (m_teamSelectables.find(lagName) != m_teamSelectables.end()) { auto tsync = m_teamSelectables[lagName]; - if (tsync->admin_state == admin_state && tsync->mtu == mtu) + if (tsync->admin_state == admin_state && tsync->oper_state == oper_state && tsync->mtu == mtu) return; tsync->admin_state = admin_state; + tsync->oper_state = oper_state; tsync->mtu = mtu; lag_update = false; } diff --git a/teamsyncd/teamsync.h b/teamsyncd/teamsync.h index deb5d841293..536a4e96511 100644 --- a/teamsyncd/teamsync.h +++ b/teamsyncd/teamsync.h @@ -44,6 +44,7 @@ class TeamSync : public NetMsg /* member_name -> enabled|disabled */ std::map m_lagMembers; bool admin_state; + bool oper_state; unsigned int mtu; protected: int onChange(); diff --git a/tests/README.md b/tests/README.md index bf93a842797..21d691b7937 100644 --- a/tests/README.md +++ b/tests/README.md @@ -15,60 +15,92 @@ SWSS, Redis, and all the other required components run inside a virtual switch D - [Known Issues/FAQs](#known-issues) ## Setting up your test environment -1. In order to set up your test environment you will need: +### System Requirements - - A machine running Ubuntu 18.04 or 20.04 - - A `generic` Linux kernel to install `team` - - `python3` + To set up your test environment, you will need: - You can check these dependencies with the following: + - A machine running **Ubuntu 22.04** + - **Python 3** - ``` - cat /etc/os-release | grep ".*18.04\|20.04.*" - uname -r | grep generic - python3 --version - ``` + You can check these dependencies with the following commands: -2. [Install Docker CE](https://docs.docker.com/install/linux/docker-ce/ubuntu/). Be sure to follow the [post-install instructions](https://docs.docker.com/install/linux/linux-postinstall/) so that you don't need sudo privileges to run docker commands. + ```bash + cat /etc/os-release | grep ".*22.04*" + uname -r | grep generic + python3 --version + ``` +### Team Kernel Module -3. Install the external dependencies needed to run the tests. + **Note:** + Check if the `team` kernel module is already installed by running: - ``` - sudo modprobe team - sudo apt install python3-pip net-tools bridge-utils ethtool vlan libnl-nf-3-200 libnl-cli-3-200 - sudo pip3 install docker pytest flaky redis distro dataclasses fstring - ``` + ```bash + lsmod | grep team + ``` - If you are running **Ubuntu 18.04** you will need to install this package: - ``` - sudo apt install libhiredis0.13 - ``` - ****Dash testcases aren't supported in Ubuntu 18.04**** + If the output is non-empty, you're good to proceed to the next step. + If the output is empty, run the following script to install the necessary modules: - If you are running **Ubuntu 20.04** you will need to install this package: - ``` - sudo apt install libhiredis0.14 - ``` - If you want to run DASH testcases, please download and install the latest ubuntu20.04 [dependencies](https://dev.azure.com/mssonic/build/_build?definitionId=1055&_a=summary&repositoryFilter=158&branchFilter=11237%2C11237%2C11237%2C11237%2C11237) of DASH from Azp. +```bash +sudo .azure-pipelines/build_and_install_module.sh +``` -4. Install `swsscommon`. +Once the script completes successfully, verify again: - ``` - sudo dpkg -i libswsscommon_1.0.0_amd64.deb python3-swsscommon_1.0.0_amd64.deb - ``` +```bash +lsmod | grep team +``` + +### Install Docker CE + + Install [Docker CE](https://docs.docker.com/engine/install/ubuntu/) from the official documentation. + + **Important:** Follow the [post-install instructions](https://docs.docker.com/engine/install/linux-postinstall/) to avoid needing `sudo` for Docker commands. + + +### Install External Dependencies + + Install packages required for running the VS tests: + +```bash +sudo apt-get install -y net-tools bridge-utils vlan libzmq3-dev libzmq5 \ + libboost-serialization1.74.0 libboost1.74-dev libboost-dev \ + libhiredis0.14 libyang-dev - You can get these two packages by: - - [Building it from scratch](https://github.com/sonic-net/sonic-swss-common) - - Downloading the latest build from Azure: - - [Ubuntu 20.04](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&definitionId=9&artifactName=sonic-swss-common.amd64.ubuntu20_04) +sudo apt install -y python3-pip net-tools bridge-utils ethtool vlan \ + libnl-nf-3-200 libnl-cli-3-200 -5. Load the `docker-sonic-vs.gz` file into docker. You can get the image by: - - [Building it from scratch](https://github.com/sonic-net/sonic-buildimage) - - Downloading the latest build from Azure: - - [docker-sonic-vs-asan.gz](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/docker-sonic-vs-asan.gz) - - [docker-sonic-vs.gz](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/docker-sonic-vs.gz) +sudo pip3 install docker pytest flaky redis distro dataclasses fstring \ + exabgp docker lcov_cobertura +``` + +### Install DASH Dependencies (Ubuntu 22.04) + +[Download latest artifacts](https://dev.azure.com/mssonic/build/_build?definitionId=1055&_a=summary&repositoryFilter=158&branchFilter=11237%2C11237%2C11237%2C11237%2C11237) + +```bash +sudo dpkg -i libdashapi.deb libprotobuf32.deb +``` + +### Install swsscommon + +[Download latest artifacts](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&definitionId=9&artifactName=sonic-swss-common.amd64.ubuntu22_04) + +```bash +sudo dpkg -i libswsscommon.deb +sudo dpkg -i python_swsscommon.deb +``` - Once you have the file, you can load it into docker by running `docker load < docker-sonic-vs.gz`. +### Download the latest `docker-sonic-vs.gz` file from Azure build artifacts. + +```bash +wget -O docker-sonic-vs.gz "https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/docker-sonic-vs.gz" +``` + Load the image into Docker: + +```bash +docker load < docker-sonic-vs.gz +``` ## Running the tests ``` @@ -222,4 +254,14 @@ For those developing new features for SWSS or the DVS framework, you might find - Currently when pytest is run using `--force-flaky` and the last test case fails, then pytest tears down the module before retrying the failed test case and invokes the module setup again to run the failed test case. This is a known issue with [flaky](https://github.com/box/flaky/issues/128) and [pytest](https://github.com/pytest-dev/pytest-rerunfailures/issues/51). - Because of this issue, all the logs are lost except for the last test case as modules are torn down and set up again. The workaround for this is to include a dummy test case that always passes at the end of all test files/modules. + Because of this issue, all the logs are lost except for the last test case as modules are torn down and set up again. The workaround for this is to include a dummy test case that always passes at the end of all test files/modules. + +- Too many open files + + If some tests end with the error "Too many open files", you should check the maximum number of open files that are permitted on your system: + ``` + ulimit -a | grep "open files" + ``` + You can increase it by executing this command: `ulimit -n 8192`. Feel free to change `8192`. This value worked fine for me. + + **Note:** This change is only valid for the current terminal session. If you want a persistent change, append `ulimit -n 8192` to `~/.bashrc`. diff --git a/tests/buffer_model.py b/tests/buffer_model.py index ae2d1ecb796..f49257f483e 100644 --- a/tests/buffer_model.py +++ b/tests/buffer_model.py @@ -1,7 +1,12 @@ import re import time + lossless_profile_name_pattern = 'pg_lossless_([1-9][0-9]*000)_([1-9][0-9]*m)_profile' +zero_profile_name_pattern = '.+_zero_profile' +zero_pool_name_pattern = '.+_zero_pool' + + def enable_dynamic_buffer(config_db, cmd_runner): # check whether it's already running dynamic mode device_meta = config_db.get_entry('DEVICE_METADATA', 'localhost') @@ -55,7 +60,10 @@ def enable_dynamic_buffer(config_db, cmd_runner): time.sleep(20) -def disable_dynamic_buffer(config_db, cmd_runner): +def disable_dynamic_buffer(dvs): + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + device_meta = config_db.get_entry('DEVICE_METADATA', 'localhost') assert 'buffer_model' in device_meta, "'buffer_model' doesn't exist in DEVICE_METADATA|localhost" if device_meta['buffer_model'] == 'traditional': @@ -86,6 +94,32 @@ def disable_dynamic_buffer(config_db, cmd_runner): finally: # restart daemon - cmd_runner("supervisorctl restart buffermgrd") + dvs.runcmd("supervisorctl restart buffermgrd") + + # Remove all the PGs referencing non-default zero profiles + pgs = app_db.get_keys('BUFFER_PG_TABLE') + for key in pgs: + pg = app_db.get_entry('BUFFER_PG_TABLE', key) + if re.search(zero_profile_name_pattern, pg['profile']): + app_db.delete_entry('BUFFER_PG_TABLE', key) + + # Remove all the Qs referencing non-default zero profiles + qs = app_db.get_keys('BUFFER_QUEUE_TABLE') + for key in qs: + q = app_db.get_entry('BUFFER_QUEUE_TABLE', key) + if re.search(zero_profile_name_pattern, q['profile']): + app_db.delete_entry('BUFFER_QUEUE_TABLE', key) + + # Remove all the non-default zero profiles + profiles = app_db.get_keys('BUFFER_PROFILE_TABLE') + for key in profiles: + if re.search(zero_profile_name_pattern, key): + app_db.delete_entry('BUFFER_PROFILE_TABLE', key) + + # Remove all the non-default zero pools + pools = app_db.get_keys('BUFFER_POOL_TABLE') + for key in pools: + if re.search(zero_pool_name_pattern, key): + app_db.delete_entry('BUFFER_POOL_TABLE', key) time.sleep(20) diff --git a/tests/conftest.py b/tests/conftest.py index 9264417214d..0f30d00dbde 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,7 @@ import sys import tarfile import io +import traceback from typing import Dict, Tuple from datetime import datetime @@ -28,6 +29,10 @@ from dvslib import dvs_policer from dvslib import dvs_hash from dvslib import dvs_switch +from dvslib import dvs_twamp +from dvslib import dvs_buffer +from dvslib import dvs_queue +from dvslib import dvs_flex_counter from buffer_model import enable_dynamic_buffer @@ -39,6 +44,8 @@ # Voq asics will have 16 fabric ports created (defined in Azure/sonic-buildimage#7629). FABRIC_NUM_PORTS = 16 +SINGLE_ASIC_VOQ_FS = "single_asic_voq_fs" + def ensure_system(cmd): rc, output = subprocess.getstatusoutput(cmd) if rc: @@ -102,6 +109,17 @@ def pytest_addoption(parser): type=int, help="number of ports") + parser.addoption("--enable-coverage", + action="store_true", + default=False, + help="Collect the test coverage information") + + parser.addoption("--switch-mode", + action="store", + default=None, + type=str, + help="Set switch mode information") + def random_string(size=4, chars=string.ascii_uppercase + string.digits): return "".join(random.choice(chars) for x in range(size)) @@ -268,6 +286,7 @@ class DockerVirtualSwitch: CONFIG_DB_ID = 4 FLEX_COUNTER_DB_ID = 5 STATE_DB_ID = 6 + DPU_APPL_DB_ID = 15 # FIXME: Should be broken up into helper methods in a later PR. def __init__( @@ -283,6 +302,8 @@ def __init__( newctnname: str = None, ctnmounts: Dict[str, str] = None, buffer_model: str = None, + enable_coverage: bool = False, + switch_mode: str = None ): self.basicd = ["redis-server", "rsyslogd"] self.swssd = [ @@ -295,7 +316,7 @@ def __init__( "portmgrd" ] self.syncd = ["syncd"] - self.rtd = ["fpmsyncd", "zebra", "staticd"] + self.rtd = ["fpmsyncd", "zebra", "staticd", "mgmtd"] self.teamd = ["teamsyncd", "teammgrd"] self.natd = ["natsyncd", "natmgrd"] self.alld = self.basicd + self.swssd + self.syncd + self.rtd + self.teamd + self.natd @@ -304,6 +325,8 @@ def __init__( self.dvsname = name self.vct = vct self.ctn = None + self.enable_coverage = enable_coverage + self.switch_mode = switch_mode self.cleanup = not keeptb @@ -367,7 +390,11 @@ def __init__( # Dynamically create a DVS container and servers else: - self.ctn_sw = self.client.containers.run("debian:jessie", + if 'DEFAULT_CONTAINER_REGISTRY' in os.environ: + cr_prefix = os.environ['DEFAULT_CONTAINER_REGISTRY'].rstrip("/") + "/" + else: + cr_prefix = '' + self.ctn_sw = self.client.containers.run(cr_prefix + "debian:jessie", privileged=True, detach=True, command="bash", @@ -429,6 +456,7 @@ def create_servers(self): def reset_dbs(self): # DB wrappers are declared here, lazy-loaded in the tests self.app_db = None + self.dpu_app_db = None self.asic_db = None self.counters_db = None self.config_db = None @@ -440,10 +468,37 @@ def del_appl_db(self): if getattr(self, 'appldb', False): del self.appldb + def collect_coverage(self): + if not self.enable_coverage: + return + try: + # Generate the gcda files + self.runcmd('killall5 -15') + time.sleep(1) + + # Stop the services to reduce the CPU comsuption + if self.cleanup: + self.runcmd('supervisorctl stop all') + + # Generate the converage info by lcov and copy to the host + cmd = f"docker exec {self.ctn.short_id} sh -c 'cd $BUILD_DIR; rm -rf **/.libs ./lib/libSaiRedis*; lcov -c --directory . --no-external --exclude tests --ignore-errors gcov,unused --output-file /tmp/coverage.info && lcov --add-tracefile /tmp/coverage.info -o /tmp/coverage.info; sed -i \"s#SF:$BUILD_DIR/#SF:#\" /tmp/coverage.info; lcov_cobertura /tmp/coverage.info -o /tmp/coverage.xml'" + subprocess.getstatusoutput(cmd) + cmd = f"docker exec {self.ctn.short_id} sh -c 'cd $BUILD_DIR; find . -name *.gcda -type f -exec tar -rf /tmp/gcda.tar {{}} \\;'" + subprocess.getstatusoutput(cmd) + cmd = f"docker cp {self.ctn.short_id}:/tmp/gcda.tar {self.ctn.short_id}.gcda.tar" + subprocess.getstatusoutput(cmd) + cmd = f"docker cp {self.ctn.short_id}:/tmp/coverage.info {self.ctn.short_id}.coverage.info" + subprocess.getstatusoutput(cmd) + cmd = f"docker cp {self.ctn.short_id}:/tmp/coverage.xml {self.ctn.short_id}.coverage.xml" + subprocess.getstatusoutput(cmd) + except: + traceback.print_exc() def destroy(self) -> None: self.del_appl_db() + self.collect_coverage() + # In case persistent dvs was used removed all the extra server link # that were created if self.persistent: @@ -451,10 +506,13 @@ def destroy(self) -> None: # persistent and clean-up flag are mutually exclusive elif self.cleanup: - self.ctn.remove(force=True) - self.ctn_sw.remove(force=True) - os.system(f"rm -rf {self.mount}") - self.destroy_servers() + try: + self.ctn.remove(force=True) + self.ctn_sw.remove(force=True) + os.system(f"rm -rf {self.mount}") + self.destroy_servers() + except docker.errors.NotFound: + print("Skipped the container not found error, the container has already removed.") def destroy_servers(self): for s in self.servers: @@ -474,6 +532,7 @@ def check_ready_status_and_init_db(self) -> None: # Initialize the databases. self.init_asic_db_validator() self.init_appl_db_validator() + self.init_dpu_appl_db_validator() self.reset_dbs() # Verify that SWSS has finished initializing. @@ -517,6 +576,9 @@ def init_asic_db_validator(self) -> None: def init_appl_db_validator(self) -> None: self.appldb = ApplDbValidator(self.APPL_DB_ID, self.redis_sock) + def init_dpu_appl_db_validator(self) -> None: + self.dpu_appldb = ApplDbValidator(self.DPU_APPL_DB_ID, self.redis_sock) + def check_swss_ready(self, timeout: int = 300) -> None: """Verify that SWSS is ready to receive inputs. @@ -531,7 +593,10 @@ def check_swss_ready(self, timeout: int = 300) -> None: self.get_config_db() metadata = self.config_db.get_entry('DEVICE_METADATA|localhost', '') if metadata.get('switch_type', 'npu') in ['voq', 'fabric']: - num_ports = NUM_PORTS + FABRIC_NUM_PORTS + if self.switch_mode and self.switch_mode == SINGLE_ASIC_VOQ_FS: + num_ports = NUM_PORTS + else: + num_ports = NUM_PORTS + FABRIC_NUM_PORTS # Verify that all ports have been initialized and configured app_db = self.get_app_db() @@ -551,8 +616,9 @@ def _polling_function(): # Verify that fabric ports are monitored in STATE_DB if metadata.get('switch_type', 'npu') in ['voq', 'fabric']: - self.get_state_db() - self.state_db.wait_for_n_keys("FABRIC_PORT_TABLE", FABRIC_NUM_PORTS) + if not self.switch_mode or (self.switch_mode and self.switch_mode != SINGLE_ASIC_VOQ_FS): + self.get_state_db() + self.state_db.wait_for_n_keys("FABRIC_PORT_TABLE", FABRIC_NUM_PORTS) def net_cleanup(self) -> None: """Clean up network, remove extra links.""" @@ -641,7 +707,7 @@ def get_logs(self) -> None: ensure_system(f"rm -rf {log_dir}") ensure_system(f"mkdir -p {log_dir}") - p = subprocess.Popen(["tar", "--no-same-owner", "-C", os.path.join("./", log_dir), "-x"], stdin=subprocess.PIPE) + p = subprocess.Popen(["tar", "--no-same-owner", "--exclude", "README", "-C", os.path.join("./", log_dir), "-x"], stdin=subprocess.PIPE) stream, _ = self.ctn.get_archive("/var/log/") for x in stream: @@ -698,6 +764,21 @@ def stop_zebra(self): self.runcmd(['sh', '-c', 'pkill -9 zebra']) time.sleep(5) + def stop_teamsyncd(self): + self.runcmd(['sh', '-c', 'pkill -9 teamsyncd']) + + time.sleep(5) + + def start_teamsyncd(self): + self.runcmd(['sh', '-c', 'supervisorctl start teamsyncd']) + + time.sleep(5) + + def restart_teammgrd(self): + self.runcmd(['sh', '-c', 'supervisorctl restart teammgrd']) + + time.sleep(5) + # deps: warm_reboot def start_fpmsyncd(self): self.runcmd(['sh', '-c', 'supervisorctl start fpmsyncd']) @@ -710,6 +791,12 @@ def stop_fpmsyncd(self): self.runcmd(['sh', '-c', 'pkill -x fpmsyncd']) time.sleep(1) + def disable_fpmsyncd(self): + self.runcmd(['sh', '-c', 'supervisorctl stop fpmsyncd']) + + # Let's give fpmsyncd a chance to connect to Zebra. + time.sleep(5) + # deps: warm_reboot def SubscribeAppDbObject(self, objpfx): r = redis.Redis(unix_socket_path=self.redis_sock, db=swsscommon.APPL_DB, @@ -718,6 +805,14 @@ def SubscribeAppDbObject(self, objpfx): pubsub.psubscribe("__keyspace@0__:%s*" % objpfx) return pubsub + # deps: warm_reboot + def SubscribeDpuAppDbObject(self, objpfx): + r = redis.Redis(unix_socket_path=self.redis_sock, db=swsscommon.DPU_APPL_DB, + encoding="utf-8", decode_responses=True) + pubsub = r.pubsub() + pubsub.psubscribe("__keyspace@0__:%s*" % objpfx) + return pubsub + # deps: warm_reboot def SubscribeAsicDbObject(self, objpfx): r = redis.Redis(unix_socket_path=self.redis_sock, db=swsscommon.ASIC_DB, @@ -1055,6 +1150,28 @@ def set_interface_status(self, interface, admin_status): tbl.set(interface, fvs) time.sleep(1) + def get_interface_oper_status(self, interface): + _, output = self.runcmd(f"ip --brief address show {interface}") + state = output.split()[1] + return state + + def get_interface_link_local_ipv6(self, interface, subnet=False): + """ + If subnet is True, the returned address will include the subnet length (e.g., fe80::aa:bbff:fecc:ddee/64) + """ + _, output = self.runcmd(f"ip --brief address show {interface}") + ipv6 = output.split()[2] + if not subnet: + slash = ipv6.find('/') + if slash > 0: + ipv6 = ipv6[0:slash] + return ipv6 + + def get_interface_mac(self, interface): + _, output = self.runcmd(f"ip --brief link show {interface}") + mac = output.split()[2] + return mac + # deps: acl, fdb_update, fdb, mirror_port_erspan, vlan, sub port intf def add_ip_address(self, interface, ip, vrf_name=None): if interface.startswith("PortChannel"): @@ -1154,6 +1271,7 @@ def remove_fdb(self, vlan, mac): # policer, port_dpb_vlan, vlan def setup_db(self): self.pdb = swsscommon.DBConnector(swsscommon.APPL_DB, self.redis_sock, 0) + self.ddb = swsscommon.DBConnector(swsscommon.DPU_APPL_DB, self.redis_sock, 0) self.adb = swsscommon.DBConnector(swsscommon.ASIC_DB, self.redis_sock, 0) self.cdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, self.redis_sock, 0) self.sdb = swsscommon.DBConnector(swsscommon.STATE_DB, self.redis_sock, 0) @@ -1311,6 +1429,11 @@ def set_nat_zone(self, interface, nat_zone): tbl.set(interface, fvs) time.sleep(1) + # db + def delete_entry_tbl(self, db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + # deps: acl, crm, fdb def setReadOnlyAttr(self, obj, attr, val): db = swsscommon.DBConnector(swsscommon.ASIC_DB, self.redis_sock, 0) @@ -1343,6 +1466,12 @@ def get_app_db(self) -> ApplDbValidator: return self.app_db + def get_dpu_app_db(self) -> ApplDbValidator: + if not self.dpu_app_db: + self.dpu_app_db = DVSDatabase(self.DPU_APPL_DB_ID, self.redis_sock) + + return self.dpu_app_db + # FIXME: Now that AsicDbValidator is using DVSDatabase we should converge this with # that implementation. Save it for a follow-up PR. def get_asic_db(self) -> AsicDbValidator: @@ -1400,7 +1529,8 @@ def __init__( log_path=None, max_cpu=2, forcedvs=None, - topoFile=None + topoFile=None, + enable_coverage=False, ): self.ns = namespace self.chassbr = "br4chs" @@ -1414,6 +1544,7 @@ def __init__( self.log_path = log_path self.max_cpu = max_cpu self.forcedvs = forcedvs + self.enable_coverage = enable_coverage if self.ns is None: self.ns = random_string() @@ -1466,7 +1597,7 @@ def find_all_ctns(self): self.dvss[ctn.name] = DockerVirtualSwitch(ctn.name, self.imgname, self.keeptb, self.env, log_path=ctn.name, max_cpu=self.max_cpu, forcedvs=self.forcedvs, - vct=self) + vct=self, enable_coverage=self.enable_coverage) if self.chassbr is None and len(self.dvss) > 0: ret, res = self.ctn_runcmd(self.dvss.values()[0].ctn, "sonic-cfggen --print-data -j /usr/share/sonic/virtual_chassis/vct_connections.json") @@ -1537,6 +1668,8 @@ def handle_request(self): def destroy(self): self.verify_vct() + for dv in self.dvss.values(): + dv.collect_coverage() if self.keeptb: return self.oper = "delete" @@ -1571,6 +1704,11 @@ def create_vct_ctn(self, ctndir): vol = {} vol[chassis_config_dir] = {"bind": "/usr/share/sonic/virtual_chassis", "mode": "ro"} + # Mount database_config.json when connect_to_chassis_db is set to 1 + if defcfg.get("connect_to_chassis_db") == 1: + database_config_file = cwd + "/virtual_chassis/database_config.json" + vol[database_config_file] = {"bind": "/etc/sonic/database_config.json", "mode": "ro"} + # pass self.ns into the vs to be use for vs restarts by swss conftest. # connection to chassbr is setup by chassis_connect.py within the vs data = {} @@ -1587,7 +1725,8 @@ def create_vct_ctn(self, ctndir): max_cpu=self.max_cpu, forcedvs=self.forcedvs, vct=self,newctnname=ctnname, - ctnmounts=vol) + ctnmounts=vol, + enable_coverage=self.enable_coverage) self.set_ctninfo(ctndir, ctnname, self.dvss[ctnname].pid) return @@ -1759,6 +1898,8 @@ def manage_dvs(request) -> str: buffer_model = request.config.getoption("--buffer_model") force_recreate = request.config.getoption("--force-recreate-dvs") graceful_stop = request.config.getoption("--graceful-stop") + enable_coverage = request.config.getoption("--enable-coverage") + switch_mode = request.config.getoption("--switch-mode") dvs = None curr_dvs_env = [] # lgtm[py/unused-local-variable] @@ -1790,7 +1931,13 @@ def update_dvs(log_path, new_dvs_env=[]): dvs.get_logs() dvs.destroy() - dvs = DockerVirtualSwitch(name, imgname, keeptb, new_dvs_env, log_path, max_cpu, forcedvs, buffer_model = buffer_model) + vol = {} + if switch_mode and switch_mode == SINGLE_ASIC_VOQ_FS: + cwd = os.getcwd() + voq_configs = cwd + "/single_asic_voq_fs" + vol[voq_configs] = {"bind": "/usr/share/sonic/single_asic_voq_fs", "mode": "ro"} + + dvs = DockerVirtualSwitch(name, imgname, keeptb, new_dvs_env, log_path, max_cpu, forcedvs, buffer_model = buffer_model, enable_coverage=enable_coverage, ctnmounts=vol, switch_mode=switch_mode) curr_dvs_env = new_dvs_env @@ -1811,6 +1958,7 @@ def update_dvs(log_path, new_dvs_env=[]): if graceful_stop: dvs.stop_swss() dvs.stop_syncd() + dvs.get_logs() dvs.destroy() @@ -1831,7 +1979,7 @@ def dvs(request, manage_dvs) -> DockerVirtualSwitch: return manage_dvs(log_path, dvs_env) -@pytest.yield_fixture(scope="module") +@pytest.fixture(scope="module") def vst(request): vctns = request.config.getoption("--vctns") topo = request.config.getoption("--topo") @@ -1839,13 +1987,14 @@ def vst(request): keeptb = request.config.getoption("--keeptb") imgname = request.config.getoption("--imgname") max_cpu = request.config.getoption("--max_cpu") + enable_coverage = request.config.getoption("--enable-coverage") log_path = vctns if vctns else request.module.__name__ dvs_env = getattr(request.module, "DVS_ENV", []) if not topo: # use ecmp topology as default topo = "virtual_chassis/chassis_supervisor.json" vct = DockerVirtualChassisTopology(vctns, imgname, keeptb, dvs_env, log_path, max_cpu, - forcedvs, topo) + forcedvs, topo, enable_coverage) yield vct vct.get_logs(request.module.__name__) vct.destroy() @@ -1858,13 +2007,14 @@ def vct(request): keeptb = request.config.getoption("--keeptb") imgname = request.config.getoption("--imgname") max_cpu = request.config.getoption("--max_cpu") + enable_coverage = request.config.getoption("--enable-coverage") log_path = vctns if vctns else request.module.__name__ dvs_env = getattr(request.module, "DVS_ENV", []) if not topo: # use ecmp topology as default topo = "virtual_chassis/chassis_with_ecmp_neighbors.json" vct = DockerVirtualChassisTopology(vctns, imgname, keeptb, dvs_env, log_path, max_cpu, - forcedvs, topo) + forcedvs, topo, enable_coverage) yield vct vct.get_logs(request.module.__name__) vct.destroy() @@ -1919,7 +2069,8 @@ def dvs_vlan_manager(request, dvs): def dvs_port_manager(request, dvs): request.cls.dvs_port = dvs_port.DVSPort(dvs.get_asic_db(), dvs.get_app_db(), - dvs.get_config_db()) + dvs.get_config_db(), + dvs.get_counters_db()) @pytest.fixture(scope="class") @@ -1943,7 +2094,36 @@ def dvs_hash_manager(request, dvs): @pytest.fixture(scope="class") def dvs_switch_manager(request, dvs): - request.cls.dvs_switch = dvs_switch.DVSSwitch(dvs.get_asic_db()) + request.cls.dvs_switch = dvs_switch.DVSSwitch(dvs.get_asic_db(), + dvs.get_config_db(), + dvs.get_counters_db()) + +@pytest.fixture(scope="class") +def dvs_twamp_manager(request, dvs): + request.cls.dvs_twamp = dvs_twamp.DVSTwamp(dvs.get_asic_db(), + dvs.get_config_db(), + dvs.get_state_db(), + dvs.get_counters_db(), + dvs.get_app_db()) + +@pytest.fixture(scope="class") +def dvs_buffer_manager(request, dvs): + request.cls.dvs_buffer = dvs_buffer.DVSBuffer(dvs.get_asic_db(), + dvs.get_app_db(), + dvs.get_config_db(), + dvs.get_state_db(), + dvs.get_counters_db()) + +@pytest.fixture(scope="class") +def dvs_queue_manager(request, dvs): + request.cls.dvs_queue = dvs_queue.DVSQueue(dvs.get_asic_db(), + dvs.get_config_db(), + dvs.get_counters_db()) + +@pytest.fixture(scope="class") +def dvs_flex_counter_manager(request, dvs): + request.cls.dvs_flex_counter = dvs_flex_counter.DVSFlexCounter(dvs.get_config_db(), + dvs.get_flex_db()) ##################### DPB fixtures ########################################### def create_dpb_config_file(dvs): diff --git a/tests/create_appliance.py b/tests/create_appliance.py new file mode 100644 index 00000000000..c503b35e7d6 --- /dev/null +++ b/tests/create_appliance.py @@ -0,0 +1,42 @@ +#!/usr/bin/python3 + +""" + Connect to Dash orch with ZMQ and send create appliance request. + usage: + python3 create_appliance.py [appliance ID] + Example: + python3 create_appliance.py 1234 +""" + +from swsscommon import swsscommon +from dash_api.appliance_pb2 import * +import typing +import ipaddress +import socket +import sys + +def to_string(value): + if isinstance(value, bool): + return "true" if value else "false" + elif isinstance(value, bytes): + return value + return str(value) + +# connect to Dash ZMQ endpoint +db_connection = swsscommon.DBConnector("DPU_APPL_DB", 0) +zmq_client = swsscommon.ZmqClient("tcp://127.0.0.1:8100") +app_dash_appliance_table = swsscommon.ZmqProducerStateTable( + db_connection, + "DASH_APPLIANCE_TABLE", + zmq_client, + True) + +# prepare create appliance request +pairs_str = [] +pb = Appliance() +pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address("10.0.0.1"))) +pb.vm_vni = int(sys.argv[1]) +pairs_str.append(("pb", pb.SerializeToString())) + +# send create appliance request via ZMQ +app_dash_appliance_table.set("100", pairs_str) diff --git a/tests/dash/dash_configs.py b/tests/dash/dash_configs.py new file mode 100644 index 00000000000..9e5e005d641 --- /dev/null +++ b/tests/dash/dash_configs.py @@ -0,0 +1,298 @@ +import base64 +import socket +import uuid +from ipaddress import ip_address as IP + +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.route_pb2 import * +from dash_api.route_rule_pb2 import * +from dash_api.vnet_mapping_pb2 import * +from dash_api.route_type_pb2 import * +from dash_api.types_pb2 import * + +VNET_ENCAP = "vnet_encap" +VNET_DIRECT = "vnet_direct" +PRIVATELINK = "privatelink" +DECAP = "decap" + +SIP = "10.0.0.1" +UNDERLAY_IP = "25.1.1.1" +VNET_MAP_IP1 = "10.1.1.1" +VNET_MAP_IP2 = "10.1.1.2" +UNDERLAY_IP = "101.1.2.3" +OUTBOUND_ROUTE_PREFIX1 = "10.1.0.8/32" +OUTBOUND_ROUTE_PREFIX2 = "10.1.0.9/32" +OVERLAY_IP = "10.0.0.6" +PL_ENCODING_IP = "::56b2:0:20:0:0" +PL_ENCODING_MASK = "::ffff:ffff:ffff:0:0" +PL_UNDERLAY_SIP1 = "55.1.2.3" +PL_UNDERLAY_SIP2 = "55.2.3.4" +PL_OVERLAY_SIP = "fd40:108:0:d204:0:200::0" +PL_OVERLAY_SIP_MASK = "ffff:ffff:ffff:ffff:ffff:ffff::" +PL_OVERLAY_DIP = "2603:10e1:100:2::3401:203" +PL_OVERLAY_DIP_MASK = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" + +APPLIANCE_ID = "100" +VM_VNI = "4321" +ENCAP_VNI = 100 +VNET1 = "Vnet1" +VNET1_VNI = "45654" +VNET1_GUID = "559c6ce8-26ab-4193-b946-ccc6e8f930b2" +MAC_STRING = "F4939FEFC47E" +MAC_ADDRESS = "F4:93:9F:EF:C4:7E" +ENI_ID = "497f23d7-f0ac-4c99-a98f-59b470e8c7bd" +ROUTE_GROUP1 = "RouteGroup1" +ROUTE_GROUP2 = "RouteGroup2" +ROUTE_GROUP1_GUID = "48af6ce8-26cc-4293-bfa6-0126e8fcdeb2" +ROUTE_GROUP2_GUID = "58cf62e0-22cc-4693-baa6-012358fcdec9" +TUNNEL1 = "Tunnel1" +TUNNEL1_ENDPOINTS = [IP("99.99.99.1")] +TUNNEL1_VNI = "101" +TUNNEL2 = "Tunnel2" +TUNNEL2_ENDPOINTS = [IP("55.55.55.1") + i for i in range(5)] +TUNNEL2_VNI = "102" +TUNNEL3 = "Tunnel3" +TUNNEL3_ENDPOINTS = [IP("2001:db8::1")] +TUNNEL3_VNI = "103" +TUNNEL4 = "Tunnel4" +TUNNEL4_ENDPOINTS = [IP("77.77.77.1") + i for i in range(1, 50)] + [IP("2002:db8::1") + i for i in range(1, 50)] +TUNNEL4_VNI = "104" +TUNNEL5 = "Tunnel5" +TUNNEL5_ENDPOINTS = [IP("88.88.88.1")] * 2 +TUNNEL5_VNI = "105" + + +def fmt_tunnel_endpoints(endpoints): + return [ + {"ipv4": socket.htonl(int(endpoint))} + if endpoint.version == 4 else + {"ipv6": base64.b64encode(endpoint.packed)} + for endpoint in endpoints + ] + + +METER_POLICY_V4 = "DashMeterV4Policy" +METER_POLICY_V6 = "DashMeterV6Policy" +METER_RULE_1_NUM = "44" +METER_RULE_1_PRIORITY = "16" +METER_RULE_1_METERING_CLASS = "36" +METER_RULE_1_IP = "77.1.2.3" +METER_RULE_1_IP_MASK = "255.255.255.255" +METER_RULE_2_NUM = "66" +METER_RULE_2_PRIORITY = "20" +METER_RULE_2_METERING_CLASS = "50" +METER_RULE_2_IP = "2001:108:0:d204:0::0" +METER_RULE_2_IP_MASK = "ffff:ffff:ffff:ffff:ffff:ffff::" + +APPLIANCE_CONFIG = { + "sip": { + "ipv4": socket.htonl(int(IP(SIP))) + }, + "vm_vni": int(VM_VNI), + "local_region_id": 10 +} + +VNET_CONFIG = { + "vni": VNET1_VNI, + "guid": { + "value": base64.b64encode(bytes.fromhex(uuid.UUID(VNET1_GUID).hex)) + } +} + +ENI_CONFIG = { + "vnet": VNET1, + "underlay_ip": { + "ipv4": socket.htonl(int(IP(UNDERLAY_IP))) + }, + "mac_address": bytes.fromhex(MAC_STRING), + "eni_id": ENI_ID, + "admin_state": State.STATE_ENABLED, + "pl_underlay_sip": { + "ipv4": socket.htonl(int(IP(PL_UNDERLAY_SIP1))) + }, + "pl_sip_encoding": { + "ip": { + "ipv6": base64.b64encode(IP(PL_ENCODING_IP).packed) + }, + "mask": { + "ipv6": base64.b64encode(IP(PL_ENCODING_MASK).packed) + } + } +} + +VNET_MAPPING_CONFIG_VNET_ENCAP = { + "mac_address": bytes.fromhex(MAC_STRING), + "action_type": RoutingType.ROUTING_TYPE_VNET_ENCAP, + "underlay_ip": { + "ipv4": socket.htonl(int(IP(UNDERLAY_IP))) + }, +} + +VNET_MAPPING_CONFIG_PRIVATELINK = { + "action_type": RoutingType.ROUTING_TYPE_PRIVATELINK, + "underlay_ip": { + "ipv4": socket.htonl(int(IP(UNDERLAY_IP))) + }, + "overlay_sip_prefix": { + "ip": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_SIP).packed) + }, + "mask": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_SIP_MASK).packed) + } + }, + "overlay_dip_prefix": { + "ip": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_DIP).packed) + }, + "mask": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_DIP_MASK).packed) + } + }, +} + +VNET_MAPPING_CONFIG_PLNSG= { + "routing_type": RoutingType.ROUTING_TYPE_PRIVATELINK, + "underlay_ip": { + "ipv4": socket.htonl(int(IP(UNDERLAY_IP))) + }, + "overlay_sip_prefix": { + "ip": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_SIP).packed) + }, + "mask": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_SIP_MASK).packed) + } + }, + "overlay_dip_prefix": { + "ip": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_DIP).packed) + }, + "mask": { + "ipv6": base64.b64encode(IP(PL_OVERLAY_DIP_MASK).packed) + } + }, + "tunnel": TUNNEL1 +} + +ROUTE_VNET_CONFIG = { + "routing_type": RoutingType.ROUTING_TYPE_VNET, + "vnet": VNET1, +} + +ROUTE_VNET_CONFIG_UNDERLAY_SIP = { + "routing_type": RoutingType.ROUTING_TYPE_VNET, + "vnet": VNET1, + "underlay_sip": { + "ipv4": socket.htonl(int(IP(PL_UNDERLAY_SIP2))) + } +} + +ROUTING_TYPE_VNET_ENCAP_CONFIG = { + "items": [ + { + "action_name": "action1", + "action_type": ActionType.ACTION_TYPE_MAPROUTING + }, + ] +} + +ROUTING_TYPE_PL_CONFIG = { + "items": [ + { + "action_name": "action1", + "action_type": ActionType.ACTION_TYPE_4_to_6 + }, + { + "action_name": "action2", + "action_type": ActionType.ACTION_TYPE_STATICENCAP, + "encap_type": EncapType.ENCAP_TYPE_NVGRE, + "vni": ENCAP_VNI + } + ] +} + +ROUTE_GROUP1_CONFIG = { + "guid": ROUTE_GROUP1_GUID, + "version": "rg_version" +} + +ROUTE_GROUP2_CONFIG = { + "guid": ROUTE_GROUP2_GUID, + "version": "rg_version" +} + +ENI_ROUTE_GROUP1_CONFIG = { + "group_id": ROUTE_GROUP1, +} + +ENI_ROUTE_GROUP2_CONFIG = { + "group_id": ROUTE_GROUP2, +} + +TUNNEL1_CONFIG = { + "endpoints": fmt_tunnel_endpoints(TUNNEL1_ENDPOINTS), + "encap_type": EncapType.ENCAP_TYPE_VXLAN, + "vni": TUNNEL1_VNI +} + +TUNNEL2_CONFIG = { + "endpoints": fmt_tunnel_endpoints(TUNNEL2_ENDPOINTS), + "encap_type": EncapType.ENCAP_TYPE_NVGRE, + "vni": TUNNEL2_VNI +} + +TUNNEL3_CONFIG = { + "endpoints": fmt_tunnel_endpoints(TUNNEL3_ENDPOINTS), + "encap_type": EncapType.ENCAP_TYPE_VXLAN, + "vni": TUNNEL3_VNI +} + +TUNNEL4_CONFIG = { + "endpoints": fmt_tunnel_endpoints(TUNNEL4_ENDPOINTS), + "encap_type": EncapType.ENCAP_TYPE_NVGRE, + "vni": TUNNEL4_VNI +} + +TUNNEL5_CONFIG = { + "endpoints": fmt_tunnel_endpoints(TUNNEL5_ENDPOINTS), + "encap_type": EncapType.ENCAP_TYPE_VXLAN, + "vni": TUNNEL5_VNI +} + + +METER_POLICY_V4_CONFIG = { + "ip_version": IpVersion.IP_VERSION_IPV4 +} + +METER_POLICY_V6_CONFIG = { + "ip_version": IpVersion.IP_VERSION_IPV6 +} + +METER_RULE_1_CONFIG = { + "priority": int(METER_RULE_1_PRIORITY), + "metering_class": int(METER_RULE_1_METERING_CLASS), + "ip_prefix": { + "ip": { + "ipv4": socket.htonl(int(IP(METER_RULE_1_IP))) + }, + "mask": { + "ipv4": socket.htonl(int(IP(METER_RULE_1_IP_MASK))) + } + }, +} + +METER_RULE_2_CONFIG = { + "priority": int(METER_RULE_2_PRIORITY), + "metering_class": int(METER_RULE_2_METERING_CLASS), + "ip_prefix": { + "ip": { + "ipv6": base64.b64encode(IP(METER_RULE_2_IP).packed) + }, + "mask": { + "ipv6": base64.b64encode(IP(METER_RULE_2_IP_MASK).packed) + } + }, +} diff --git a/tests/dash/dash_db.py b/tests/dash/dash_db.py new file mode 100644 index 00000000000..e7f884990be --- /dev/null +++ b/tests/dash/dash_db.py @@ -0,0 +1,318 @@ +from swsscommon import swsscommon +from dvslib.dvs_common import wait_for_result +import typing +import pytest +import time + +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.eni_route_pb2 import * +from dash_api.route_pb2 import * +from dash_api.route_group_pb2 import * +from dash_api.route_rule_pb2 import * +from dash_api.vnet_mapping_pb2 import * +from dash_api.route_type_pb2 import * +from dash_api.meter_policy_pb2 import * +from dash_api.meter_rule_pb2 import * +from dash_api.tunnel_pb2 import * +from dash_api.types_pb2 import * +from google.protobuf.json_format import ParseDict +from google.protobuf.message import Message + +ASIC_DASH_APPLIANCE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_DASH_APPLIANCE" +ASIC_DIRECTION_LOOKUP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY" +ASIC_VIP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY" +ASIC_VNET_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VNET" +ASIC_ENI_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ENI" +ASIC_ENI_ETHER_ADDR_MAP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY" +ASIC_OUTBOUND_CA_TO_PA_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY" +ASIC_PA_VALIDATION_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_PA_VALIDATION_ENTRY" +ASIC_OUTBOUND_ROUTING_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY" +ASIC_INBOUND_ROUTING_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY" +ASIC_OUTBOUND_ROUTING_GROUP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_GROUP" +ASIC_METER_POLICY_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_METER_POLICY" +ASIC_METER_RULE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_METER_RULE" + +APP_DB_TO_PROTOBUF_MAP = { + swsscommon.APP_DASH_APPLIANCE_TABLE_NAME: Appliance, + swsscommon.APP_DASH_VNET_TABLE_NAME: Vnet, + swsscommon.APP_DASH_ENI_TABLE_NAME: Eni, + swsscommon.APP_DASH_VNET_MAPPING_TABLE_NAME: VnetMapping, + swsscommon.APP_DASH_ROUTE_TABLE_NAME: Route, + swsscommon.APP_DASH_ROUTE_RULE_TABLE_NAME: RouteRule, + swsscommon.APP_DASH_ENI_ROUTE_TABLE_NAME: EniRoute, + swsscommon.APP_DASH_ROUTING_TYPE_TABLE_NAME: RouteType, + swsscommon.APP_DASH_METER_POLICY_TABLE_NAME: MeterPolicy, + swsscommon.APP_DASH_METER_RULE_TABLE_NAME: MeterRule, + swsscommon.APP_DASH_ROUTE_GROUP_TABLE_NAME: RouteGroup, + swsscommon.APP_DASH_TUNNEL_TABLE_NAME: Tunnel +} + + +def del_all_keys(dash_db): + for table_name in APP_DB_TO_PROTOBUF_MAP.keys(): + keys = dash_db.get_app_db_keys(table_name) + for key in keys: + dash_db.remove_app_db_entry(table_name, key) + + +@pytest.fixture +def dash_db(dvs): + dash_db = DashDB(dvs) + yield dash_db + del_all_keys(dash_db) + + +@pytest.fixture(scope="module") +def dash_db_module(dvs): + dash_db = DashDB(dvs) + yield dash_db + del_all_keys(dash_db) + + +def to_string(value): + if isinstance(value, bool): + return "true" if value else "false" + elif isinstance(value, bytes): + return value + return str(value) + +class ProducerStateTable(swsscommon.ProducerStateTable): + def __setitem__(self, key: str, pairs: typing.Union[dict, list, tuple]): + pairs_str = [] + if isinstance(pairs, dict): + pairs = pairs.items() + for k, v in pairs: + pairs_str.append((to_string(k), to_string(v))) + self.set(key, pairs_str) + time.sleep(1) + + def __delitem__(self, key: str): + self.delete(str(key)) + time.sleep(1) + + +class Table(swsscommon.Table): + def __getitem__(self, key: str): + exists, result = self.get(str(key)) + if not exists: + return None + else: + return dict(result) + + def __contains__(self, key: str): + return self[key] is not None + + def get_keys(self): + return self.getKeys() + + def get_newly_created_oid(self, old_oids): + new_oids = self.asic_db.wait_for_n_keys(self, len(old_oids) + 1) + oid = [ids for ids in new_oids if ids not in old_oids] + return oid[0] + + +class DashDB(object): + + def parse_key_value(self, arglist): + if len(arglist) < 2: + raise ValueError("Invalid number of arguments") + # elif len(arglist) == 1: + # handle case where no value is passed (e.g. in remove_app_db_entry) + # key = arglist[0] + # value = None + else: + # concat all parts of the key, assume last arg to be the value + key = ":".join(arglist[:-1]) + value = arglist[-1] + return key, value + + def set_app_db_entry(self, table_name, *args): + key, value = self.parse_key_value(args) + if isinstance(value, dict): + pb = ParseDict(value, APP_DB_TO_PROTOBUF_MAP[table_name]()) + pb_string = pb.SerializeToString() + elif isinstance(value, Message): + pb_string = value.SerializeToString() + else: + pb_string = value + + table = ProducerStateTable(self.dvs.get_dpu_app_db().db_connection, table_name) + table[key] = {'pb': pb_string} + + def remove_app_db_entry(self, table_name, *key_parts): + key = ":".join(key_parts) + table = ProducerStateTable(self.dvs.get_dpu_app_db().db_connection, table_name) + del table[key] + + def get_asic_db_entry(self, table_name, key): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + return table[key] + + def wait_for_asic_db_keys(self, table_name, min_keys=1, old_keys=None): + + def polling_function(): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + keys = table.get_keys() + if old_keys: + keys = [key for key in keys if key not in old_keys] + return len(keys) >= min_keys, keys + + _, keys = wait_for_result(polling_function, failure_message=f"Found fewer than {min_keys} keys in ASIC_DB table {table_name}") + return keys + + def wait_for_asic_db_key_del(self, table_name, key): + def polling_function(): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + return key not in table, None + + _, attrs = wait_for_result(polling_function, failure_message=f"ASIC_DB table {table_name} still has key {key}") + return attrs + + def wait_for_asic_db_field(self, table_name, key, field, expected_value=None): + + def polling_function(): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + attrs = table[key] + if attrs is None or field not in attrs: + return False, None + + if expected_value is not None: + return attrs[field] == expected_value, attrs[field] + else: + return True, attrs[field] + + if expected_value is not None: + failure_message = f"Field {field} in ASIC_DB table {table_name} not equal to {expected_value}" + else: + failure_message = f"Field {field} not found in ASIC_DB table {table_name}" + success, value = wait_for_result(polling_function, failure_message=failure_message) + if success: + return value + else: + return None + + def get_attr_to_sai_object_map(self, table_name, attribute): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + keys = table.get_keys() + attr_to_sai_object_map = {} + for key in keys: + attrs = table[key] + if attribute in attrs: + attr_to_sai_object_map[attrs[attribute]] = key + return attr_to_sai_object_map + + def get_app_db_keys(self, table_name): + table = Table(self.dvs.get_dpu_app_db().db_connection, table_name) + return table.get_keys() + + def get_asic_db_keys(self, table_name): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + return table.get_keys() + + def __init__(self, dvs): + self.dvs = dvs + self.app_dash_routing_type_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_ROUTING_TYPE_TABLE") + self.app_dash_appliance_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_APPLIANCE_TABLE") + self.app_dash_vnet_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_VNET_TABLE") + self.app_dash_eni_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_ENI_TABLE") + self.app_dash_vnet_map_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_VNET_MAPPING_TABLE") + self.app_dash_route_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_ROUTE_TABLE") + self.app_dash_route_rule_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_ROUTE_RULE_TABLE") + self.app_dash_eni_route_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_ENI_ROUTE_TABLE") + self.app_dash_route_group_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_ROUTE_GROUP_TABLE") + self.app_dash_meter_policy_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_METER_POLICY_TABLE") + self.app_dash_meter_rule_table = ProducerStateTable( + self.dvs.get_dpu_app_db().db_connection, "DASH_METER_RULE_TABLE") + + self.asic_dash_appliance_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_DASH_APPLIANCE") + self.asic_direction_lookup_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY") + self.asic_vip_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY") + self.asic_dash_vnet_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_VNET") + self.asic_eni_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ENI") + self.asic_eni_ether_addr_map_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY") + self.asic_dash_outbound_ca_to_pa_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY") + self.asic_pa_validation_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_PA_VALIDATION_ENTRY") + self.asic_outbound_routing_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY") + self.asic_inbound_routing_rule_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY") + self.asic_outbound_routing_group_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_GROUP") + self.asic_meter_policy_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_METER_POLICY") + self.asic_meter_rule_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_METER_RULE") + + def create_appliance(self, appliance_id, attr_maps: dict): + self.app_dash_appliance_table[str(appliance_id)] = attr_maps + + def remove_appliance(self, appliance_id): + del self.app_dash_appliance_table[str(appliance_id)] + + def create_vnet(self, vnet, attr_maps: dict): + self.app_dash_vnet_table[str(vnet)] = attr_maps + + def remove_vnet(self, vnet): + del self.app_dash_vnet_table[str(vnet)] + + def create_eni(self, eni, attr_maps: dict): + self.app_dash_eni_table[str(eni)] = attr_maps + + def remove_eni(self, eni): + del self.app_dash_eni_table[str(eni)] + + def create_eni_route(self, eni, attr_maps: dict): + self.app_dash_eni_route_table[str(eni)] = attr_maps + + def remove_eni_route(self, eni): + del self.app_dash_eni_route_table[str(eni)] + + def create_vnet_mapping(self, vnet, ip, attr_maps: dict): + self.app_dash_vnet_map_table[str(vnet) + ":" + str(ip)] = attr_maps + + def remove_vnet_mapping(self, vnet, ip): + del self.app_dash_vnet_map_table[str(vnet) + ":" + str(ip)] + + def create_route(self, route_group, ip, attr_maps: dict): + self.app_dash_route_table[str(route_group) + ":" + str(ip)] = attr_maps + + def remove_route(self, route_group, ip): + del self.app_dash_route_table[str(route_group) + ":" + str(ip)] + + def create_route_group(self, route_group, attr_maps: dict): + self.app_dash_route_group_table[str(route_group)] = attr_maps + + def remove_route_group(self, route_group): + del self.app_dash_route_group_table[str(route_group)] + + def create_inbound_routing(self, mac_string, vni, ip, attr_maps: dict): + self.app_dash_route_rule_table[str(mac_string) + ":" + str(vni) + ":" + str(ip)] = attr_maps + + def remove_inbound_routing(self, mac_string, vni, ip): + del self.app_dash_route_rule_table[str(mac_string) + ":" + str(vni) + ":" + str(ip)] + + def create_routing_type(self, routing_type, attr_maps: dict): + self.app_dash_routing_type_table[str(routing_type)] = attr_maps + + def remove_routing_type(self, routing_type): + del self.app_dash_routing_type_table[str(routing_type)] diff --git a/tests/test_dash_acl.py b/tests/dash/test_dash_acl.py similarity index 71% rename from tests/test_dash_acl.py rename to tests/dash/test_dash_acl.py index 446f0ee7c2f..3feaabdefd1 100644 --- a/tests/test_dash_acl.py +++ b/tests/dash/test_dash_acl.py @@ -87,10 +87,12 @@ def __setitem__(self, key: str, pairs: Union[dict, list, tuple]): pairs_str.append((to_string(k), to_string(v))) self.table.set(key, pairs_str) self.keys.add(key) + time.sleep(1) def __delitem__(self, key: str): self.table.delete(str(key)) self.keys.discard(key) + time.sleep(1) def get_keys(self): return self.keys @@ -152,7 +154,7 @@ def __init__(self, dvs): for table in APPL_DB_TABLE_LIST: pst = ProduceStateTable( - self.dvs.get_app_db(), table + self.dvs.get_dpu_app_db(), table ) table_variable_name = "app_{}".format(table.lower()) # Based on swsscommon convention for table names, assume @@ -235,10 +237,6 @@ def create_acl_rule(self, group_id, rule_id, action, terminating, priority, prot self.app_dash_acl_rule_table[str(group_id) + ":" + str(rule_id)] = {"pb": pb.SerializeToString()} - - def remove_acl_rule(self, group_id, rule_id): - del self.app_dash_acl_rule_table[str(group_id) + ":" + str(rule_id)] - def create_acl_group(self, group_id, ip_version): pb = AclGroup() pb.ip_version = IpVersion.IP_VERSION_IPV4 @@ -325,11 +323,13 @@ def ctx(self, dvs): yield acl_context - # Manually cleanup by deleting all remaining APPL_DB keys - for table in acl_context.app_db_tables: - keys = table.get_keys() - for key in list(keys): - del table[key] + # manually restart DVS to ensure full cleanup + # This is a temporary workaround until VS SAI supports implicit deletion of DASH objects + dvs.runcmd('killall5 -15') + dvs.net_cleanup() + dvs.destroy_servers() + dvs.create_servers() + dvs.restart() for table in acl_context.asic_db_tables: table.wait_for_n_keys(num_keys=0) @@ -353,8 +353,8 @@ def test_acl_flow(self, ctx): src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) - rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] - group1_id= ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_PRIORITY"] == "1" assert rule1_attr["SAI_DASH_ACL_RULE_ATTR_ACTION"] == "SAI_DASH_ACL_RULE_ACTION_PERMIT_AND_CONTINUE" @@ -380,14 +380,7 @@ def test_acl_flow(self, ctx): priority=3, action=Action.ACTION_PERMIT, terminating=False, src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) - ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3) - ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_2) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_3) - ctx.remove_acl_group(ACL_GROUP_1) - ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0) - ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0) + ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=4) def test_acl_group(self, ctx): ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV6) @@ -399,15 +392,12 @@ def test_acl_group(self, ctx): ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1) + # Skip below verification until VS SAI supports ACL rule implicit deletion # Remove group before removing its rule - ctx.remove_acl_group(ACL_GROUP_1) + # ctx.remove_acl_group(ACL_GROUP_1) # Wait a few seconds to make sure no changes are made # since group still contains a rule - time.sleep(3) - ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1) - - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) - ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0) + # ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0) def test_empty_acl_group_binding(self, ctx): """ @@ -429,12 +419,46 @@ def test_empty_acl_group_binding(self, ctx): src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) # Now that the group contains a rule, expect binding to occur + ctx.bind_acl_out(self.eni_name, ACL_STAGE_1, v4_group_id = ACL_GROUP_1) ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: acl_group_key}) # Unbinding should occur immediately ctx.unbind_acl_out(self.eni_name, ACL_STAGE_1) ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: SAI_NULL_OID}) + def test_acl_rule_after_group_bind(self, ctx): + eni_key = ctx.asic_eni_table.get_keys()[0] + sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=ACL_STAGE_1) + + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) + acl_group_key = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1) + + self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, acl_group_key) + + # The new rule should not be created since the group is bound + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, + priority=2, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + time.sleep(3) + ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1) + + # Unbinding the group + ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: SAI_NULL_OID}) + + # Now the rule can be created since the group is no longer bound + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, + priority=2, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=2) + def test_acl_group_binding(self, ctx): eni_key = ctx.asic_eni_table.get_keys()[0] sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=ACL_STAGE_2) @@ -480,18 +504,11 @@ def test_acl_rule(self, ctx): src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=2) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_2) - ctx.remove_acl_group(ACL_GROUP_1) - ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0) - ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0) - - @pytest.mark.parametrize("bind_group", [True, False]) - def test_prefix_single_tag(self, ctx, bind_group): + # @pytest.mark.parametrize("bind_group", [True, False]) + def test_prefix_single_tag(self, ctx): tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) - tag2_prefixes = {"192.168.1.0/30", "192.168.2.0/30", "192.168.3.0/30"} ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) @@ -509,47 +526,43 @@ def test_prefix_single_tag(self, ctx, bind_group): assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes - if bind_group: - self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) + # Updating tags will not be supported until full tag support is implemented in SAI + # if bind_group: + # self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) - tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"} - ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + # tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"} + # ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) - time.sleep(3) + # time.sleep(3) - rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] - rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + # rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + # rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] - if bind_group: - new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] - assert new_group1_id != group1_id - self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) + # if bind_group: + # new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + # assert new_group1_id != group1_id + # self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes - tag2_prefixes = {"192.168.2.0/30", "192.168.3.0/30"} - ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + # tag2_prefixes = {"192.168.2.0/30", "192.168.3.0/30"} + # ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) - time.sleep(3) + # time.sleep(3) - ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1) - rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] - rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + # ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1) + # rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + # rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes - if bind_group: - ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + # if bind_group: + # ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) - ctx.remove_acl_group(ACL_GROUP_1) - ctx.remove_prefix_tag(TAG_1) - ctx.remove_prefix_tag(TAG_2) - - @pytest.mark.parametrize("bind_group", [True, False]) - def test_multiple_tags(self, ctx, bind_group): + # @pytest.mark.parametrize("bind_group", [True, False]) + def test_multiple_tags(self, ctx): tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) @@ -574,40 +587,35 @@ def test_multiple_tags(self, ctx, bind_group): assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes.union(tag2_prefixes) assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes.union(tag3_prefixes) - if bind_group: - self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) + # Updating tags will not be supported until full tag support is implemented in SAI + # if bind_group: + # self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) - tag2_prefixes = {"192.168.10.0/30", "192.168.11.0/30", "192.168.12.0/30"} - ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + # tag2_prefixes = {"192.168.10.0/30", "192.168.11.0/30", "192.168.12.0/30"} + # ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) - tag3_prefixes = {"3.13.0.0/16", "3.14.0.0/16", "4.14.4.0/24", "5.15.5.0/24"} - ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) + # tag3_prefixes = {"3.13.0.0/16", "3.14.0.0/16", "4.14.4.0/24", "5.15.5.0/24"} + # ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) - time.sleep(3) - - if bind_group: - new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] - assert new_group1_id != group1_id + # time.sleep(3) - self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) + # if bind_group: + # new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + # assert new_group1_id != group1_id - rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] - rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + # self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes.union(tag2_prefixes) - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes.union(tag3_prefixes) + # rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + # rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] - if bind_group: - ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes.union(tag2_prefixes) + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes.union(tag3_prefixes) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) - ctx.remove_acl_group(ACL_GROUP_1) - ctx.remove_prefix_tag(TAG_1) - ctx.remove_prefix_tag(TAG_2) - ctx.remove_prefix_tag(TAG_3) + # if bind_group: + # ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) - @pytest.mark.parametrize("bind_group", [True, False]) - def test_multiple_tags_and_prefixes(self, ctx, bind_group): + # @pytest.mark.parametrize("bind_group", [True, False]) + def test_multiple_tags_and_prefixes(self, ctx): tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) @@ -637,45 +645,40 @@ def test_multiple_tags_and_prefixes(self, ctx, bind_group): assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == super_set assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == prefix_list - if bind_group: - self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) - - tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} - ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + # Updating tags will not be supported until full tag support is implemented in SAI + # if bind_group: + # self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) - tag2_prefixes = {"192.168.1.2/32", "192.168.2.2/32", "192.168.1.2/32"} - ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + # tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} + # ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) - tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"} - ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) + # tag2_prefixes = {"192.168.1.2/32", "192.168.2.2/32", "192.168.1.2/32"} + # ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) - time.sleep(3) + # tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"} + # ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) - if bind_group: - new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] - assert new_group1_id != group1_id - self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) + # time.sleep(3) - rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] - rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + # if bind_group: + # new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + # assert new_group1_id != group1_id + # self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) - super_set = set() - super_set.update(tag1_prefixes, tag2_prefixes, tag3_prefixes) + # rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + # rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == super_set - assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == prefix_list + # super_set = set() + # super_set.update(tag1_prefixes, tag2_prefixes, tag3_prefixes) - if bind_group: - ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == super_set + # assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == prefix_list - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) - ctx.remove_acl_group(ACL_GROUP_1) - ctx.remove_prefix_tag(TAG_1) - ctx.remove_prefix_tag(TAG_2) - ctx.remove_prefix_tag(TAG_3) + # if bind_group: + # ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) - @pytest.mark.parametrize("bind_group", [True, False]) - def test_multiple_groups_prefix_single_tag(self, ctx, bind_group): + # @pytest.mark.parametrize("bind_group", [True, False]) + def test_multiple_groups_prefix_single_tag(self, ctx): groups = [ACL_GROUP_1, ACL_GROUP_2, ACL_GROUP_3] stages = [ACL_STAGE_1, ACL_STAGE_2, ACL_STAGE_3] @@ -696,44 +699,38 @@ def test_multiple_groups_prefix_single_tag(self, ctx, bind_group): rule_attrs = ctx.asic_dash_acl_rule_table[rid] assert prefix_list_to_set(rule_attrs["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes - if bind_group: - eni_stages = [] - eni_key = ctx.asic_eni_table.get_keys()[0] - for stage, group in zip(stages, groups): - ctx.bind_acl_in(self.eni_name, stage, group) - eni_stages.append(get_sai_stage(outbound=False, v4=True, stage_num=stage)) + # Updating tags will not be supported until full tag support is implemented in SAI + # if bind_group: + # eni_stages = [] + # eni_key = ctx.asic_eni_table.get_keys()[0] + # for stage, group in zip(stages, groups): + # ctx.bind_acl_in(self.eni_name, stage, group) + # eni_stages.append(get_sai_stage(outbound=False, v4=True, stage_num=stage)) - ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages) - for stage in eni_stages: - assert ctx.asic_eni_table[eni_key][stage] in group_ids + # ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages) + # for stage in eni_stages: + # assert ctx.asic_eni_table[eni_key][stage] in group_ids - tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"} - ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + # tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"} + # ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) - time.sleep(3) + # time.sleep(3) - rule_ids = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3) + # rule_ids = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3) - for rid in rule_ids: - rule_attrs = ctx.asic_dash_acl_rule_table[rid] - assert prefix_list_to_set(rule_attrs["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes - - if bind_group: - new_group_ids = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=3) + # for rid in rule_ids: + # rule_attrs = ctx.asic_dash_acl_rule_table[rid] + # assert prefix_list_to_set(rule_attrs["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes - ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages) - for stage in eni_stages: - assert ctx.asic_eni_table[eni_key][stage] in new_group_ids + # if bind_group: + # new_group_ids = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=3) - for stage in stages: - ctx.unbind_acl_in(self.eni_name, stage) + # ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages) + # for stage in eni_stages: + # assert ctx.asic_eni_table[eni_key][stage] in new_group_ids - for group in groups: - ctx.remove_acl_rule(group, ACL_RULE_1) - ctx.remove_acl_group(group) - - ctx.remove_prefix_tag(TAG_1) - ctx.remove_prefix_tag(TAG_2) + # for stage in stages: + # ctx.unbind_acl_in(self.eni_name, stage) def test_tag_remove(self, ctx): tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} @@ -757,21 +754,45 @@ def test_tag_remove(self, ctx): ctx.remove_prefix_tag(TAG_1) time.sleep(1) - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, priority=2, action=Action.ACTION_DENY, terminating=False, src_tag=[TAG_1], dst_addr=["192.168.1.2/30", "192.168.2.2/30", "192.168.3.2/30"], src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) - rule2_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule2_id = list(filter(lambda x: x != rule1_id, ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=2)))[0] rule2_attr = ctx.asic_dash_acl_rule_table[rule2_id] assert prefix_list_to_set(rule2_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) - ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_2) - ctx.remove_acl_group(ACL_GROUP_1) - ctx.remove_prefix_tag(TAG_1) - ctx.remove_prefix_tag(TAG_2) + def test_tag_create_delay(self, ctx): + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) + ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + + # Create acl rule before the TAG1, TAG_2 + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_tag=[TAG_1], dst_tag=[TAG_2], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + + # The rule should not be created since the TAG_1, TAG_2 are not created yet + time.sleep(3) + ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0) + + tagsrc_prefixes = {"1.2.3.4/32", "5.6.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tagsrc_prefixes) + + # The rule should not be created since the TAG_2 is not created yet + time.sleep(3) + ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0) + + tagdst_prefixes = {"10.20.30.40/32", "50.60.0.0/16"} + ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tagdst_prefixes) + + rule_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule_attr = ctx.asic_dash_acl_rule_table[rule_id] + + assert prefix_list_to_set(rule_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tagsrc_prefixes + assert prefix_list_to_set(rule_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tagdst_prefixes # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down diff --git a/tests/dash/test_dash_meter.py b/tests/dash/test_dash_meter.py new file mode 100644 index 00000000000..0ef06f5ab44 --- /dev/null +++ b/tests/dash/test_dash_meter.py @@ -0,0 +1,164 @@ +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.eni_route_pb2 import * +from dash_api.route_pb2 import * +from dash_api.route_group_pb2 import * +from dash_api.route_rule_pb2 import * +from dash_api.vnet_mapping_pb2 import * +from dash_api.route_type_pb2 import * +from dash_api.meter_policy_pb2 import * +from dash_api.meter_rule_pb2 import * +from dash_api.types_pb2 import * +from dvslib.dvs_flex_counter import TestFlexCountersBase + +from dash_db import DashDB, dash_db_module as dash_db +from dash_db import ASIC_METER_POLICY_TABLE, ASIC_METER_RULE_TABLE, ASIC_ENI_TABLE +from dash_configs import * + +import time +import uuid +import ipaddress +import socket + +from dvslib.sai_utils import assert_sai_attribute_exists +from dvslib.dvs_common import PollingConfig, wait_for_result + +from swsscommon.swsscommon import ( + APP_DASH_METER_POLICY_TABLE_NAME, + APP_DASH_METER_RULE_TABLE_NAME, + APP_DASH_ENI_TABLE_NAME, + APP_DASH_VNET_TABLE_NAME, + APP_DASH_APPLIANCE_TABLE_NAME, +) + +meter_counter_group_meta = { + 'key': 'DASH_METER', + 'group_name': 'METER_STAT_COUNTER', + 'name_map': 'COUNTERS_ENI_NAME_MAP', + 'post_test': 'post_meter_counter_test' +} + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + +ENTRIES = 2 +policy_v4_oid = 0 +policy_v6_oid = 0 +rule_v4_oid = 0 +rule_v6_oid = 0 + +class TestDashMeter(TestFlexCountersBase): + + def test_v4_meter(self, dash_db: DashDB): + global policy_v4_oid + global rule_v4_oid + + dash_db.set_app_db_entry(APP_DASH_METER_POLICY_TABLE_NAME, METER_POLICY_V4, METER_POLICY_V4_CONFIG) + policy_v4_oid = dash_db.wait_for_asic_db_keys(ASIC_METER_POLICY_TABLE)[0] + policy_attrs = dash_db.get_asic_db_entry(ASIC_METER_POLICY_TABLE, policy_v4_oid) + assert_sai_attribute_exists("SAI_METER_POLICY_ATTR_IP_ADDR_FAMILY", policy_attrs, "SAI_IP_ADDR_FAMILY_IPV4") + + dash_db.set_app_db_entry(APP_DASH_METER_RULE_TABLE_NAME, METER_POLICY_V4, METER_RULE_1_NUM, METER_RULE_1_CONFIG) + rule_v4_oid = dash_db.wait_for_asic_db_keys(ASIC_METER_RULE_TABLE)[0] + rule_attrs = dash_db.get_asic_db_entry(ASIC_METER_RULE_TABLE, rule_v4_oid) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_PRIORITY", rule_attrs, METER_RULE_1_PRIORITY) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_METER_CLASS", rule_attrs, METER_RULE_1_METERING_CLASS) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_METER_POLICY_ID", rule_attrs, policy_v4_oid) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_DIP", rule_attrs, METER_RULE_1_IP) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_DIP_MASK", rule_attrs, METER_RULE_1_IP_MASK) + + def test_v6_meter(self, dash_db: DashDB): + global policy_v6_oid + global rule_v6_oid + + dash_db.set_app_db_entry(APP_DASH_METER_POLICY_TABLE_NAME, METER_POLICY_V6, METER_POLICY_V6_CONFIG) + oids = dash_db.wait_for_asic_db_keys(ASIC_METER_POLICY_TABLE, min_keys=ENTRIES) + for oid in oids: + if oid != policy_v4_oid: + policy_v6_oid = oid + break + policy_attrs = dash_db.get_asic_db_entry(ASIC_METER_POLICY_TABLE, policy_v6_oid) + assert_sai_attribute_exists("SAI_METER_POLICY_ATTR_IP_ADDR_FAMILY", policy_attrs, "SAI_IP_ADDR_FAMILY_IPV6") + + dash_db.set_app_db_entry(APP_DASH_METER_RULE_TABLE_NAME, METER_POLICY_V6, METER_RULE_2_NUM, METER_RULE_2_CONFIG) + oids = dash_db.wait_for_asic_db_keys(ASIC_METER_RULE_TABLE, min_keys=ENTRIES) + for oid in oids: + if oid != rule_v4_oid: + rule_v6_oid = oid + break + rule_attrs = dash_db.get_asic_db_entry(ASIC_METER_RULE_TABLE, rule_v6_oid) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_METER_CLASS", rule_attrs, METER_RULE_2_METERING_CLASS) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_METER_POLICY_ID", rule_attrs, policy_v6_oid) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_DIP", rule_attrs, METER_RULE_2_IP) + assert_sai_attribute_exists("SAI_METER_RULE_ATTR_DIP_MASK", rule_attrs, METER_RULE_2_IP_MASK) + + def post_meter_counter_test(self, meta_data): + counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable', check_name_map=False) + + for counter_entry in counters_keys.items(): + self.wait_for_id_list_remove(meta_data['group_name'], counter_entry[0], counter_entry[1]) + + def test_eni(self, dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_APPLIANCE_TABLE_NAME, APPLIANCE_ID, APPLIANCE_CONFIG) + dash_db.set_app_db_entry(APP_DASH_VNET_TABLE_NAME, VNET1, VNET_CONFIG) + self.mac_string = "F4939FEFC47E" + self.mac_address = "F4:93:9F:EF:C4:7E" + pb = Eni() + pb.eni_id = "497f23d7-f0ac-4c99-a98f-59b470e8c7bd" + pb.mac_address = bytes.fromhex(self.mac_address.replace(":", "")) + pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(UNDERLAY_IP))) + pb.admin_state = State.STATE_ENABLED + pb.vnet = VNET1 + pb.v4_meter_policy_id = METER_POLICY_V4 + pb.v6_meter_policy_id = METER_POLICY_V6 + dash_db.create_eni(self.mac_string, {"pb": pb.SerializeToString()}) + + eni_oid = dash_db.wait_for_asic_db_keys(ASIC_ENI_TABLE)[0] + attrs = dash_db.get_asic_db_entry(ASIC_ENI_TABLE, eni_oid) + assert_sai_attribute_exists("SAI_ENI_ATTR_V4_METER_POLICY_ID", attrs, policy_v4_oid); + assert_sai_attribute_exists("SAI_ENI_ATTR_V6_METER_POLICY_ID", attrs, policy_v6_oid); + + time.sleep(1) + self.verify_flex_counter_flow(dash_db.dvs, meter_counter_group_meta) + + def test_remove(self, dash_db: DashDB): + self.meter_policy_id = METER_POLICY_V4 + self.meter_rule_num = METER_RULE_1_NUM + self.mac_string = "F4939FEFC47E" + policy_found = False + rule_found = False + + ### verify meter policy cannot be removed with ENI bound to policy + dash_db.remove_app_db_entry(APP_DASH_METER_POLICY_TABLE_NAME, self.meter_policy_id) + time.sleep(20) + meter_policy_oids = dash_db.wait_for_asic_db_keys(ASIC_METER_POLICY_TABLE, min_keys=ENTRIES) + for oid in meter_policy_oids: + if oid == policy_v4_oid: + policy_found = True + break + assert(policy_found) + + ### remove ENI to allow meter rule/policy delete. + dash_db.remove_eni(self.mac_string) + dash_db.remove_app_db_entry(APP_DASH_VNET_TABLE_NAME, VNET1) + dash_db.remove_app_db_entry(APP_DASH_APPLIANCE_TABLE_NAME, APPLIANCE_ID) + + dash_db.remove_app_db_entry(APP_DASH_METER_RULE_TABLE_NAME, METER_POLICY_V4, METER_RULE_1_NUM) + dash_db.remove_app_db_entry(APP_DASH_METER_POLICY_TABLE_NAME, METER_POLICY_V4) + dash_db.wait_for_asic_db_key_del(ASIC_METER_RULE_TABLE, rule_v4_oid) + dash_db.wait_for_asic_db_key_del(ASIC_METER_POLICY_TABLE, policy_v4_oid) + meter_policy_oids = dash_db.wait_for_asic_db_keys(ASIC_METER_POLICY_TABLE) + meter_rule_oids = dash_db.wait_for_asic_db_keys(ASIC_METER_RULE_TABLE) + assert meter_policy_oids[0] == policy_v6_oid + assert meter_rule_oids[0] == rule_v6_oid + dash_db.remove_app_db_entry(APP_DASH_METER_RULE_TABLE_NAME, METER_POLICY_V6, METER_RULE_2_NUM) + dash_db.remove_app_db_entry(APP_DASH_METER_POLICY_TABLE_NAME, METER_POLICY_V6) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down +# before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/dash/test_dash_pl.py b/tests/dash/test_dash_pl.py new file mode 100644 index 00000000000..f72cd2f7bf9 --- /dev/null +++ b/tests/dash/test_dash_pl.py @@ -0,0 +1,98 @@ +import pytest + +from dvslib.sai_utils import assert_sai_attribute_exists + +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.route_pb2 import * +from dash_api.route_rule_pb2 import * +from dash_api.vnet_mapping_pb2 import * +from dash_api.route_type_pb2 import * +from dash_api.types_pb2 import * + +from dash_db import dash_db, DashDB +from dash_configs import * +from sai_attrs import * +from swsscommon.swsscommon import ( + APP_DASH_APPLIANCE_TABLE_NAME, + APP_DASH_ENI_TABLE_NAME, + APP_DASH_VNET_TABLE_NAME, + APP_DASH_VNET_MAPPING_TABLE_NAME, + APP_DASH_ROUTE_TABLE_NAME, + APP_DASH_ENI_ROUTE_TABLE_NAME, + APP_DASH_ROUTING_TYPE_TABLE_NAME, + APP_DASH_ROUTE_GROUP_TABLE_NAME, + APP_DASH_TUNNEL_TABLE_NAME +) + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + + +@pytest.fixture(autouse=True) +def common_setup_teardown(dash_db: DashDB, dvs): + dvs.runcmd("swssloglevel -l INFO -c orchagent") + dash_db.set_app_db_entry(APP_DASH_APPLIANCE_TABLE_NAME, APPLIANCE_ID, APPLIANCE_CONFIG) + dash_db.set_app_db_entry(APP_DASH_VNET_TABLE_NAME, VNET1, VNET_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ENI_TABLE_NAME, ENI_ID, ENI_CONFIG) + dash_db.set_app_db_entry(APP_DASH_VNET_MAPPING_TABLE_NAME, VNET1, VNET_MAP_IP1, VNET_MAPPING_CONFIG_PRIVATELINK) + dash_db.set_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP1, ROUTE_GROUP1_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ROUTING_TYPE_TABLE_NAME, PRIVATELINK, ROUTING_TYPE_PL_CONFIG) + # Don't set DASH_ROUTE_TABLE and DASH_ENI_ROUTE_TABLE entries here for flexibility, test cases will set them as needed + + yield + + # cleanup is handled automatically by the dash_db fixture + + +def test_pl_eni_attrs(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX1, ROUTE_VNET_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID, ENI_ROUTE_GROUP1_CONFIG) + + enis = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_ENI") + eni_attrs = dash_db.get_asic_db_entry("ASIC_STATE:SAI_OBJECT_TYPE_ENI", enis[0]) + assert_sai_attribute_exists(SAI_ENI_ATTR_PL_UNDERLAY_SIP, eni_attrs, PL_UNDERLAY_SIP1) + assert_sai_attribute_exists(SAI_ENI_ATTR_PL_SIP, eni_attrs, PL_ENCODING_IP) + assert_sai_attribute_exists(SAI_ENI_ATTR_PL_SIP_MASK, eni_attrs, PL_ENCODING_MASK) + + +def test_pl_eni_override_underlay_sip(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX1, ROUTE_VNET_CONFIG_UNDERLAY_SIP) + dash_db.set_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID, ENI_ROUTE_GROUP1_CONFIG) + + outbound_routing_keys = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY") + outbound_routing_attrs = dash_db.get_asic_db_entry("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY", outbound_routing_keys[0]) + assert_sai_attribute_exists(SAI_OUTBOUND_ROUTING_ENTRY_ATTR_UNDERLAY_SIP, outbound_routing_attrs, PL_UNDERLAY_SIP2) + + +def test_pl_outbound_ca_to_pa_attrs(dash_db: DashDB): + outbound_ca_to_pa_keys = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY") + outbound_attrs = dash_db.get_asic_db_entry("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY", outbound_ca_to_pa_keys[0]) + + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_ACTION, outbound_attrs, SAI_OUTBOUND_CA_TO_PA_ENTRY_ACTION_SET_PRIVATE_LINK_MAPPING) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP, outbound_attrs, PL_OVERLAY_SIP) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP_MASK, outbound_attrs, PL_OVERLAY_SIP_MASK) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP, outbound_attrs, PL_OVERLAY_DIP) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP_MASK, outbound_attrs, PL_OVERLAY_DIP_MASK) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_TUNNEL_KEY, outbound_attrs, ENCAP_VNI) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_ENCAPSULATION, outbound_attrs, SAI_DASH_ENCAPSULATION_NVGRE) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP, outbound_attrs, UNDERLAY_IP) + + dash_db.set_app_db_entry(APP_DASH_VNET_MAPPING_TABLE_NAME, VNET1, VNET_MAP_IP2, VNET_MAPPING_CONFIG_PLNSG) + dash_db.set_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, TUNNEL1, TUNNEL1_CONFIG) + new_keys = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY", old_keys=outbound_ca_to_pa_keys) + assert len(new_keys) == 1, f"Expected 1 new outbound ca to pa entries, found {len(new_keys)}" + tunnels = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL") + assert len(tunnels) == 1, f"Expected 1 tunnel, found {len(tunnels)}" + + outbound_attrs = dash_db.get_asic_db_entry("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY", new_keys[0]) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_ACTION, outbound_attrs, SAI_OUTBOUND_CA_TO_PA_ENTRY_ACTION_SET_PRIVATE_LINK_MAPPING) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP, outbound_attrs, PL_OVERLAY_SIP) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP_MASK, outbound_attrs, PL_OVERLAY_SIP_MASK) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP, outbound_attrs, PL_OVERLAY_DIP) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP_MASK, outbound_attrs, PL_OVERLAY_DIP_MASK) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_TUNNEL_KEY, outbound_attrs, ENCAP_VNI) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_ENCAPSULATION, outbound_attrs, SAI_DASH_ENCAPSULATION_NVGRE) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP, outbound_attrs, UNDERLAY_IP) + assert_sai_attribute_exists(SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_TUNNEL_ID, outbound_attrs, tunnels[0]) diff --git a/tests/dash/test_dash_route_group.py b/tests/dash/test_dash_route_group.py new file mode 100644 index 00000000000..f0d379cd133 --- /dev/null +++ b/tests/dash/test_dash_route_group.py @@ -0,0 +1,98 @@ +import pytest +import time +from dash_db import DashDB, dash_db +from dash_configs import * +from dvslib.sai_utils import assert_sai_attribute_exists +from sai_attrs import * +from swsscommon.swsscommon import ( + APP_DASH_APPLIANCE_TABLE_NAME, + APP_DASH_ENI_TABLE_NAME, + APP_DASH_VNET_TABLE_NAME, + APP_DASH_VNET_MAPPING_TABLE_NAME, + APP_DASH_ROUTE_TABLE_NAME, + APP_DASH_ENI_ROUTE_TABLE_NAME, + APP_DASH_ROUTING_TYPE_TABLE_NAME, + APP_DASH_ROUTE_GROUP_TABLE_NAME, +) + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + +@pytest.fixture(autouse=True) +def common_setup_teardown(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_APPLIANCE_TABLE_NAME, APPLIANCE_ID, APPLIANCE_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ROUTING_TYPE_TABLE_NAME, PRIVATELINK, ROUTING_TYPE_PL_CONFIG) + dash_db.set_app_db_entry(APP_DASH_VNET_TABLE_NAME, VNET1, VNET_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ENI_TABLE_NAME, ENI_ID, ENI_CONFIG) + dash_db.set_app_db_entry(APP_DASH_VNET_MAPPING_TABLE_NAME, VNET1, VNET_MAP_IP1, VNET_MAPPING_CONFIG_PRIVATELINK) + # Don't set DASH_ROUTE_TABLE and DASH_ENI_ROUTE_TABLE entries here for flexibility, test cases will set them as needed + + yield + + dash_db.remove_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID) + dash_db.remove_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX1) + dash_db.remove_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP2, OUTBOUND_ROUTE_PREFIX1) + dash_db.remove_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP1) + dash_db.remove_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP2) + dash_db.remove_app_db_entry(APP_DASH_VNET_MAPPING_TABLE_NAME, VNET1, VNET_MAP_IP1) + dash_db.remove_app_db_entry(APP_DASH_ENI_TABLE_NAME, ENI_ID) + dash_db.remove_app_db_entry(APP_DASH_VNET_TABLE_NAME, VNET1) + dash_db.remove_app_db_entry(APP_DASH_ROUTING_TYPE_TABLE_NAME, PRIVATELINK) + dash_db.remove_app_db_entry(APP_DASH_APPLIANCE_TABLE_NAME, APPLIANCE_ID) + + +def test_rebind_eni_route_group(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP1, ROUTE_GROUP1_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX1, ROUTE_VNET_CONFIG) + rg1_oid = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_GROUP")[0] + + dash_db.set_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP2, ROUTE_GROUP2_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP2, OUTBOUND_ROUTE_PREFIX1, ROUTE_VNET_CONFIG_UNDERLAY_SIP) + oids = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_GROUP", min_keys=2) + for oid in oids: + if oid != rg1_oid: + rg2_oid = oid + break + + dash_db.set_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID, ENI_ROUTE_GROUP1_CONFIG) + + eni_key = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_ENI")[0] + dash_db.wait_for_asic_db_field("ASIC_STATE:SAI_OBJECT_TYPE_ENI", eni_key, SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID, rg1_oid) + + dash_db.set_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID, ENI_ROUTE_GROUP2_CONFIG) + dash_db.wait_for_asic_db_field("ASIC_STATE:SAI_OBJECT_TYPE_ENI", eni_key, SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID, rg2_oid) + + +def test_duplicate_eni_route_group(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP1, ROUTE_GROUP1_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX1, ROUTE_VNET_CONFIG) + rg1_oid = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_GROUP")[0] + + dash_db.set_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID, ENI_ROUTE_GROUP1_CONFIG) + + eni_key = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_ENI")[0] + dash_db.wait_for_asic_db_field("ASIC_STATE:SAI_OBJECT_TYPE_ENI", eni_key, SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID, rg1_oid) + + dash_db.set_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID, ENI_ROUTE_GROUP1_CONFIG) + dash_db.wait_for_asic_db_field("ASIC_STATE:SAI_OBJECT_TYPE_ENI", eni_key, SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID, rg1_oid) + + +@pytest.mark.skip(reason="Test will crash orchagent until VS SAI is updated with implicit deletion for DASH objects") +def test_bound_route_group_immutable(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP1, ROUTE_GROUP1_CONFIG) + num_route_groups = len(dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_GROUP")) + dash_db.set_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX1, ROUTE_VNET_CONFIG) + num_routes = len(dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY")) + + dash_db.set_app_db_entry(APP_DASH_ENI_ROUTE_TABLE_NAME, ENI_ID, ENI_ROUTE_GROUP1_CONFIG) + dash_db.set_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX2, ROUTE_VNET_CONFIG_UNDERLAY_SIP) + time.sleep(3) + assert len(dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY")) == num_routes + + dash_db.remove_app_db_entry(APP_DASH_ROUTE_TABLE_NAME, ROUTE_GROUP1, OUTBOUND_ROUTE_PREFIX1) + time.sleep(3) + assert len(dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY")) == num_routes + + dash_db.remove_app_db_entry(APP_DASH_ROUTE_GROUP_TABLE_NAME, ROUTE_GROUP1) + time.sleep(3) + assert len(dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_GROUP")) == num_route_groups diff --git a/tests/dash/test_dash_tunnel.py b/tests/dash/test_dash_tunnel.py new file mode 100644 index 00000000000..28399019cc7 --- /dev/null +++ b/tests/dash/test_dash_tunnel.py @@ -0,0 +1,252 @@ +import base64 +import pytest +import socket +from ipaddress import ip_address as IP +from swsscommon.swsscommon import ( + APP_DASH_TUNNEL_TABLE_NAME, + APP_DASH_APPLIANCE_TABLE_NAME, +) + +import dash_configs as dc +from dash_db import dash_db_module, dash_db, DashDB +from dvslib.sai_utils import assert_sai_attribute_exists +import sai_attrs as sai +import dash_api.route_type_pb2 as rt + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + +def get_expected_tunnel_ips(tunnel_config): + # We expect orchagent to ignore duplicate IPs, so use a set to ensure the expected IPs are unique + ips = set() + for endpoint in tunnel_config["endpoints"]: + if "ipv4" in endpoint: + ip = IP(socket.ntohl(endpoint["ipv4"])) + else: + ip = IP(base64.b64decode(endpoint["ipv6"])) + ips.add(str(ip)) + + return list(ips) + + +def verify_sai_tunnel_endpoints( + dash_db, tunnel_oid, expected_ips, prev_member_keys=None +): + """ + Check if tunnel members and nhops were created correctly. + If the tunnel has multiple endpoints, we expect one tunnel member and one tunnel nhop per unique endpoint. + If the tunnel has a single endpoint, we expect no tunnel members or next hops. + """ + tunnel_member_oids = [] + tunnel_nhop_oids = [] + tunnel_nhop_ips = [] + if len(expected_ips) > 1: + member_keys = dash_db.wait_for_asic_db_keys( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_MEMBER", + min_keys=len(expected_ips), + old_keys=prev_member_keys, + ) + assert len(member_keys) == len(expected_ips), \ + f"Expected {len(expected_ips)} tunnel members, but got: {len(member_keys)}" + for member in member_keys: + attrs = dash_db.get_asic_db_entry( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_MEMBER", member + ) + if attrs[sai.SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_ID] == tunnel_oid: + tunnel_member_oids.append(member) + nhop_oid = attrs[ + sai.SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_NEXT_HOP_ID + ] + nhop_ip = dash_db.get_asic_db_entry( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_NEXT_HOP", nhop_oid + ) + tunnel_nhop_ips.append(nhop_ip[sai.SAI_DASH_TUNNEL_NEXT_HOP_ATTR_DIP]) + tunnel_nhop_oids.append(nhop_oid) + + assert len(tunnel_nhop_oids) == len(expected_ips), \ + f"Expected {len(expected_ips)} tunnel nhops, but got: {len(tunnel_nhop_oids)}" + assert sorted(tunnel_nhop_ips) == sorted(expected_ips), \ + f"Expected tunnel nhop IPs: {expected_ips}, but got: {tunnel_nhop_ips}" + assert len(tunnel_member_oids) == len(expected_ips), \ + f"Expected {len(expected_ips)} tunnel members, but got: {len(tunnel_member_oids)}" + else: + member_keys = dash_db.wait_for_asic_db_keys( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_MEMBER", + min_keys=0, + old_keys=prev_member_keys, + ) + assert (len(member_keys) == 0), \ + f"Expected no tunnel members for single endpoint, but got: {len(member_keys)}" + + return tunnel_member_oids, tunnel_nhop_oids + + +def verify_sai_tunnel(dash_db, tunnel_oid, tunnel_config, prev_member_keys=None): + tunnel_attrs = dash_db.get_asic_db_entry( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", tunnel_oid + ) + expected_ips = get_expected_tunnel_ips(tunnel_config) + + if tunnel_config["encap_type"] == rt.EncapType.ENCAP_TYPE_VXLAN: + assert_sai_attribute_exists( + sai.SAI_DASH_TUNNEL_ATTR_DASH_ENCAPSULATION, + tunnel_attrs, + sai.SAI_DASH_ENCAPSULATION_VXLAN, + ) + else: + assert_sai_attribute_exists( + sai.SAI_DASH_TUNNEL_ATTR_DASH_ENCAPSULATION, + tunnel_attrs, + sai.SAI_DASH_ENCAPSULATION_NVGRE, + ) + assert_sai_attribute_exists( + sai.SAI_DASH_TUNNEL_ATTR_TUNNEL_KEY, tunnel_attrs, tunnel_config["vni"] + ) + assert_sai_attribute_exists( + sai.SAI_DASH_TUNNEL_ATTR_MAX_MEMBER_SIZE, + tunnel_attrs, + len(tunnel_config["endpoints"]), + ) + assert_sai_attribute_exists(sai.SAI_DASH_TUNNEL_ATTR_SIP, tunnel_attrs, dc.SIP) + + if len(expected_ips) == 1: + assert_sai_attribute_exists( + sai.SAI_DASH_TUNNEL_ATTR_DIP, tunnel_attrs, expected_ips[0] + ) + else: + assert ( + sai.SAI_DASH_TUNNEL_ATTR_DIP not in tunnel_attrs + ), "DIP attribute should not be present for multiple endpoints" + + return verify_sai_tunnel_endpoints( + dash_db, tunnel_oid, expected_ips, prev_member_keys + ) + + +@pytest.fixture(autouse=True) +def common_setup_teardown(dash_db_module: DashDB): + dash_db_module.set_app_db_entry( + APP_DASH_APPLIANCE_TABLE_NAME, dc.APPLIANCE_ID, dc.APPLIANCE_CONFIG + ) + yield + dash_db_module.remove_app_db_entry(APP_DASH_APPLIANCE_TABLE_NAME, dc.APPLIANCE_ID) + + +@pytest.fixture(autouse=True) +def tunnel_cleanup(dvs, dash_db: DashDB): + yield + tunnels = [dc.TUNNEL1, dc.TUNNEL2, dc.TUNNEL3, dc.TUNNEL4, dc.TUNNEL5] + for t in tunnels: + dash_db.remove_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, t) + + dvs.check_services_ready() + + +def test_dash_tunnel_single_endpoint(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL1, dc.TUNNEL1_CONFIG) + tunnels = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL") + assert len(tunnels) == 1 + verify_sai_tunnel(dash_db, tunnels[0], dc.TUNNEL1_CONFIG) + dash_db.remove_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL1) + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", tunnels[0] + ) + + +def test_dash_tunnel_duplicate_tunnels(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL1, dc.TUNNEL1_CONFIG) + tunnels = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL") + assert len(tunnels) == 1 + verify_sai_tunnel(dash_db, tunnels[0], dc.TUNNEL1_CONFIG) + + dash_db.set_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL1, dc.TUNNEL2_CONFIG) + new_tunnels = dash_db.wait_for_asic_db_keys( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", min_keys=0, old_keys=tunnels + ) + assert ( + len(new_tunnels) == 0 + ), f"Expected no new tunnels, but got: {len(new_tunnels)}" + # The 2nd APP DB write should be rejected,so we expect SAI to still reflect the first config written + verify_sai_tunnel(dash_db, tunnels[0], dc.TUNNEL1_CONFIG) + + dash_db.remove_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL1) + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", tunnels[0] + ) + + +def test_dash_tunnel_multiple_endpoints(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL2, dc.TUNNEL2_CONFIG) + tunnels = dash_db.wait_for_asic_db_keys("ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL") + assert len(tunnels) == 1 + verify_sai_tunnel( + dash_db, tunnels[0], dc.TUNNEL2_CONFIG + ) + + dash_db.remove_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL2) + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", tunnels[0] + ) + + +def test_dash_tunnel_duplicate_endpoints(dash_db: DashDB): + dash_db.set_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL5, dc.TUNNEL5_CONFIG) + tunnel_oids = dash_db.wait_for_asic_db_keys( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL" + ) + assert len(tunnel_oids) == 1 + tunnel_members, tunnel_nhops = verify_sai_tunnel( + dash_db, tunnel_oids[0], dc.TUNNEL5_CONFIG + ) + + dash_db.remove_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, dc.TUNNEL5) + for member in tunnel_members: + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_MEMBER", member + ) + for nhop in tunnel_nhops: + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_NEXT_HOP", nhop + ) + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", tunnel_oids[0] + ) + + +def test_dash_multi_tunnel(dash_db: DashDB): + prev_tunnels, prev_members, prev_nhops = [], [], [] + tunnel_configs = [ + (dc.TUNNEL1, dc.TUNNEL1_CONFIG), + (dc.TUNNEL2, dc.TUNNEL2_CONFIG), + (dc.TUNNEL3, dc.TUNNEL3_CONFIG), + (dc.TUNNEL4, dc.TUNNEL4_CONFIG), + ] + + for tunnel_name, tunnel_config in tunnel_configs: + dash_db.set_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, tunnel_name, tunnel_config) + tunnel_oids = dash_db.wait_for_asic_db_keys( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", old_keys=prev_tunnels + ) + assert len(tunnel_oids) == 1 + member_oids, nhop_oids = verify_sai_tunnel( + dash_db, tunnel_oids[0], tunnel_config, prev_members + ) + prev_tunnels += tunnel_oids + prev_members += member_oids + prev_nhops += nhop_oids + + for tunnel_name, _ in tunnel_configs: + dash_db.remove_app_db_entry(APP_DASH_TUNNEL_TABLE_NAME, tunnel_name) + + for member in prev_members: + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_MEMBER", member + ) + for nhop in prev_nhops: + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL_NEXT_HOP", nhop + ) + for tunnel in prev_tunnels: + dash_db.wait_for_asic_db_key_del( + "ASIC_STATE:SAI_OBJECT_TYPE_DASH_TUNNEL", tunnel + ) diff --git a/tests/dash/test_dash_vnet.py b/tests/dash/test_dash_vnet.py new file mode 100644 index 00000000000..dcc8be8c0ba --- /dev/null +++ b/tests/dash/test_dash_vnet.py @@ -0,0 +1,301 @@ +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.eni_route_pb2 import * +from dash_api.route_pb2 import * +from dash_api.route_group_pb2 import * +from dash_api.route_rule_pb2 import * +from dash_api.vnet_mapping_pb2 import * +from dash_api.route_type_pb2 import * +from dash_api.types_pb2 import * +from dvslib.dvs_flex_counter import TestFlexCountersBase + +from dash_db import ASIC_DASH_APPLIANCE_TABLE, ASIC_DIRECTION_LOOKUP_TABLE, ASIC_ENI_ETHER_ADDR_MAP_TABLE, ASIC_ENI_TABLE, ASIC_INBOUND_ROUTING_TABLE, ASIC_OUTBOUND_CA_TO_PA_TABLE, ASIC_OUTBOUND_ROUTING_GROUP_TABLE, ASIC_OUTBOUND_ROUTING_TABLE, ASIC_PA_VALIDATION_TABLE, ASIC_VIP_TABLE, ASIC_VNET_TABLE, DashDB, dash_db_module as dash_db +from dash_configs import * + +import time +import uuid +import ipaddress +import socket + +from dvslib.sai_utils import assert_sai_attribute_exists + +eni_counter_group_meta = { + 'key': 'ENI', + 'group_name': 'ENI_STAT_COUNTER', + 'name_map': 'COUNTERS_ENI_NAME_MAP', + 'post_test': 'post_eni_counter_test' +} + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + +class TestDash(TestFlexCountersBase): + def test_appliance(self, dash_db: DashDB): + # verify vnet creation before appliance is rejected + vnet = "Vnet151" + vni = "75651" + guid = "559c6ce8-26ab-5651-b946-ccc6e8f930b2" + pb = Vnet() + pb.vni = int(vni) + pb.guid.value = bytes.fromhex(uuid.UUID(guid).hex) + dash_db.create_vnet(vnet, {"pb": pb.SerializeToString()}) + keys = dash_db.get_asic_db_keys(ASIC_VNET_TABLE) + time.sleep(2) + assert len(keys) == 0, "VNET wrongly pushed to SAI before DASH Appliance" + dash_db.remove_vnet(vnet) + + self.appliance_id = "100" + self.sip = "10.0.0.1" + self.vm_vni = "4321" + self.local_region_id = "10" + + # verify behavior when outbound_direction_lookup is specified + pb = Appliance() + pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.sip))) + pb.vm_vni = int(self.vm_vni) + pb.local_region_id = int(self.local_region_id) + pb.outbound_direction_lookup = "dst_mac" + dash_db.create_appliance(self.appliance_id, {"pb": pb.SerializeToString()}) + direction_keys = dash_db.wait_for_asic_db_keys(ASIC_DIRECTION_LOOKUP_TABLE) + dl_attrs = dash_db.get_asic_db_entry(ASIC_DIRECTION_LOOKUP_TABLE, direction_keys[0]) + assert_sai_attribute_exists("SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION", dl_attrs, "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_INBOUND_DIRECTION") + dash_db.remove_appliance(self.appliance_id) + time.sleep(2) + + pb = Appliance() + pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.sip))) + pb.vm_vni = int(self.vm_vni) + pb.local_region_id = int(self.local_region_id) + dash_db.create_appliance(self.appliance_id, {"pb": pb.SerializeToString()}) + + dash_appl_keys = dash_db.wait_for_asic_db_keys(ASIC_DASH_APPLIANCE_TABLE) + dash_appl_attrs = dash_db.get_asic_db_entry(ASIC_DASH_APPLIANCE_TABLE, dash_appl_keys[0]) + assert_sai_attribute_exists("SAI_DASH_APPLIANCE_ATTR_LOCAL_REGION_ID", dash_appl_attrs, self.local_region_id) + + direction_keys = dash_db.wait_for_asic_db_keys(ASIC_DIRECTION_LOOKUP_TABLE) + dl_attrs = dash_db.get_asic_db_entry(ASIC_DIRECTION_LOOKUP_TABLE, direction_keys[0]) + assert_sai_attribute_exists("SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION", dl_attrs, "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION") + # When outbound_direction_lookup is not specified, src mac is used by default + assert_sai_attribute_exists("SAI_DIRECTION_LOOKUP_ENTRY_ATTR_DASH_ENI_MAC_OVERRIDE_TYPE", dl_attrs, "SAI_DASH_ENI_MAC_OVERRIDE_TYPE_SRC_MAC") + + vip_keys = dash_db.wait_for_asic_db_keys(ASIC_VIP_TABLE) + vip_attrs = dash_db.get_asic_db_entry(ASIC_VIP_TABLE, vip_keys[0]) + assert_sai_attribute_exists("SAI_VIP_ENTRY_ATTR_ACTION", vip_attrs, "SAI_VIP_ENTRY_ACTION_ACCEPT") + + # verify duplicate appliance is not passed to SAI + dupl_appliance_id = "200" + pb = Appliance() + pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address("11.0.0.1"))) + pb.vm_vni = int(1111) + pb.local_region_id = int(self.local_region_id) + dash_db.create_appliance(dupl_appliance_id, {"pb": pb.SerializeToString()}) + time.sleep(2) + keys = dash_db.get_asic_db_keys(ASIC_DASH_APPLIANCE_TABLE) + assert len(keys) == 1, "duplicate DASH Appliance entry wrongly pushed to SAI" + dash_db.remove_appliance(dupl_appliance_id) + + + def test_vnet(self, dash_db: DashDB): + self.vnet = "Vnet1" + self.vni = "45654" + self.guid = "559c6ce8-26ab-4193-b946-ccc6e8f930b2" + pb = Vnet() + pb.vni = int(self.vni) + pb.guid.value = bytes.fromhex(uuid.UUID(self.guid).hex) + dash_db.create_vnet(self.vnet, {"pb": pb.SerializeToString()}) + + vnet_keys = dash_db.wait_for_asic_db_keys(ASIC_VNET_TABLE) + self.vnet_oid = vnet_keys[0] + vnet_attr = dash_db.get_asic_db_entry(ASIC_VNET_TABLE, self.vnet_oid) + assert_sai_attribute_exists("SAI_VNET_ATTR_VNI", vnet_attr, self.vni) + + def post_eni_counter_test(self, meta_data): + counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable', check_name_map=False) + + for counter_entry in counters_keys.items(): + self.wait_for_id_list_remove(meta_data['group_name'], counter_entry[0], counter_entry[1]) + + def test_eni(self, dash_db: DashDB): + self.vnet = "Vnet1" + self.mac_string = "F4939FEFC47E" + self.mac_address = "F4:93:9F:EF:C4:7E" + self.eni_id = "497f23d7-f0ac-4c99-a98f-59b470e8c7bd" + self.underlay_ip = "25.1.1.1" + self.admin_state = "enabled" + pb = Eni() + pb.eni_id = self.eni_id + pb.mac_address = bytes.fromhex(self.mac_address.replace(":", "")) + pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.underlay_ip))) + pb.admin_state = State.STATE_ENABLED + pb.vnet = self.vnet + dash_db.create_eni(self.mac_string, {"pb": pb.SerializeToString()}) + + vnets = dash_db.wait_for_asic_db_keys(ASIC_VNET_TABLE) + self.vnet_oid = vnets[0] + enis = dash_db.wait_for_asic_db_keys(ASIC_ENI_TABLE) + self.eni_oid = enis[0] + attrs = dash_db.get_asic_db_entry(ASIC_ENI_TABLE, self.eni_oid) + + assert_sai_attribute_exists("SAI_ENI_ATTR_VNET_ID", attrs, str(self.vnet_oid)) + assert_sai_attribute_exists("SAI_ENI_ATTR_ADMIN_STATE", attrs, "true") + + time.sleep(1) + self.verify_flex_counter_flow(dash_db.dvs, eni_counter_group_meta) + + eni_addr_maps = dash_db.wait_for_asic_db_keys(ASIC_ENI_ETHER_ADDR_MAP_TABLE) + attrs = dash_db.get_asic_db_entry(ASIC_ENI_ETHER_ADDR_MAP_TABLE, eni_addr_maps[0]) + assert_sai_attribute_exists("SAI_ENI_ETHER_ADDRESS_MAP_ENTRY_ATTR_ENI_ID", attrs, str(self.eni_oid)) + + # test admin state update + pb.admin_state = State.STATE_DISABLED + dash_db.create_eni(self.mac_string, {"pb": pb.SerializeToString()}) + dash_db.wait_for_asic_db_field(ASIC_ENI_TABLE, self.eni_oid, "SAI_ENI_ATTR_ADMIN_STATE", "false") + + def test_vnet_map(self, dash_db: DashDB): + self.vnet = "Vnet1" + self.ip1 = "10.1.1.1" + self.ip2 = "10.1.1.2" + self.mac_address = "F4:93:9F:EF:C4:7E" + self.routing_type = "vnet_encap" + self.underlay_ip = "101.1.2.3" + self.vnet_map_metering_class_or = "222" + route_type_msg = RouteType() + route_action = RouteTypeItem() + route_action.action_name = "action1" + route_action.action_type = ACTION_TYPE_STATICENCAP + route_action.encap_type = ENCAP_TYPE_NVGRE + route_type_msg.items.append(route_action) + dash_db.create_routing_type(self.routing_type, {"pb": route_type_msg.SerializeToString()}) + pb = VnetMapping() + pb.mac_address = bytes.fromhex(self.mac_address.replace(":", "")) + pb.action_type = RoutingType.ROUTING_TYPE_VNET_ENCAP + pb.metering_class_or = int(self.vnet_map_metering_class_or) + pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.underlay_ip))) + pb.use_dst_vni = False + + dash_db.create_vnet_mapping(self.vnet, self.ip1, {"pb": pb.SerializeToString()}) + dash_db.create_vnet_mapping(self.vnet, self.ip2, {"pb": pb.SerializeToString()}) + + vnet_ca_to_pa_maps = dash_db.wait_for_asic_db_keys(ASIC_OUTBOUND_CA_TO_PA_TABLE, min_keys=2) + attrs = dash_db.get_asic_db_entry(ASIC_OUTBOUND_CA_TO_PA_TABLE, vnet_ca_to_pa_maps[0]) + assert_sai_attribute_exists("SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP", attrs, self.underlay_ip) + assert_sai_attribute_exists("SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DMAC", attrs, self.mac_address) + assert_sai_attribute_exists("SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_ENCAPSULATION", attrs, "SAI_DASH_ENCAPSULATION_NVGRE") + assert_sai_attribute_exists("SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_METER_CLASS_OR", attrs, self.vnet_map_metering_class_or) + + vnet_pa_validation_maps = dash_db.wait_for_asic_db_keys(ASIC_PA_VALIDATION_TABLE) + pa_validation_attrs = dash_db.get_asic_db_entry(ASIC_PA_VALIDATION_TABLE, vnet_pa_validation_maps[0]) + assert_sai_attribute_exists("SAI_PA_VALIDATION_ENTRY_ATTR_ACTION", pa_validation_attrs, "SAI_PA_VALIDATION_ENTRY_ACTION_PERMIT") + + def test_outbound_routing(self, dash_db: DashDB): + pb = RouteGroup() + self.group_id = ROUTE_GROUP1 + dash_db.create_route_group(self.group_id, {"pb": pb.SerializeToString()}) + + outbound_routing_group_entries = dash_db.wait_for_asic_db_keys(ASIC_OUTBOUND_ROUTING_GROUP_TABLE) + + self.vnet = "Vnet1" + self.ip = "10.1.0.0/24" + self.action_type = "vnet_direct" + self.overlay_ip = "10.0.0.6" + self.outbound_metering_class_or = "333" + self.outbound_metering_class_and = "369" + pb = Route() + pb.action_type = RoutingType.ROUTING_TYPE_VNET_DIRECT + pb.vnet_direct.vnet = self.vnet + pb.vnet_direct.overlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.overlay_ip))) + pb.metering_class_or = int(self.outbound_metering_class_or) + pb.metering_class_and = int(self.outbound_metering_class_and) + dash_db.create_route(self.group_id, self.ip, {"pb": pb.SerializeToString()}) + + outbound_routing_entries = dash_db.wait_for_asic_db_keys(ASIC_OUTBOUND_ROUTING_TABLE) + routing_attrs = dash_db.get_asic_db_entry(ASIC_OUTBOUND_ROUTING_TABLE, outbound_routing_entries[0]) + assert_sai_attribute_exists("SAI_OUTBOUND_ROUTING_ENTRY_ATTR_ACTION", routing_attrs, "SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_VNET_DIRECT") + assert_sai_attribute_exists("SAI_OUTBOUND_ROUTING_ENTRY_ATTR_OVERLAY_IP", routing_attrs, self.overlay_ip) + assert_sai_attribute_exists("SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DST_VNET_ID", routing_attrs) + assert_sai_attribute_exists("SAI_OUTBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_OR", routing_attrs, self.outbound_metering_class_or) + assert_sai_attribute_exists("SAI_OUTBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_AND", routing_attrs, self.outbound_metering_class_and) + + def test_outbound_routing_dependency(self, dash_db: DashDB): + vnet = "Vnet2" + prefix1 = "10.1.1.0/24" + prefix2 = "10.1.2.0/24" + overlay_ip = "10.0.0.7" + group_id = ROUTE_GROUP1 + guid = "559c6ce8-26ab-5651-b946-ccc6e8f930b2" + + pb = Route() + pb.action_type = RoutingType.ROUTING_TYPE_VNET + pb.vnet = vnet + dash_db.create_route(group_id, prefix1, {"pb": pb.SerializeToString()}) + + pb = Route() + pb.action_type = RoutingType.ROUTING_TYPE_VNET_DIRECT + pb.vnet_direct.vnet = vnet + pb.vnet_direct.overlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(overlay_ip))) + dash_db.create_route(group_id, prefix2, {"pb": pb.SerializeToString()}) + + time.sleep(2) + keys = dash_db.get_asic_db_keys(ASIC_OUTBOUND_ROUTING_TABLE) + # Outbound routes for prefix1 and prefix2 are not ready before Vnet2 creation + assert len(keys) == 1 + + pb = Vnet() + pb.vni = int("45655") + pb.guid.value = bytes.fromhex(uuid.UUID(guid).hex) + dash_db.create_vnet(vnet, {"pb": pb.SerializeToString()}) + keys = dash_db.wait_for_asic_db_keys(ASIC_VNET_TABLE, min_keys=2) + assert len(keys) == 2 + + routing_entries = dash_db.wait_for_asic_db_keys(ASIC_OUTBOUND_ROUTING_TABLE, min_keys=3) + # Outbound routes for prefix1 and prefix2 are ready after Vnet2 creation + assert len(routing_entries) == 3 + + dash_db.remove_route(group_id, prefix1) + dash_db.remove_route(group_id, prefix2) + dash_db.remove_vnet(vnet) + + def test_eni_route(self, dash_db: DashDB): + pb = EniRoute() + pb.group_id = ROUTE_GROUP1 + self.mac_string = "F4939FEFC47E" + dash_db.create_eni_route(self.mac_string, {"pb": pb.SerializeToString()}) + + enis = dash_db.wait_for_asic_db_keys(ASIC_ENI_TABLE) + outbound_routing_group_entries = dash_db.get_asic_db_keys(ASIC_OUTBOUND_ROUTING_GROUP_TABLE) + dash_db.wait_for_asic_db_field(ASIC_ENI_TABLE, enis[0], "SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID", outbound_routing_group_entries[0]) + + def test_inbound_routing(self, dash_db: DashDB): + self.mac_string = "F4939FEFC47E" + self.vnet = "Vnet1" + self.vni = "3251" + self.ip = "10.1.1.1" + self.action_type = "decap" + self.pa_validation = "true" + self.priority = "1" + self.protocol = "0" + self.inbound_metering_class_or = "444" + self.inbound_metering_class_and = "468" + pb = RouteRule() + pb.pa_validation = True + pb.priority = int(self.priority) + pb.protocol = int(self.protocol) + pb.vnet = self.vnet + pb.metering_class_or = int(self.inbound_metering_class_or) + pb.metering_class_and = int(self.inbound_metering_class_and) + + dash_db.create_inbound_routing(self.mac_string, self.vni, self.ip, {"pb": pb.SerializeToString()}) + + inbound_routing_entries = dash_db.wait_for_asic_db_keys(ASIC_INBOUND_ROUTING_TABLE) + attrs = dash_db.get_asic_db_entry(ASIC_INBOUND_ROUTING_TABLE, inbound_routing_entries[0]) + assert_sai_attribute_exists("SAI_INBOUND_ROUTING_ENTRY_ATTR_ACTION", attrs, "SAI_INBOUND_ROUTING_ENTRY_ACTION_TUNNEL_DECAP_PA_VALIDATE") + assert_sai_attribute_exists("SAI_INBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_OR", attrs, self.inbound_metering_class_or) + assert_sai_attribute_exists("SAI_INBOUND_ROUTING_ENTRY_ATTR_METER_CLASS_AND", attrs, self.inbound_metering_class_and) + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down +# before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/dvslib/dvs_acl.py b/tests/dvslib/dvs_acl.py index 4315da37982..2656ac63e00 100644 --- a/tests/dvslib/dvs_acl.py +++ b/tests/dvslib/dvs_acl.py @@ -331,6 +331,30 @@ def verify_acl_table_action_list( for action in expected_action_list: assert action in action_list + def create_dscp_acl_rule( + self, + table_name: str, + rule_name: str, + qualifiers: Dict[str, str], + action: str, + priority: str = "2020" + ) -> None: + """Create a new DSCP ACL rule in the given table. + Args: + table_name: The name of the ACL table to add the rule to. + rule_name: The name of the ACL rule. + qualifiers: The list of qualifiers to add to the rule. + action: DSCP value. + priority: The priority of the rule. + """ + fvs = { + "priority": priority, + "DSCP_ACTION": action + } + + for k, v in qualifiers.items(): + fvs[k] = v + self.config_db.create_entry("ACL_RULE", "{}|{}".format(table_name, rule_name), fvs) def create_acl_rule( self, @@ -599,7 +623,7 @@ def verify_acl_rule_generic( elif k == "SAI_ACL_ENTRY_ATTR_ADMIN_STATE": assert v == "true" elif k in sai_qualifiers: - assert sai_qualifiers[k](v) + assert sai_qualifiers[k](v), "Unexpected value for SAI qualifier: key={}, value={}".format(k, v) else: assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) @@ -685,6 +709,17 @@ def _match_acl_range(sai_acl_range): return True return _match_acl_range + + def get_acl_counter_oid(self, acl_rule_id=None) -> str: + if not acl_rule_id: + acl_rule_id = self._get_acl_rule_id() + + entry = self.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", acl_rule_id) + counter_oid = entry.get("SAI_ACL_ENTRY_ATTR_ACTION_COUNTER") + return counter_oid + + def get_acl_rule_id(self) -> str: + return self._get_acl_rule_id() def _get_acl_rule_id(self) -> str: num_keys = len(self.asic_db.default_acl_entries) + 1 @@ -719,6 +754,8 @@ def _check_acl_entry_base( elif "SAI_ACL_ENTRY_ATTR_ACTION_NO_NAT" in k: assert action == "DO_NOT_NAT" assert v == "true" + elif "SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_TERMINATED" in k: + assert v == "true" elif k in qualifiers: assert qualifiers[k](v) else: @@ -742,7 +779,12 @@ def _check_acl_entry_counters_map(self, acl_entry_oid: str): return rule_to_counter_map = self.counters_db.get_entry("ACL_COUNTER_RULE_MAP", "") counter_to_rule_map = {v: k for k, v in rule_to_counter_map.items()} - assert counter_oid in counter_to_rule_map + assert counter_oid in counter_to_rule_map + + def check_acl_counter_not_in_counters_map(self, acl_counter_oid: str): + rule_to_counter_map = self.counters_db.get_entry("ACL_COUNTER_RULE_MAP", "") + counter_to_rule_map = {v: k for k, v in rule_to_counter_map.items()} + assert acl_counter_oid not in counter_to_rule_map def verify_acl_table_status( self, diff --git a/tests/dvslib/dvs_buffer.py b/tests/dvslib/dvs_buffer.py new file mode 100644 index 00000000000..36ab5afad97 --- /dev/null +++ b/tests/dvslib/dvs_buffer.py @@ -0,0 +1,223 @@ +"""Utilities for interacting with BUFFER objects when writing VS tests.""" + +from typing import Dict, List + + +class DVSBuffer: + """Manage buffer objects on the virtual switch.""" + + ASIC_BUFFER_PROFILE = "ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE" + ASIC_PRIORITY_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP" + + APPL_BUFFER_PROFILE = "BUFFER_PROFILE_TABLE" + + CONFIG_BUFFER_PROFILE = "BUFFER_PROFILE" + CONFIG_BUFFER_PG = "BUFFER_PG" + + CONFIG_DEVICE_METADATA = "DEVICE_METADATA" + KEY_DEVICE_METADATA_LOCALHOST = "localhost" + + STATE_BUFFER_MAX_PARAM = "BUFFER_MAX_PARAM_TABLE" + KEY_BUFFER_MAX_PARAM_GLOBAL = "global" + + COUNTERS_PG_NAME_MAP = "COUNTERS_PG_NAME_MAP" + + def __init__(self, asic_db, app_db, config_db, state_db, counters_db): + """Create a new DVS buffer manager.""" + self.asic_db = asic_db + self.app_db = app_db + self.config_db = config_db + self.state_db = state_db + self.counters_db = counters_db + + def get_buffer_pg_keys( + self, + port_name: str, + pg_index: str + ) -> List[str]: + """Get priority group buffer keys from CONFIG DB.""" + keyList = [] + + keys = self.config_db.get_keys(self.CONFIG_BUFFER_PG) + + for key in keys: + if port_name in key: + assert "|" in key, \ + "Malformed priority group buffer entry: key={}".format(key) + _, pg = key.split("|") + + if "-" in pg: + idx1, idx2 = pg.split("-") + if int(idx1) <= int(pg_index) and int(pg_index) <= int(idx2): + keyList.append(key) + else: + if int(pg_index) == int(pg): + keyList.append(key) + + return keyList + + def get_buffer_pg_value( + self, + pg_buffer_key: str, + pg_buffer_field: str = "profile" + ) -> str: + """Get priority group buffer value from CONFIG DB.""" + attr_list = [ pg_buffer_field ] + fvs = self.config_db.wait_for_fields(self.CONFIG_BUFFER_PG, pg_buffer_key, attr_list) + + return fvs[pg_buffer_field] + + def update_buffer_pg( + self, + pg_buffer_key: str, + pg_buffer_profile: str + ) -> None: + """Update priority group in CONFIG DB.""" + attr_dict = { + "profile": pg_buffer_profile + } + self.config_db.update_entry(self.CONFIG_BUFFER_PG, pg_buffer_key, attr_dict) + + def remove_buffer_pg( + self, + pg_buffer_key: str + ) -> None: + """Remove priority group from CONFIG DB.""" + self.config_db.delete_entry(self.CONFIG_BUFFER_PG, pg_buffer_key) + + def is_dynamic_buffer_model( + self + ) -> bool: + """Checks whether traditional/dynamic buffer model is configured in CONFIG DB.""" + fvs = self.config_db.wait_for_entry(self.CONFIG_DEVICE_METADATA, self.KEY_DEVICE_METADATA_LOCALHOST) + return fvs.get("buffer_model", "") == "dynamic" + + def wait_for_buffer_profiles( + self + ) -> None: + """Verify all buffer profiles are in ASIC DB.""" + zeroBufferProfileList = [ + "ingress_lossy_pg_zero_profile", + "ingress_lossy_zero_profile", + "ingress_lossless_zero_profile", + "egress_lossy_zero_profile", + "egress_lossless_zero_profile" + ] + bufferProfileList = list(self.config_db.get_keys(self.CONFIG_BUFFER_PROFILE)) + + if self.is_dynamic_buffer_model(): + bufferProfileList.extend(zeroBufferProfileList) + + self.app_db.wait_for_matching_keys(self.APPL_BUFFER_PROFILE, bufferProfileList) + self.asic_db.wait_for_n_keys(self.ASIC_BUFFER_PROFILE, len(bufferProfileList)) + + def get_buffer_profile_ids( + self, + expected: int = None + ) -> List[str]: + """Get all buffer profile ids from ASIC DB.""" + if expected is None: + return self.asic_db.get_keys(self.ASIC_BUFFER_PROFILE) + + return self.asic_db.wait_for_n_keys(self.ASIC_BUFFER_PROFILE, expected) + + def create_buffer_profile( + self, + buffer_profile_name: str, + qualifiers: Dict[str, str] + ) -> None: + """Create buffer profile in CONFIG DB.""" + self.config_db.create_entry(self.CONFIG_BUFFER_PROFILE, buffer_profile_name, qualifiers) + + def remove_buffer_profile( + self, + buffer_profile_name: str + ) -> None: + """Remove buffer profile from CONFIG DB.""" + self.config_db.delete_entry(self.CONFIG_BUFFER_PROFILE, buffer_profile_name) + + def update_buffer_profile( + self, + buffer_profile_name: str, + qualifiers: Dict[str, str] + ) -> None: + """Update buffer profile in CONFIG DB.""" + self.config_db.update_entry(self.CONFIG_BUFFER_PROFILE, buffer_profile_name, qualifiers) + + def verify_buffer_profile( + self, + sai_buffer_profile_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that buffer profile object has correct ASIC DB representation.""" + self.asic_db.wait_for_field_match(self.ASIC_BUFFER_PROFILE, sai_buffer_profile_id, sai_qualifiers) + + def update_buffer_mmu( + self, + mmu_size: str + ) -> None: + """Update buffer MMU size in STATE DB.""" + attr_dict = { + "mmu_size": mmu_size + } + self.state_db.update_entry(self.STATE_BUFFER_MAX_PARAM, self.KEY_BUFFER_MAX_PARAM_GLOBAL, attr_dict) + + def remove_buffer_mmu( + self + ) -> None: + """Remove buffer MMU size from STATE DB.""" + self.state_db.delete_entry(self.STATE_BUFFER_MAX_PARAM, self.KEY_BUFFER_MAX_PARAM_GLOBAL) + + def is_priority_group_exists( + self, + port_name: str, + pg_index: str + ) -> bool: + """Verify priority group existence in CONFIG DB.""" + key = "{}|{}".format(port_name, pg_index) + fvs = self.config_db.get_entry(self.CONFIG_BUFFER_PG, key) + + return bool(fvs) + + def get_priority_group_id( + self, + port_name: str, + pg_index: str + ) -> str: + """Get priority group id from COUNTERS DB.""" + field = "{}:{}".format(port_name, pg_index) + + attr_list = [ field ] + fvs = self.counters_db.wait_for_fields(self.COUNTERS_PG_NAME_MAP, "", attr_list) + + return fvs[field] + + def update_priority_group( + self, + port_name: str, + pg_index: str, + buffer_profile_name: str + ) -> None: + """Update priority group in CONFIG DB.""" + attr_dict = { + "profile": buffer_profile_name + } + key = "{}|{}".format(port_name, pg_index) + self.config_db.update_entry(self.CONFIG_BUFFER_PG, key, attr_dict) + + def remove_priority_group( + self, + port_name: str, + pg_index: str + ) -> None: + """Remove priority group from CONFIG DB.""" + key = "{}|{}".format(port_name, pg_index) + self.config_db.delete_entry(self.CONFIG_BUFFER_PG, key) + + def verify_priority_group( + self, + sai_priority_group_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that priority group object has correct ASIC DB representation.""" + self.asic_db.wait_for_field_match(self.ASIC_PRIORITY_GROUP, sai_priority_group_id, sai_qualifiers) diff --git a/tests/dvslib/dvs_database.py b/tests/dvslib/dvs_database.py index 553c0d77105..548137688f0 100644 --- a/tests/dvslib/dvs_database.py +++ b/tests/dvslib/dvs_database.py @@ -4,7 +4,7 @@ - Reference DBs by name rather than ID/socket - Add support for ProducerStateTable """ -from typing import Dict, List +from typing import Dict, List, Callable from swsscommon import swsscommon from swsscommon.swsscommon import SonicDBConfig from dvslib.dvs_common import wait_for_result, PollingConfig @@ -96,8 +96,12 @@ def delete_entry(self, table_name: str, key: str) -> None: table_name: The name of the table where the entry is being removed. key: The key that maps to the entry being removed. """ - table = swsscommon.Table(self.db_connection, table_name) - table._del(key) # pylint: disable=protected-access + if self.db_connection.getDbId() == swsscommon.APPL_DB: + table = swsscommon.ProducerStateTable(self.db_connection, table_name) + table._del(key) + else: + table = swsscommon.Table(self.db_connection, table_name) + table._del(key) # pylint: disable=protected-access def delete_field(self, table_name: str, key: str, field: str) -> None: """Remove a field from an entry stored at `key` in the specified table. @@ -109,7 +113,19 @@ def delete_field(self, table_name: str, key: str, field: str) -> None: """ table = swsscommon.Table(self.db_connection, table_name) table.hdel(key, field) - + + def set_field(self, table_name: str, key: str, field: str, value: str) -> None: + """Add/Update a field in an entry stored at `key` in the specified table. + + Args: + table_name: The name of the table where the entry is being removed. + key: The key that maps to the entry being added/updated. + field: The field that needs to be added/updated. + value: The value that is set for the field. + """ + table = swsscommon.Table(self.db_connection, table_name) + table.hset(key, field, value) + def get_keys(self, table_name: str) -> List[str]: """Get all of the keys stored in the specified table. @@ -201,6 +217,7 @@ def wait_for_field_match( key: str, expected_fields: Dict[str, str], polling_config: PollingConfig = PollingConfig(), + comparator: Callable[[], bool] = None, failure_message: str = None, ) -> Dict[str, str]: """Wait for the entry stored at `key` to have the specified field/values and retrieve it. @@ -222,6 +239,11 @@ def wait_for_field_match( def access_function(): fv_pairs = self.get_entry(table_name, key) + + if comparator is not None: + result = all(comparator(k, fv_pairs.get(k, None), v) for k, v in expected_fields.items()) + return (result, fv_pairs) + return ( all(fv_pairs.get(k) == v for k, v in expected_fields.items()), fv_pairs, diff --git a/tests/dvslib/dvs_flex_counter.py b/tests/dvslib/dvs_flex_counter.py new file mode 100644 index 00000000000..af993271f85 --- /dev/null +++ b/tests/dvslib/dvs_flex_counter.py @@ -0,0 +1,177 @@ +import time + +from typing import Dict +from swsscommon import swsscommon + + +NUMBER_OF_RETRIES = 10 + + +class DVSFlexCounter(object): + """Manage flex counter objects on the virtual switch.""" + + CONFIG_FLEX_COUNTER = swsscommon.CFG_FLEX_COUNTER_TABLE_NAME + + FLEX_FLEX_COUNTER = swsscommon.FLEX_COUNTER_TABLE + FLEX_FLEX_COUNTER_GROUP = swsscommon.FLEX_COUNTER_GROUP_TABLE + + def __init__(self, cfgdb, flexdb): + """Create a new DVS flex counter manager.""" + self.config_db = cfgdb + self.flex_db = flexdb + + def update_flex_counter( + self, + group_name: str, + attr_dict: Dict[str, str] + ) -> None: + """Update flex counter in CONFIG DB.""" + self.config_db.update_entry(self.CONFIG_FLEX_COUNTER, group_name, attr_dict) + + def verify_flex_counter( + self, + stat_name: str, + qualifiers: Dict[str, str] + ) -> None: + """Verify that flex counter object has correct FLEX_COUNTER DB representation.""" + self.flex_db.wait_for_field_match(self.FLEX_FLEX_COUNTER_GROUP, stat_name, qualifiers) + + def set_interval( + self, + group_name: str, + interval: str + ) -> None: + """Set flex counter poll interval in CONFIG DB.""" + attr_dict = { + swsscommon.POLL_INTERVAL_FIELD: interval + } + self.update_flex_counter(group_name, attr_dict) + + def set_status( + self, + group_name: str, + status: str + ) -> None: + """Set flex counter status in CONFIG DB.""" + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: status + } + self.update_flex_counter(group_name, attr_dict) + + +class TestFlexCountersBase(object): + + def setup_dbs(self, dvs): + self.config_db = dvs.get_config_db() + self.flex_db = dvs.get_flex_db() + self.counters_db = dvs.get_counters_db() + self.app_db = dvs.get_app_db() + + def wait_for_table(self, table): + for retry in range(NUMBER_OF_RETRIES): + counters_keys = self.counters_db.db_connection.hgetall(table) + if len(counters_keys) > 0: + return + else: + time.sleep(1) + + assert False, str(table) + " not created in Counters DB" + + def wait_for_table_empty(self, table): + for retry in range(NUMBER_OF_RETRIES): + counters_keys = self.counters_db.db_connection.hgetall(table) + if len(counters_keys) == 0: + return + else: + time.sleep(1) + + assert False, str(table) + " is still in Counters DB" + + def wait_for_id_list(self, stat, name, oid): + for retry in range(NUMBER_OF_RETRIES): + id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() + if len(id_list) > 0: + return + else: + time.sleep(1) + + assert False, "No ID list for counter " + str(name) + + def wait_for_id_list_remove(self, stat, name, oid): + for retry in range(NUMBER_OF_RETRIES): + id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() + if len(id_list) == 0: + return + else: + time.sleep(1) + + assert False, "ID list for counter " + str(name) + " is still there" + + def wait_for_interval_set(self, group, interval): + interval_value = None + for retry in range(NUMBER_OF_RETRIES): + interval_value = self.flex_db.db_connection.hget("FLEX_COUNTER_GROUP_TABLE:" + group, 'POLL_INTERVAL') + if interval_value == interval: + return + else: + time.sleep(1) + + assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) + + def set_flex_counter_group_status(self, group, map, status='enable', check_name_map=True): + group_stats_entry = {"FLEX_COUNTER_STATUS": status} + self.config_db.create_entry("FLEX_COUNTER_TABLE", group, group_stats_entry) + if check_name_map: + if status == 'enable': + self.wait_for_table(map) + else: + self.wait_for_table_empty(map) + + def verify_flex_counters_populated(self, map, stat): + counters_keys = self.counters_db.db_connection.hgetall(map) + for counter_entry in counters_keys.items(): + name = counter_entry[0] + oid = counter_entry[1] + self.wait_for_id_list(stat, name, oid) + + def set_flex_counter_group_interval(self, key, group, interval): + group_stats_entry = {"POLL_INTERVAL": interval} + self.config_db.create_entry("FLEX_COUNTER_TABLE", key, group_stats_entry) + self.wait_for_interval_set(group, interval) + + def verify_no_flex_counters_tables(self, counter_stat): + counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) + assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" + + def verify_flex_counter_flow(self, dvs, meta_data): + """ + The test will check there are no flex counters tables on FlexCounter DB when the counters are disabled. + After enabling each counter group, the test will check the flow of creating flex counters tables on FlexCounter DB. + For some counter types the MAPS on COUNTERS DB will be created as well after enabling the counter group, this will be also verified on this test. + """ + self.setup_dbs(dvs) + counter_key = meta_data['key'] + counter_stat = meta_data['group_name'] + counter_map = meta_data['name_map'] + pre_test = meta_data.get('pre_test') + post_test = meta_data.get('post_test') + meta_data['dvs'] = dvs + + self.verify_no_flex_counters_tables(counter_stat) + + if pre_test: + if not hasattr(self, pre_test): + assert False, "Test object does not have the method {}".format(pre_test) + cb = getattr(self, pre_test) + cb(meta_data) + + self.set_flex_counter_group_status(counter_key, counter_map) + self.verify_flex_counters_populated(counter_map, counter_stat) + self.set_flex_counter_group_interval(counter_key, counter_stat, '2500') + + if post_test: + if not hasattr(self, post_test): + assert False, "Test object does not have the method {}".format(post_test) + cb = getattr(self, post_test) + cb(meta_data) + diff --git a/tests/dvslib/dvs_hash.py b/tests/dvslib/dvs_hash.py index 5ac896962c7..66f364b7e00 100644 --- a/tests/dvslib/dvs_hash.py +++ b/tests/dvslib/dvs_hash.py @@ -1,5 +1,6 @@ """Utilities for interacting with HASH objects when writing VS tests.""" from typing import Dict, List +import time class DVSHash: @@ -21,6 +22,7 @@ def update_switch_hash( ) -> None: """Update switch hash global in Config DB.""" self.config_db.update_entry(self.CDB_SWITCH_HASH, self.KEY_SWITCH_HASH_GLOBAL, qualifiers) + time.sleep(1) def get_hash_ids( self, diff --git a/tests/dvslib/dvs_port.py b/tests/dvslib/dvs_port.py index 330245099c0..f2b476a7b07 100644 --- a/tests/dvslib/dvs_port.py +++ b/tests/dvslib/dvs_port.py @@ -1,4 +1,5 @@ """Utilities for interacting with PORT objects when writing VS tests.""" + from typing import Dict, List from swsscommon import swsscommon @@ -8,14 +9,25 @@ class DVSPort(object): ASIC_DB = swsscommon.ASIC_DB APPL_DB = swsscommon.APPL_DB - CFGDB_PORT = "PORT" - APPDB_PORT = "PORT_TABLE" - ASICDB_PORT = "ASIC_STATE:SAI_OBJECT_TYPE_PORT" + CHANNEL_UNITTEST = "SAI_VS_UNITTEST_CHANNEL" + + ASIC_VIDTORID = "VIDTORID" + ASIC_PORT = "ASIC_STATE:SAI_OBJECT_TYPE_PORT" + + APPL_PORT = "PORT_TABLE" - def __init__(self, asicdb, appdb, cfgdb): + CONFIG_PORT = "PORT" + CONFIG_BUFFER_INGRESS_PROFILE_LIST = "BUFFER_PORT_INGRESS_PROFILE_LIST" + CONFIG_BUFFER_EGRESS_PROFILE_LIST = "BUFFER_PORT_EGRESS_PROFILE_LIST" + + COUNTERS_COUNTERS = "COUNTERS" + COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" + + def __init__(self, asicdb, appdb, cfgdb, counters_db): self.asic_db = asicdb self.app_db = appdb self.config_db = cfgdb + self.counters_db = counters_db def create_port_generic( self, @@ -24,21 +36,21 @@ def create_port_generic( speed: str, qualifiers: Dict[str, str] = {} ) -> None: - """Create PORT in Config DB.""" + """Create PORT in CONFIG DB.""" attr_dict = { "lanes": lanes, "speed": speed, **qualifiers } - self.config_db.create_entry(self.CFGDB_PORT, port_name, attr_dict) + self.config_db.create_entry(self.CONFIG_PORT, port_name, attr_dict) def remove_port_generic( self, port_name: str )-> None: - """Remove PORT from Config DB.""" - self.config_db.delete_entry(self.CFGDB_PORT, port_name) + """Remove PORT from CONFIG DB.""" + self.config_db.delete_entry(self.CONFIG_PORT, port_name) def remove_port(self, port_name): self.config_db.delete_field("CABLE_LENGTH", "AZURE", port_name) @@ -60,8 +72,45 @@ def update_port( port_name: str, attr_dict: Dict[str, str] ) -> None: - """Update PORT in Config DB.""" - self.config_db.update_entry(self.CFGDB_PORT, port_name, attr_dict) + """Update PORT in CONFIG DB.""" + self.config_db.update_entry(self.CONFIG_PORT, port_name, attr_dict) + + def verify_port( + self, + sai_port_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that port object has correct ASIC DB representation. + + Args: + sai_port_id: The specific port id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. + """ + def comparator(k, v1, v2): + def profile_list_handler(v1, v2): + if v1 is None: + return False + bpList = v1[v1.index(":")+1:].split(",") + return set(bpList) == set(v2) + + if k == "SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST": + return profile_list_handler(v1, v2) + elif k == "SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST": + return profile_list_handler(v1, v2) + + return v1 == v2 + + self.asic_db.wait_for_field_match(self.ASIC_PORT, sai_port_id, sai_qualifiers, comparator=comparator) + + def get_port_id( + self, + port_name: str + ) -> str: + """Get port id from COUNTERS DB.""" + attr_list = [ port_name ] + fvs = self.counters_db.wait_for_fields(self.COUNTERS_PORT_NAME_MAP, "", attr_list) + + return fvs[port_name] def get_port_ids( self, @@ -74,10 +123,10 @@ def get_port_ids( if dbid == swsscommon.ASIC_DB: conn = self.asic_db - table = self.ASICDB_PORT + table = self.ASIC_PORT elif dbid == swsscommon.APPL_DB: conn = self.app_db - table = self.APPDB_PORT + table = self.APPL_PORT else: raise RuntimeError("Interface not implemented") @@ -93,3 +142,79 @@ def verify_port_count( ) -> None: """Verify that there are N PORT objects in ASIC/APP DB.""" self.get_port_ids(expected, dbid) + + def set_port_counter( + self, + sai_port_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Set port counter value in ASIC DB.""" + attr_list = [ sai_port_id ] + fvs = self.asic_db.wait_for_fields(self.ASIC_VIDTORID, "", attr_list) + + ntf = swsscommon.NotificationProducer(self.asic_db.db_connection, self.CHANNEL_UNITTEST) + + # Enable test mode + fvp = swsscommon.FieldValuePairs() + ntf.send("enable_unittests", "true", fvp) + + # Set queue stats + key = fvs[sai_port_id] + fvp = swsscommon.FieldValuePairs(list(sai_qualifiers.items())) + ntf.send("set_stats", str(key), fvp) + + # Disable test mode + fvp = swsscommon.FieldValuePairs() + ntf.send("enable_unittests", "false", fvp) + + def verify_port_counter( + self, + sai_port_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that port counter object has correct COUNTERS DB representation.""" + self.counters_db.wait_for_field_match(self.COUNTERS_COUNTERS, sai_port_id, sai_qualifiers) + + def update_buffer_profile_list( + self, + port_name: str, + profile_list: str, + ingress: bool = True + ) -> None: + """Update ingress/egress buffer profile list in CONFIG DB.""" + attr_dict = { + "profile_list": profile_list + } + table_name = self.CONFIG_BUFFER_INGRESS_PROFILE_LIST if ingress else self.CONFIG_BUFFER_EGRESS_PROFILE_LIST + self.config_db.update_entry(table_name, port_name, attr_dict) + + def remove_buffer_profile_list( + self, + port_name: str, + ingress: bool = True + ) -> None: + """Remove ingress/egress buffer profile list from CONFIG DB.""" + table_name = self.CONFIG_BUFFER_INGRESS_PROFILE_LIST if ingress else self.CONFIG_BUFFER_EGRESS_PROFILE_LIST + self.config_db.delete_entry(table_name, port_name) + + def is_buffer_profile_list_exists( + self, + port_name: str, + ingress: bool = True + ) -> str: + """Verify ingress/egress buffer profile list existence in CONFIG DB.""" + table_name = self.CONFIG_BUFFER_INGRESS_PROFILE_LIST if ingress else self.CONFIG_BUFFER_EGRESS_PROFILE_LIST + fvs = self.config_db.get_entry(table_name, port_name) + + return bool(fvs) + + def get_buffer_profile_list( + self, + port_name: str, + ingress: bool = True + ) -> str: + """Get ingress/egress buffer profile list from CONFIG DB.""" + table_name = self.CONFIG_BUFFER_INGRESS_PROFILE_LIST if ingress else self.CONFIG_BUFFER_EGRESS_PROFILE_LIST + fvs = self.config_db.wait_for_entry(table_name, port_name) + + return fvs["profile_list"].split(",") diff --git a/tests/dvslib/dvs_queue.py b/tests/dvslib/dvs_queue.py new file mode 100644 index 00000000000..ca687352ec3 --- /dev/null +++ b/tests/dvslib/dvs_queue.py @@ -0,0 +1,127 @@ +"""Utilities for interacting with QUEUE objects when writing VS tests.""" + +from typing import Dict, Union +from swsscommon import swsscommon + + +class DVSQueue: + """Manage queue objects on the virtual switch.""" + + CHANNEL_UNITTEST = "SAI_VS_UNITTEST_CHANNEL" + + ASIC_VIDTORID = "VIDTORID" + ASIC_QUEUE = "ASIC_STATE:SAI_OBJECT_TYPE_QUEUE" + + CONFIG_BUFFER_QUEUE = "BUFFER_QUEUE" + + COUNTERS_COUNTERS = "COUNTERS" + COUNTERS_QUEUE_NAME_MAP = "COUNTERS_QUEUE_NAME_MAP" + + OID_NULL = "oid:0x0" + + def __init__(self, asic_db, config_db, counters_db): + """Create a new DVS queue manager.""" + self.asic_db = asic_db + self.config_db = config_db + self.counters_db = counters_db + + def get_queue_id( + self, + port_name: str, + queue_index: str + ) -> str: + """Get queue id from COUNTERS DB.""" + field = "{}:{}".format(port_name, queue_index) + + attr_list = [ field ] + fvs = self.counters_db.wait_for_fields(self.COUNTERS_QUEUE_NAME_MAP, "", attr_list) + + return fvs[field] + + def get_queue_buffer_profile_id( + self, + port_name: str, + queue_index: str + ) -> str: + """Get queue buffer profile id from ASIC DB.""" + field = "SAI_QUEUE_ATTR_BUFFER_PROFILE_ID" + + sai_queue_id = self.get_queue_id(port_name, queue_index) + attr_list = [ field ] + fvs = self.asic_db.wait_for_fields(self.ASIC_QUEUE, sai_queue_id, attr_list) + + if fvs[field] == self.OID_NULL: + attr_dict = { + field: self.OID_NULL + } + fvs = self.asic_db.wait_for_field_negative_match(self.ASIC_QUEUE, sai_queue_id, attr_dict) + + return fvs[field] + + def get_queue_buffer_profile_name( + self, + port_name: str, + queue_index: str + ) -> str: + """Get queue buffer profile name from CONFIG DB.""" + def get_buffer_queue_key(port: str, idx: str) -> Union[str, None]: + keys = self.config_db.get_keys(self.CONFIG_BUFFER_QUEUE) + + for key in keys: + if port in key: + assert "|" in key, \ + "Malformed queue buffer entry: key={}".format(key) + _, queue = key.split("|") + + if "-" in queue: + idx1, idx2 = queue.split("-") + if int(idx1) <= int(idx) and int(idx) <= int(idx2): + return key + else: + if int(idx) == int(queue): + return key + + return None + + key = get_buffer_queue_key(port_name, queue_index) + assert key is not None, \ + "Queue buffer profile name doesn't exist: port={}, queue={}".format(port_name, queue_index) + + field = "profile" + + attr_list = [ field ] + fvs = self.config_db.wait_for_fields(self.CONFIG_BUFFER_QUEUE, key, attr_list) + + return fvs[field] + + def set_queue_counter( + self, + sai_queue_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Set queue counter value in ASIC DB.""" + attr_list = [ sai_queue_id ] + fvs = self.asic_db.wait_for_fields(self.ASIC_VIDTORID, "", attr_list) + + ntf = swsscommon.NotificationProducer(self.asic_db.db_connection, self.CHANNEL_UNITTEST) + + # Enable test mode + fvp = swsscommon.FieldValuePairs() + ntf.send("enable_unittests", "true", fvp) + + # Set queue stats + key = fvs[sai_queue_id] + fvp = swsscommon.FieldValuePairs(list(sai_qualifiers.items())) + ntf.send("set_stats", str(key), fvp) + + # Disable test mode + fvp = swsscommon.FieldValuePairs() + ntf.send("enable_unittests", "false", fvp) + + def verify_queue_counter( + self, + sai_queue_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that queue counter object has correct COUNTERS DB representation.""" + self.counters_db.wait_for_field_match(self.COUNTERS_COUNTERS, sai_queue_id, sai_qualifiers) diff --git a/tests/dvslib/dvs_switch.py b/tests/dvslib/dvs_switch.py index b57dc7082fa..a76d5268e7c 100644 --- a/tests/dvslib/dvs_switch.py +++ b/tests/dvslib/dvs_switch.py @@ -1,15 +1,34 @@ """Utilities for interacting with SWITCH objects when writing VS tests.""" + from typing import Dict, List +from swsscommon import swsscommon class DVSSwitch: """Manage switch objects on the virtual switch.""" - ADB_SWITCH = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" + CHANNEL_UNITTEST = "SAI_VS_UNITTEST_CHANNEL" + + ASIC_VIDTORID = "VIDTORID" + ASIC_SWITCH = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" + + CONFIG_SWITCH_TRIMMING = "SWITCH_TRIMMING" + KEY_SWITCH_TRIMMING_GLOBAL = "GLOBAL" - def __init__(self, asic_db): + COUNTERS_COUNTERS = "COUNTERS" + + def __init__(self, asic_db, config_db, counters_db): """Create a new DVS switch manager.""" self.asic_db = asic_db + self.config_db = config_db + self.counters_db = counters_db + + def update_switch_trimming( + self, + qualifiers: Dict[str, str] + ) -> None: + """Update switch trimming global in CONFIG DB.""" + self.config_db.update_entry(self.CONFIG_SWITCH_TRIMMING, self.KEY_SWITCH_TRIMMING_GLOBAL, qualifiers) def get_switch_ids( self, @@ -24,10 +43,10 @@ def get_switch_ids( The list of switch ids in ASIC DB. """ if expected is None: - return self.asic_db.get_keys(self.ADB_SWITCH) + return self.asic_db.get_keys(self.ASIC_SWITCH) num_keys = len(self.asic_db.default_switch_keys) + expected - keys = self.asic_db.wait_for_n_keys(self.ADB_SWITCH, num_keys) + keys = self.asic_db.wait_for_n_keys(self.ASIC_SWITCH, num_keys) for k in self.asic_db.default_switch_keys: assert k in keys @@ -45,7 +64,7 @@ def verify_switch_count( """ self.get_switch_ids(expected) - def verify_switch_generic( + def verify_switch( self, sai_switch_id: str, sai_qualifiers: Dict[str, str] @@ -56,41 +75,36 @@ def verify_switch_generic( sai_switch_id: The specific switch id to check in ASIC DB. sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. """ - entry = self.asic_db.wait_for_entry(self.ADB_SWITCH, sai_switch_id) - - for k, v in entry.items(): - if k == "NULL": - continue - elif k in sai_qualifiers: - if k == "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM": - assert sai_qualifiers[k] == v - elif k == "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM": - assert sai_qualifiers[k] == v - else: - assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) + self.asic_db.wait_for_field_match(self.ASIC_SWITCH, sai_switch_id, sai_qualifiers) - def verify_switch( + def set_switch_counter( self, sai_switch_id: str, - sai_qualifiers: Dict[str, str], - strict: bool = False + sai_qualifiers: Dict[str, str] ) -> None: - """Verify that switch object has correct ASIC DB representation. + """Set switch counter value in ASIC DB.""" + attr_list = [ sai_switch_id ] + fvs = self.asic_db.wait_for_fields(self.ASIC_VIDTORID, "", attr_list) - Args: - sai_switch_id: The specific switch id to check in ASIC DB. - sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. - strict: Specifies whether verification should be strict - """ - if strict: - self.verify_switch_generic(sai_switch_id, sai_qualifiers) - return + ntf = swsscommon.NotificationProducer(self.asic_db.db_connection, self.CHANNEL_UNITTEST) - entry = self.asic_db.wait_for_entry(self.ADB_SWITCH, sai_switch_id) + # Enable test mode + fvp = swsscommon.FieldValuePairs() + ntf.send("enable_unittests", "true", fvp) - attr_dict = { - **entry, - **sai_qualifiers - } + # Set switch stats + key = fvs[sai_switch_id] + fvp = swsscommon.FieldValuePairs(list(sai_qualifiers.items())) + ntf.send("set_stats", str(key), fvp) - self.verify_switch_generic(sai_switch_id, attr_dict) + # Disable test mode + fvp = swsscommon.FieldValuePairs() + ntf.send("enable_unittests", "false", fvp) + + def verify_switch_counter( + self, + sai_switch_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that switch counter object has correct COUNTERS DB representation.""" + self.counters_db.wait_for_field_match(self.COUNTERS_COUNTERS, sai_switch_id, sai_qualifiers) diff --git a/tests/dvslib/dvs_twamp.py b/tests/dvslib/dvs_twamp.py new file mode 100644 index 00000000000..864b072bd62 --- /dev/null +++ b/tests/dvslib/dvs_twamp.py @@ -0,0 +1,98 @@ +"""Utilities for interacting with TWAMP Light objects when writing VS tests.""" + +class DVSTwamp(object): + def __init__(self, adb, cdb, sdb, cntrdb, appdb): + self.asic_db = adb + self.config_db = cdb + self.state_db = sdb + self.counters_db = cntrdb + self.app_db = appdb + + def create_twamp_light_session_sender_packet_count(self, name, sip, sport, dip, dport, packet_count=100, tx_interval=100, timeout=5, stats_interval=None): + twamp_light_entry = {"mode": "LIGHT", + "role": "SENDER", + "src_ip": sip, + "src_udp_port": sport, + "dst_ip": dip, + "dst_udp_port": dport, + "packet_count": packet_count, + "tx_interval": tx_interval, + "timeout": timeout + } + if stats_interval: + twamp_light_entry["statistics_interval"] = str(stats_interval) + else: + twamp_light_entry["statistics_interval"] = str(int(packet_count) * int(tx_interval) + int(timeout)*1000) + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def create_twamp_light_session_sender_continuous(self, name, sip, sport, dip, dport, monitor_time=0, tx_interval=100, timeout=5, stats_interval=None): + twamp_light_entry = {"mode": "LIGHT", + "role": "SENDER", + "src_ip": sip, + "src_udp_port": sport, + "dst_ip": dip, + "dst_udp_port": dport, + "monitor_time": monitor_time, + "tx_interval": tx_interval, + "timeout": timeout + } + if stats_interval: + twamp_light_entry["statistics_interval"] = str(stats_interval) + else: + twamp_light_entry["statistics_interval"] = str(int(monitor_time)*1000) + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def create_twamp_light_session_reflector(self, name, sip, sport, dip, dport): + twamp_light_entry = {"mode": "LIGHT", + "role": "REFLECTOR", + "src_ip": sip, + "src_udp_port": sport, + "dst_ip": dip, + "dst_udp_port": dport + } + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def start_twamp_light_sender(self, name): + twamp_light_entry = {"admin_state": "enabled"} + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def stop_twamp_light_sender(self, name): + twamp_light_entry = {"admin_state": "disabled"} + self.config_db.create_entry("TWAMP_SESSION", name, twamp_light_entry) + + def remove_twamp_light_session(self, name): + self.config_db.delete_entry("TWAMP_SESSION", name) + + def get_twamp_light_session_status(self, name): + return self.get_twamp_light_session_state(name)["status"] + + def get_twamp_light_session_state(self, name): + tbl = swsscommon.Table(self.sdb, "TWAMP_SESSION_TABLE") + (status, fvs) = tbl.get(name) + assert status == True + assert len(fvs) > 0 + return { fv[0]: fv[1] for fv in fvs } + + def verify_session_status(self, name, status="active", expected=1): + self.state_db.wait_for_n_keys("TWAMP_SESSION_TABLE", expected) + if expected: + self.state_db.wait_for_field_match("TWAMP_SESSION_TABLE", name, {"status": status}) + + def verify_no_session(self): + self.config_db.wait_for_n_keys("TWAMP_SESSION", 0) + self.state_db.wait_for_n_keys("TWAMP_SESSION_TABLE", 0) + + def verify_session_asic_db(self, dvs, name, asic_table=None, expected=1): + session_oids = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TWAMP_SESSION", expected) + session_oid = session_oids[0] + dvs.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_TWAMP_SESSION", session_oid, asic_table) + + def verify_session_counter_db(self, dvs, name, counter_table=None, expected=1, expected_item=1): + fvs = dvs.counters_db.get_entry("COUNTERS_TWAMP_SESSION_NAME_MAP", "") + fvs = dict(fvs) + total_key = self.counters_db.db_connection.keys("COUNTERS:{}".format(fvs[name])) + assert len(total_key) == expected, "TWAMP Light counter entries are not available in counter db" + dvs.counters_db.wait_for_field_match("COUNTERS", fvs[name], counter_table) + item_keys = self.counters_db.db_connection.keys("COUNTERS:{}:INDEX:*".format(fvs[name])) + assert len(item_keys) == expected_item, "TWAMP Light counter entries are not available in counter db" + diff --git a/tests/dvslib/dvs_vlan.py b/tests/dvslib/dvs_vlan.py index 418f3be6661..303fa46c05d 100644 --- a/tests/dvslib/dvs_vlan.py +++ b/tests/dvslib/dvs_vlan.py @@ -13,6 +13,11 @@ def create_vlan(self, vlanID): vlan_entry = {"vlanid": vlanID} self.config_db.create_entry("VLAN", vlan, vlan_entry) + def create_vlan_with_mac(self, vlanID, mac): + vlan = f"Vlan{vlanID}" + vlan_entry = {"vlanid": vlanID, "mac": mac} + self.config_db.create_entry("VLAN", vlan, vlan_entry) + def create_vlan_interface(self, vlanID): vlan = "Vlan{}".format(vlanID) vlan_intf_entry = {} diff --git a/tests/dvslib/sai_utils.py b/tests/dvslib/sai_utils.py new file mode 100644 index 00000000000..c5acaea722d --- /dev/null +++ b/tests/dvslib/sai_utils.py @@ -0,0 +1,17 @@ +from ipaddress import ip_address as IP + + +def assert_sai_attribute_exists(attr_name, attrs, expected_val=None): + assert attr_name in attrs, f"Attribute {attr_name} not found in {attrs}" + if expected_val is not None: + expected = expected_val + actual = attrs[attr_name] + # Attempt to convert to specific types to avoid string comparison when possible + for type in [int, IP]: + try: + expected = type(expected) + actual = type(actual) + break + except ValueError: + continue + assert actual == expected, f"Attribute {attr_name} value mismatch. Expected: {expected}, Actual: {actual}" diff --git a/tests/evpn_tunnel.py b/tests/evpn_tunnel.py index 346064e004b..98dc45f2c78 100644 --- a/tests/evpn_tunnel.py +++ b/tests/evpn_tunnel.py @@ -569,6 +569,8 @@ def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': encapstr, 'SAI_TUNNEL_ATTR_PEER_MODE': 'SAI_TUNNEL_PEER_MODE_P2MP', 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': 'SAI_TUNNEL_TTL_MODE_PIPE_MODEL', + 'SAI_TUNNEL_ATTR_ENCAP_TTL_VAL': '255', } ) @@ -675,6 +677,8 @@ def check_vxlan_dip_tunnel(self, dvs, vtep_name, src_ip, dip): 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': encapstr, 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, 'SAI_TUNNEL_ATTR_ENCAP_DST_IP': dip, + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': 'SAI_TUNNEL_TTL_MODE_PIPE_MODEL', + 'SAI_TUNNEL_ATTR_ENCAP_TTL_VAL': '255', } ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_TABLE, expected_tun_attributes) diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index 94e2b6aba17..8582d5899d8 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -132,8 +132,8 @@ lcov_merge_all() cp $1/lcov_cobertura.py $1/common_work/gcov/ python $1/common_work/gcov/lcov_cobertura.py total.info -o coverage.xml - sed -i "s#../../__w/1/s/##" coverage.xml - sed -i "s#......__w.1.s.##" coverage.xml + sed -i "s#\.\./s/##" coverage.xml + sed -i "s#\.\.\.s\.##" coverage.xml cd gcov_output/ if [ ! -d ${ALLMERGE_DIR} ]; then diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index b5da2e2fbbc..5ace5aed5f3 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -1,8 +1,12 @@ FLEX_CTR_DIR = $(top_srcdir)/orchagent/flex_counter DEBUG_CTR_DIR = $(top_srcdir)/orchagent/debug_counter P4_ORCH_DIR = $(top_srcdir)/orchagent/p4orch +DASH_ORCH_DIR = $(top_srcdir)/orchagent/dash DASH_PROTO_DIR = $(top_srcdir)/orchagent/dash/proto +CFLAGS = -g -O0 +CXXFLAGS = -g -O0 + CFLAGS_SAI = -I /usr/include/sai TESTS = tests tests_intfmgrd tests_teammgrd tests_portsyncd tests_fpmsyncd tests_response_publisher @@ -22,9 +26,10 @@ LDADD_GTEST = -L/usr/src/gtest ## Orchagent Unit Tests -tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent -I$(P4_ORCH_DIR)/tests -I$(top_srcdir)/warmrestart +tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent -I$(P4_ORCH_DIR)/tests -I$(DASH_ORCH_DIR) -I$(top_srcdir)/warmrestart tests_SOURCES = aclorch_ut.cpp \ + aclorch_rule_ut.cpp \ portsorch_ut.cpp \ routeorch_ut.cpp \ qosorch_ut.cpp \ @@ -36,14 +41,17 @@ tests_SOURCES = aclorch_ut.cpp \ saispy_ut.cpp \ consumer_ut.cpp \ sfloworh_ut.cpp \ + tunneldecaporch_ut.cpp \ ut_saihelper.cpp \ mock_orchagent_main.cpp \ mock_dbconnector.cpp \ mock_consumerstatetable.cpp \ + mock_subscriberstatetable.cpp \ common/mock_shell_command.cpp \ mock_table.cpp \ mock_hiredis.cpp \ mock_redisreply.cpp \ + mock_sai_api.cpp \ bulker_ut.cpp \ portmgr_ut.cpp \ sflowmgrd_ut.cpp \ @@ -55,9 +63,28 @@ tests_SOURCES = aclorch_ut.cpp \ mux_rollback_ut.cpp \ warmrestartassist_ut.cpp \ test_failure_handling.cpp \ + switchorch_ut.cpp \ + warmrestarthelper_ut.cpp \ + neighorch_ut.cpp \ + dashenifwdorch_ut.cpp \ + dashorch_ut.cpp \ + dashvnetorch_ut.cpp \ + dashhaorch_ut.cpp \ + dashrouteorch_ut.cpp \ + dashportmaporch_ut.cpp \ + twamporch_ut.cpp \ + stporch_ut.cpp \ + flexcounter_ut.cpp \ + mock_orch_test.cpp \ + mock_dash_orch_test.cpp \ + zmq_orch_ut.cpp \ + mock_saihelper.cpp \ + mirrororch_ut.cpp \ + $(top_srcdir)/warmrestart/warmRestartHelper.cpp \ $(top_srcdir)/lib/gearboxutils.cpp \ $(top_srcdir)/lib/subintf.cpp \ $(top_srcdir)/lib/recorder.cpp \ + $(top_srcdir)/lib/orch_zmq_config.cpp \ $(top_srcdir)/orchagent/orchdaemon.cpp \ $(top_srcdir)/orchagent/orch.cpp \ $(top_srcdir)/orchagent/notifications.cpp \ @@ -77,6 +104,7 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/copporch.cpp \ $(top_srcdir)/orchagent/tunneldecaporch.cpp \ $(top_srcdir)/orchagent/qosorch.cpp \ + $(top_srcdir)/orchagent/buffer/bufferhelper.cpp \ $(top_srcdir)/orchagent/bufferorch.cpp \ $(top_srcdir)/orchagent/mirrororch.cpp \ $(top_srcdir)/orchagent/fdborch.cpp \ @@ -90,6 +118,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/saiattr.cpp \ $(top_srcdir)/orchagent/switch/switch_capabilities.cpp \ $(top_srcdir)/orchagent/switch/switch_helper.cpp \ + $(top_srcdir)/orchagent/switch/trimming/capabilities.cpp \ + $(top_srcdir)/orchagent/switch/trimming/helper.cpp \ $(top_srcdir)/orchagent/switchorch.cpp \ $(top_srcdir)/orchagent/pfcwdorch.cpp \ $(top_srcdir)/orchagent/pfcactionhandler.cpp \ @@ -99,6 +129,7 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/vrforch.cpp \ $(top_srcdir)/orchagent/countercheckorch.cpp \ $(top_srcdir)/orchagent/vxlanorch.cpp \ + $(top_srcdir)/orchagent/tunneltermhelper.cpp \ $(top_srcdir)/orchagent/vnetorch.cpp \ $(top_srcdir)/orchagent/dtelorch.cpp \ $(top_srcdir)/orchagent/flexcounterorch.cpp \ @@ -111,22 +142,40 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/mlagorch.cpp \ $(top_srcdir)/orchagent/isolationgrouporch.cpp \ $(top_srcdir)/orchagent/macsecorch.cpp \ + $(top_srcdir)/orchagent/macsecpost.cpp \ $(top_srcdir)/orchagent/lagid.cpp \ $(top_srcdir)/orchagent/bfdorch.cpp \ + $(top_srcdir)/orchagent/icmporch.cpp \ $(top_srcdir)/orchagent/srv6orch.cpp \ $(top_srcdir)/orchagent/nvgreorch.cpp \ $(top_srcdir)/cfgmgr/portmgr.cpp \ $(top_srcdir)/cfgmgr/sflowmgr.cpp \ $(top_srcdir)/orchagent/zmqorch.cpp \ + $(top_srcdir)/orchagent/dash/dashenifwdorch.cpp \ + $(top_srcdir)/orchagent/dash/dashenifwdinfo.cpp \ $(top_srcdir)/orchagent/dash/dashaclorch.cpp \ $(top_srcdir)/orchagent/dash/dashorch.cpp \ $(top_srcdir)/orchagent/dash/dashaclgroupmgr.cpp \ $(top_srcdir)/orchagent/dash/dashtagmgr.cpp \ $(top_srcdir)/orchagent/dash/dashrouteorch.cpp \ + $(top_srcdir)/orchagent/dash/dashtunnelorch.cpp \ $(top_srcdir)/orchagent/dash/dashvnetorch.cpp \ + $(top_srcdir)/orchagent/dash/dashhaorch.cpp \ + $(top_srcdir)/orchagent/dash/dashmeterorch.cpp \ + $(top_srcdir)/orchagent/dash/dashportmaporch.cpp \ $(top_srcdir)/cfgmgr/buffermgrdyn.cpp \ $(top_srcdir)/warmrestart/warmRestartAssist.cpp \ - $(top_srcdir)/orchagent/dash/pbutils.cpp + $(top_srcdir)/orchagent/dash/pbutils.cpp \ + $(top_srcdir)/cfgmgr/coppmgr.cpp \ + $(top_srcdir)/orchagent/twamporch.cpp \ + $(top_srcdir)/orchagent/stporch.cpp \ + $(top_srcdir)/orchagent/nexthopkey.cpp \ + $(top_srcdir)/orchagent/high_frequency_telemetry/hftelorch.cpp \ + $(top_srcdir)/orchagent/high_frequency_telemetry/hftelprofile.cpp \ + $(top_srcdir)/orchagent/high_frequency_telemetry/counternameupdater.cpp \ + $(top_srcdir)/orchagent/high_frequency_telemetry/hftelutils.cpp \ + $(top_srcdir)/orchagent/high_frequency_telemetry/hftelgroup.cpp + tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp $(FLEX_CTR_DIR)/flowcounterrouteorch.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp @@ -212,24 +261,29 @@ tests_teammgrd_SOURCES = teammgrd/teammgr_ut.cpp \ tests_teammgrd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/lib tests_teammgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_teammgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_teammgrd_INCLUDES) -tests_teammgrd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ - -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main +tests_teammgrd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -ldl -lhiredis \ + -lswsscommon -lgtest -lgtest_main -lzmq -lpthread -lgmock -lgmock_main ## fpmsyncd unit tests tests_fpmsyncd_SOURCES = fpmsyncd/test_fpmlink.cpp \ fpmsyncd/test_routesync.cpp \ + fpmsyncd/receive_srv6_steer_routes_ut.cpp \ + fpmsyncd/receive_srv6_mysids_ut.cpp \ + fpmsyncd/ut_helpers_fpmsyncd.cpp \ fake_netlink.cpp \ fake_warmstarthelper.cpp \ fake_producerstatetable.cpp \ mock_dbconnector.cpp \ mock_table.cpp \ mock_hiredis.cpp \ + $(top_srcdir)/lib/orch_zmq_config.cpp \ $(top_srcdir)/warmrestart/ \ $(top_srcdir)/fpmsyncd/fpmlink.cpp \ $(top_srcdir)/fpmsyncd/routesync.cpp -tests_fpmsyncd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/tests_fpmsyncd -I$(top_srcdir)/lib -I$(top_srcdir)/warmrestart +tests_fpmsyncd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/tests_fpmsyncd -I$(top_srcdir)/lib -I$(top_srcdir)/warmrestart -I$(top_srcdir)/fpmsyncd +tests_fpmsyncd_CXXFLAGS = -Wl,-wrap,rtnl_link_i2name tests_fpmsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_fpmsyncd_INCLUDES) tests_fpmsyncd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ @@ -251,4 +305,3 @@ tests_response_publisher_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CF tests_response_publisher_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_response_publisher_INCLUDES) tests_response_publisher_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread - diff --git a/tests/mock_tests/aclorch_rule_ut.cpp b/tests/mock_tests/aclorch_rule_ut.cpp new file mode 100644 index 00000000000..277986ade4a --- /dev/null +++ b/tests/mock_tests/aclorch_rule_ut.cpp @@ -0,0 +1,304 @@ +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_orch_test.h" +#include "check.h" + +EXTERN_MOCK_FNS + +/* + This test provides a framework to mock create_acl_entry & remove_acl_entry API's +*/ +namespace aclorch_rule_test +{ + DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); + /* To mock Redirect Action functionality */ + DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); + + using namespace ::testing; + using namespace std; + using namespace saimeta; + using namespace swss; + using namespace mock_orch_test; + + struct SaiMockState + { + /* Add extra attributes on demand */ + vector create_attrs; + sai_status_t create_status = SAI_STATUS_SUCCESS; + sai_status_t remove_status = SAI_STATUS_SUCCESS; + sai_object_id_t remove_oid; + sai_object_id_t create_oid; + + sai_status_t handleCreate(sai_object_id_t *sai, sai_object_id_t switch_id, uint32_t attr_count, const sai_attribute_t *attr_list) + { + *sai = create_oid; + create_attrs.clear(); + for (uint32_t i = 0; i < attr_count; ++i) + { + create_attrs.emplace_back(attr_list[i]); + } + return create_status; + } + + sai_status_t handleRemove(sai_object_id_t oid) + { + EXPECT_EQ(oid, remove_oid); + return remove_status; + } + }; + + struct AclOrchRuleTest : public MockOrchTest + { + unique_ptr aclMockState; + + void PostSetUp() override + { + INIT_SAI_API_MOCK(acl); + INIT_SAI_API_MOCK(next_hop); + MockSaiApis(); + + aclMockState = make_unique(); + /* Port init done is a pre-req for Aclorch */ + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_app_db.get(), APP_PORT_TABLE_NAME, 1, 1), gPortsOrch, APP_PORT_TABLE_NAME)); + consumer->addToSync({ { "PortInitDone", EMPTY_PREFIX, { { "", "" } } } }); + static_cast(gPortsOrch)->doTask(*consumer.get()); + } + + void PreTearDown() override + { + aclMockState.reset(); + RestoreSaiApis(); + DEINIT_SAI_API_MOCK(next_hop); + DEINIT_SAI_API_MOCK(acl); + } + + void doAclTableTypeTask(const deque &entries) + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_config_db.get(), CFG_ACL_TABLE_TYPE_TABLE_NAME, 1, 1), + gAclOrch, CFG_ACL_TABLE_TYPE_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gAclOrch)->doTask(*consumer); + } + + void doAclTableTask(const deque &entries) + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_config_db.get(), CFG_ACL_TABLE_TABLE_NAME, 1, 1), + gAclOrch, CFG_ACL_TABLE_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gAclOrch)->doTask(*consumer); + } + + void doAclRuleTask(const deque &entries) + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_config_db.get(), CFG_ACL_RULE_TABLE_NAME, 1, 1), + gAclOrch, CFG_ACL_RULE_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gAclOrch)->doTask(*consumer); + } + }; + + struct AclRedirectActionTest : public AclOrchRuleTest + { + string acl_table_type = "TEST_ACL_TABLE_TYPE"; + string acl_table = "TEST_ACL_TABLE"; + string acl_rule = "TEST_ACL_RULE"; + + string mock_tunnel_name = "tunnel0"; + string mock_invalid_tunnel_name = "tunnel1"; + string mock_src_ip = "20.0.0.1"; + string mock_nh_ip_str = "20.0.0.3"; + string mock_invalid_nh_ip_str = "20.0.0.4"; + sai_object_id_t nh_oid = 0x400000000064d; + + void PostSetUp() override + { + AclOrchRuleTest::PostSetUp(); + + /* Create a tunnel */ + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME, 1, 1), + m_VxlanTunnelOrch, APP_VXLAN_TUNNEL_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + mock_tunnel_name, + SET_COMMAND, + { + { "src_ip", mock_src_ip } + } + } + } + )); + static_cast(m_VxlanTunnelOrch)->doTask(*consumer.get()); + + populateAclTale(); + setDefaultMockState(); + } + + void PreTearDown() override + { + AclOrchRuleTest::PreTearDown(); + + /* Delete the Tunnel Object */ + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME, 1, 1), + m_VxlanTunnelOrch, APP_VXLAN_TUNNEL_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + mock_tunnel_name, + DEL_COMMAND, + { } + } + } + )); + static_cast(m_VxlanTunnelOrch)->doTask(*consumer.get()); + } + + void createTunnelNH(string ip, uint32_t vni) + { + IpAddress mock_nh_ip(ip); + ASSERT_EQ(m_VxlanTunnelOrch->createNextHopTunnel(mock_tunnel_name, mock_nh_ip, MacAddress(), vni), nh_oid); + } + + void populateAclTale() + { + /* Create a Table type and Table */ + doAclTableTypeTask({ + { + acl_table_type, + SET_COMMAND, + { + { ACL_TABLE_TYPE_MATCHES, string(MATCH_DST_IP) + "," + MATCH_TUNNEL_TERM }, + { ACL_TABLE_TYPE_ACTIONS, ACTION_REDIRECT_ACTION }, + } + } + }); + doAclTableTask({ + { + acl_table, + SET_COMMAND, + { + { ACL_TABLE_TYPE, acl_table_type }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + } + } + }); + } + + void addTunnelNhRule(string ip, string tunnel_name, string vni) + { + string redirect_str = ip + "@" + tunnel_name; + if (!vni.empty()) + { + redirect_str = redirect_str + ',' + vni; + } + + /* Create a rule */ + doAclRuleTask({ + { + acl_table + "|" + acl_rule, + SET_COMMAND, + { + { RULE_PRIORITY, "9999" }, + { MATCH_DST_IP, "10.0.0.1/24" }, + { MATCH_TUNNEL_TERM, "true" }, + { ACTION_REDIRECT_ACTION, redirect_str } + } + } + }); + } + + void delTunnelNhRule() + { + doAclRuleTask( + { + { + acl_table + "|" + acl_rule, + DEL_COMMAND, + { } + } + }); + } + + void setDefaultMockState() + { + aclMockState->create_status = SAI_STATUS_SUCCESS; + aclMockState->remove_status = SAI_STATUS_SUCCESS; + aclMockState->create_oid = nh_oid; + aclMockState->remove_oid = nh_oid; + } + }; + + TEST_F(AclRedirectActionTest, TunnelNH) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop).WillOnce(DoAll(SetArgPointee<0>(nh_oid), + Return(SAI_STATUS_SUCCESS) + )); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleCreate)); + addTunnelNhRule(mock_nh_ip_str, mock_tunnel_name, "1000"); + + /* Verify SAI attributes and if the rule is created */ + SaiAttributeList attr_list(SAI_OBJECT_TYPE_ACL_ENTRY, vector({ + { "SAI_ACL_ENTRY_ATTR_TABLE_ID", sai_serialize_object_id(gAclOrch->getTableById(acl_table)) }, + { "SAI_ACL_ENTRY_ATTR_PRIORITY", "9999" }, + { "SAI_ACL_ENTRY_ATTR_ADMIN_STATE", "true" }, + { "SAI_ACL_ENTRY_ATTR_ACTION_COUNTER", "oid:0xfffffffffff"}, + { "SAI_ACL_ENTRY_ATTR_FIELD_DST_IP", "10.0.0.1&mask:255.255.255.0"}, + { "SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_TERMINATED", "true"}, + { "SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT", sai_serialize_object_id(nh_oid) } + }), false); + vector skip_list = {false, false, false, true, false, false, false}; /* skip checking counter */ + ASSERT_TRUE(Check::AttrListSubset(SAI_OBJECT_TYPE_ACL_ENTRY, aclMockState->create_attrs, attr_list, skip_list)); + ASSERT_TRUE(gAclOrch->getAclRule(acl_table, acl_rule)); + + /* ACLRule is deleted along with Nexthop */ + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop).Times(1).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleRemove)); + delTunnelNhRule(); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } + + TEST_F(AclRedirectActionTest, TunnelNH_ExistingNhObject) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop).WillOnce(DoAll(SetArgPointee<0>(nh_oid), + Return(SAI_STATUS_SUCCESS) + )); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleCreate)); + createTunnelNH(mock_nh_ip_str, 1000); + addTunnelNhRule(mock_nh_ip_str, mock_tunnel_name, "1000"); + ASSERT_TRUE(gAclOrch->getAclRule(acl_table, acl_rule)); + + /* ACL Rule is deleted but nexthop is not deleted */ + EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry).WillOnce(testing::Invoke(aclMockState.get(), &SaiMockState::handleRemove)); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop).Times(0); + delTunnelNhRule(); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } + + TEST_F(AclRedirectActionTest, TunnelNH_InvalidTunnel) + { + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).Times(0); + addTunnelNhRule(mock_nh_ip_str, mock_invalid_tunnel_name, ""); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } + + TEST_F(AclRedirectActionTest, TunnelNH_InvalidNextHop) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop).WillOnce( + Return(SAI_STATUS_FAILURE) /* create next hop fails */ + ); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry).Times(0); + addTunnelNhRule(mock_invalid_nh_ip_str, mock_tunnel_name, ""); + ASSERT_FALSE(gAclOrch->getAclRule(acl_table, acl_rule)); + } +} diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp old mode 100644 new mode 100755 index 8a05e9188ef..999c6aca503 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -24,6 +24,8 @@ extern sai_port_api_t *sai_port_api; extern sai_vlan_api_t *sai_vlan_api; extern sai_bridge_api_t *sai_bridge_api; extern sai_route_api_t *sai_route_api; +extern sai_route_api_t *sai_neighbor_api; +extern sai_route_api_t *sai_next_hop_api; extern sai_mpls_api_t *sai_mpls_api; extern sai_next_hop_group_api_t* sai_next_hop_group_api; extern string gMySwitchType; @@ -318,6 +320,8 @@ namespace aclorch_test sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); sai_api_query(SAI_API_ROUTE, (void **)&sai_route_api); + sai_api_query(SAI_API_NEIGHBOR, (void **)&sai_neighbor_api); + sai_api_query(SAI_API_NEXT_HOP, (void **)&sai_next_hop_api); sai_api_query(SAI_API_MPLS, (void **)&sai_mpls_api); sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_NEXT_HOP_GROUP, (void **)&sai_next_hop_group_api); @@ -414,11 +418,16 @@ namespace aclorch_test gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); ASSERT_EQ(gSrv6Orch, nullptr); - vector srv6_tables = { - APP_SRV6_SID_LIST_TABLE_NAME, - APP_SRV6_MY_SID_TABLE_NAME + TableConnector srv6_sid_list_table(m_app_db.get(), APP_SRV6_SID_LIST_TABLE_NAME); + TableConnector srv6_my_sid_table(m_app_db.get(), APP_SRV6_MY_SID_TABLE_NAME); + TableConnector srv6_my_sid_cfg_table(m_config_db.get(), CFG_SRV6_MY_SID_TABLE_NAME); + + vector srv6_tables = { + srv6_sid_list_table, + srv6_my_sid_table, + srv6_my_sid_cfg_table }; - gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + gSrv6Orch = new Srv6Orch(m_config_db.get(), m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); ASSERT_EQ(gRouteOrch, nullptr); const int routeorch_pri = 5; @@ -440,7 +449,7 @@ namespace aclorch_test ASSERT_EQ(gMirrorOrch, nullptr); gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, - gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, policer_orch); + gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, policer_orch, gSwitchOrch); auto consumer = unique_ptr(new Consumer( new swss::ConsumerStateTable(m_app_db.get(), APP_PORT_TABLE_NAME, 1, 1), gPortsOrch, APP_PORT_TABLE_NAME)); @@ -459,6 +468,8 @@ namespace aclorch_test gMirrorOrch = nullptr; delete gRouteOrch; gRouteOrch = nullptr; + delete gFlowCounterRouteOrch; + gFlowCounterRouteOrch = nullptr; delete gSrv6Orch; gSrv6Orch = nullptr; delete gNeighOrch; @@ -488,6 +499,8 @@ namespace aclorch_test sai_vlan_api = nullptr; sai_bridge_api = nullptr; sai_route_api = nullptr; + sai_neighbor_api = nullptr; + sai_next_hop_api = nullptr; sai_mpls_api = nullptr; } @@ -884,6 +897,13 @@ namespace aclorch_test return false; } } + else if (attr_value == PACKET_ACTION_COPY) + { + if (it->second.getSaiAttr().value.aclaction.parameter.s32 != SAI_PACKET_ACTION_COPY) + { + return false; + } + } else { // unknown attr_value @@ -947,6 +967,30 @@ namespace aclorch_test return false; } } + else if (attr_name == MATCH_INNER_SRC_MAC || attr_name == MATCH_INNER_DST_MAC) + { + + auto it_field = rule_matches.find(attr_name == MATCH_INNER_SRC_MAC ? SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_MAC : + SAI_ACL_ENTRY_ATTR_FIELD_INNER_DST_MAC); + if (it_field == rule_matches.end()) + { + return false; + } + + if (attr_value != sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.data.mac)) + { + std::cerr << "MAC didn't match, Expected:" << attr_value << "\n" \ + << "Recieved: " << sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.data.mac) << "\n" ; + return false; + } + + if ("FF:FF:FF:FF:FF:FF" != sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.mask.mac)) + { + std::cerr << "MAC Mask didn't match, Expected: FF:FF:FF:FF:FF:FF\n" \ + << "Recieved: " << sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.data.mac) << "\n" ; + return false; + } + } else { // unknown attr_name @@ -970,13 +1014,18 @@ namespace aclorch_test return false; } } - else if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP || attr_name == MATCH_SRC_IPV6) + else if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP || attr_name == MATCH_SRC_IPV6 + || attr_name == MATCH_INNER_DST_MAC || attr_name == MATCH_INNER_SRC_MAC) { if (!validateAclRuleMatch(acl_rule, attr_name, attr_value)) { return false; } } + else if (attr_name == RULE_PRIORITY) + { + continue; + } else { // unknown attr_name @@ -1402,6 +1451,7 @@ namespace aclorch_test // Table not created without table type ASSERT_FALSE(orch->getAclTable(aclTableName)); + auto matches = string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + comma + MATCH_BTH_OPCODE + comma + MATCH_AETH_SYNDROME + comma + MATCH_TUNNEL_TERM; orch->doAclTableTypeTask( deque( { @@ -1411,7 +1461,7 @@ namespace aclorch_test { { ACL_TABLE_TYPE_MATCHES, - string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + comma + MATCH_BTH_OPCODE + comma + MATCH_AETH_SYNDROME + matches }, { ACL_TABLE_TYPE_BPOINT_TYPES, @@ -1435,6 +1485,7 @@ namespace aclorch_test { "SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE", "1:SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE" }, { "SAI_ACL_TABLE_ATTR_FIELD_BTH_OPCODE", "true" }, { "SAI_ACL_TABLE_ATTR_FIELD_AETH_SYNDROME", "true" }, + { "SAI_ACL_TABLE_ATTR_FIELD_TUNNEL_TERMINATED", "true" }, }; ASSERT_TRUE(validateAclTable( @@ -1551,11 +1602,46 @@ namespace aclorch_test ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); - orch->doAclTableTypeTask( + // Verify ACL_RULE with TUNN_TERM attribute + orch->doAclRuleTask( deque( { { - aclTableTypeName, + aclTableName + "|" + "TUNN_TERM_RULE0", + SET_COMMAND, + { + { MATCH_SRC_IP, "1.1.1.1/32" }, + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_TUNNEL_TERM, "true" } + } + }, + { + aclTableName + "|" + "TUNN_TERM_RULE1", + SET_COMMAND, + { + { MATCH_SRC_IP, "2.1.1.1/32" }, + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_TUNNEL_TERM, "false" } + } + } + } + ) + ); + + // Verify if the rules are created + ASSERT_TRUE(orch->getAclRule(aclTableName, "TUNN_TERM_RULE0")); + ASSERT_TRUE(orch->getAclRule(aclTableName, "TUNN_TERM_RULE1")); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + "TUNN_TERM_RULE0", + DEL_COMMAND, + {} + }, + { + aclTableName + "|" + "TUNN_TERM_RULE1", DEL_COMMAND, {} } @@ -1563,6 +1649,22 @@ namespace aclorch_test ) ); + // Make sure the rules are deleted + ASSERT_FALSE(orch->getAclRule(aclTableName, "TUNN_TERM_RULE0")); + ASSERT_FALSE(orch->getAclRule(aclTableName, "TUNN_TERM_RULE1")); + + orch->doAclTableTypeTask( + deque( + { + { + aclTableTypeName, + DEL_COMMAND, + {} + } + } + ) + ); + // Table still exists ASSERT_TRUE(orch->getAclTable(aclTableName)); ASSERT_FALSE(orch->getAclTableType(aclTableTypeName)); @@ -1887,4 +1989,379 @@ namespace aclorch_test // Restore sai_switch_api. sai_switch_api = old_sai_switch_api; } + + TEST_F(AclOrchTest, Match_Inner_Mac) + { + string aclTableTypeName = "MAC_MATCH_TABLE_TYPE"; + string aclTableName = "MAC_MATCH_TABLE"; + string aclRuleName = "MAC_MATCH_RULE0"; + + auto orch = createAclOrch(); + + auto matches = string(MATCH_INNER_DST_MAC) + comma + string(MATCH_INNER_SRC_MAC); + orch->doAclTableTypeTask( + deque( + { + { + aclTableTypeName, + SET_COMMAND, + { + { ACL_TABLE_TYPE_MATCHES, matches}, + { ACL_TABLE_TYPE_ACTIONS, ACTION_PACKET_ACTION } + } + } + }) + ); + + orch->doAclTableTask( + deque( + { + { + aclTableName, + SET_COMMAND, + { + { ACL_TABLE_TYPE, aclTableTypeName }, + { ACL_TABLE_STAGE, STAGE_INGRESS } + } + } + }) + ); + + ASSERT_TRUE(orch->getAclTable(aclTableName)); + + auto tableOid = orch->getTableById(aclTableName); + ASSERT_NE(tableOid, SAI_NULL_OBJECT_ID); + const auto &aclTables = orch->getAclTables(); + auto it_table = aclTables.find(tableOid); + ASSERT_NE(it_table, aclTables.end()); + + const auto &aclTableObject = it_table->second; + + auto kvfAclRule = deque({ + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { RULE_PRIORITY, "9999" }, + { MATCH_INNER_DST_MAC, "FF:EE:DD:CC:BB:AA" }, + { MATCH_INNER_SRC_MAC, "11:22:33:44:55:66" }, + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP } + } + } + }); + orch->doAclRuleTask(kvfAclRule); + + auto it_rule = aclTableObject.rules.find(aclRuleName); + ASSERT_NE(it_rule, aclTableObject.rules.end()); + ASSERT_TRUE(validateAclRuleByConfOp(*it_rule->second, kfvFieldsValues(kvfAclRule.front()))); + } + + TEST_F(AclOrchTest, AclInnerSourceMacRewriteTableValidation) + { + const string aclTableTypeName = "INNER_SRC_MAC_REWRITE_TABLE_TYPE"; + const string aclTableName = "INNER_SRC_MAC_REWRITE_TABLE"; + const string aclRuleName = "INNER_SRC_MAC_REWRITE_RULE"; + + auto orch = createAclOrch(); + + // Creating a new custom table type INNER_SRC_MAC_REWRITE_TABLE_TYPE + orch->doAclTableTypeTask( + deque( + { + { + aclTableTypeName, + SET_COMMAND, + { + { + ACL_TABLE_TYPE_MATCHES, + string(MATCH_INNER_SRC_IP) + comma + MATCH_TUNNEL_VNI + }, + { + ACL_TABLE_TYPE_BPOINT_TYPES, + string(BIND_POINT_TYPE_PORT) + comma + BIND_POINT_TYPE_PORTCHANNEL + }, + { + ACL_TABLE_TYPE_ACTIONS, + ACTION_INNER_SRC_MAC_REWRITE_ACTION + } + } + } + } + ) + ); + + // Creating a table of the type INNER_SRC_MAC_REWRITE_TABLE_TYPE + orch->doAclTableTask( + deque( + { + { + aclTableName, + SET_COMMAND, + { + { ACL_TABLE_DESCRIPTION, "Inner src mac rewrite test table" }, + { ACL_TABLE_TYPE, aclTableTypeName}, + { ACL_TABLE_STAGE, STAGE_EGRESS }, + { ACL_TABLE_PORTS, "1,2" } + } + } + } + ) + ); + + ASSERT_TRUE(orch->getAclTable(aclTableName)); + + auto fvs = vector{ + { "SAI_ACL_TABLE_ATTR_FIELD_INNER_SRC_IP", "true" }, + { "SAI_ACL_TABLE_ATTR_FIELD_TUNNEL_VNI", "true" } + }; + + ASSERT_TRUE(validateAclTable( + orch->getAclTable(aclTableName)->getOid(), + *orch->getAclTable(aclTableName), + make_shared(SAI_OBJECT_TYPE_ACL_TABLE, fvs, false)) + ); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { MATCH_INNER_SRC_IP, "1.1.1.1/24" }, + { MATCH_TUNNEL_VNI, "233" }, + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + } + } + } + ) + ); + + // Packet action is not supported on this table + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { MATCH_INNER_SRC_IP, "1.1.1." }, + { ACTION_INNER_SRC_MAC_REWRITE_ACTION, "AA:BB:CC:DD:44:66" } + } + } + } + ) + ); + + // Invalid Inner src ip not supported on this table + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { MATCH_INNER_SRC_IP, "1.1.1.1/24" }, + { ACTION_INNER_SRC_MAC_REWRITE_ACTION, "BB:CC:DD:44:66" } + } + } + } + ) + ); + + // Invalid mac address not supported on this table + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { MATCH_INNER_SRC_IP, "1.1.1.1/24" }, + { MATCH_TUNNEL_VNI, "233" }, + { ACTION_INNER_SRC_MAC_REWRITE_ACTION, "AA:BB:CC:DD:44:66" } + } + } + } + ) + ); + + // Inner src mac action is supported on this table + ASSERT_TRUE(orch->getAclRule(aclTableName, aclRuleName)); + + // Rule update verification + class AclRuleTest : public AclRuleInnerSrcMacRewrite + { + public: + AclRuleTest(AclOrch* orch, string rule, string table): + AclRuleInnerSrcMacRewrite(orch, rule, table, true) + {} + + void setCounterEnabled(bool enabled) + { + m_createCounter = enabled; + } + + void disableMatch(sai_acl_entry_attr_t attr) + { + m_matches.erase(attr); + } + }; + + // First Update, 2 matches and 1 action added to the rule + auto rule = make_shared(orch->m_aclOrch, aclRuleName, aclTableName); + ASSERT_TRUE(rule->validateAddPriority(RULE_PRIORITY, "800")); + ASSERT_TRUE(rule->validateAddMatch(MATCH_INNER_SRC_IP, "2.2.2.2")); + ASSERT_FALSE(rule->validateAddMatch(MATCH_SRC_IP, "12.13.12.12/24")); + ASSERT_TRUE(rule->validateAddAction(ACTION_INNER_SRC_MAC_REWRITE_ACTION, "60:30:34:AB:CD:EF")); + ASSERT_TRUE(rule->validateAddMatch(MATCH_TUNNEL_VNI, "1000")); + + ASSERT_TRUE(orch->m_aclOrch->addAclRule(rule, aclTableName)); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_PRIORITY), "800"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP), "2.2.2.2&mask:255.255.255.255"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI), "1000&mask:0xffffffff"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_ACTION_SET_INNER_SRC_MAC), "60:30:34:AB:CD:EF"); + + // Second update, Inner src ip and tunnel vni correctly updated + auto updatedRule = make_shared(*rule); + ASSERT_TRUE(updatedRule->validateAddPriority(RULE_PRIORITY, "900")); + ASSERT_TRUE(updatedRule->validateAddMatch(MATCH_INNER_SRC_IP, "2.3.2.2/21")); + ASSERT_TRUE(updatedRule->validateAddMatch(MATCH_TUNNEL_VNI, "1100")); + + // Invalid action & extra match src ip are invalidated + ASSERT_FALSE(updatedRule->validateAddMatch(MATCH_SRC_IP, "12.13.12.12/24")); + ASSERT_FALSE(updatedRule->validateAddAction(ACTION_INNER_SRC_MAC_REWRITE_ACTION, "60:30:34:AB:CD")); + + ASSERT_TRUE(orch->m_aclOrch->updateAclRule(updatedRule)); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_PRIORITY), "900"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI), "1100&mask:0xffffffff"); + + // SRC IP SAI attribute is updated even though the match is not validated + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_FIELD_SRC_IP), "12.13.12.12&mask:255.255.255.0"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP), "2.3.2.2&mask:255.255.248.0"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_ACTION_SET_INNER_SRC_MAC), "60:30:34:AB:CD:EF"); + + // Third update, change in 2 matches and with invalid action and disable counter + auto updatedRule2 = make_shared(*updatedRule); + updatedRule2->setCounterEnabled(false); + ASSERT_TRUE(updatedRule2->validateAddMatch(MATCH_INNER_SRC_IP, "3.3.3.3/24")); + ASSERT_TRUE(updatedRule2->validateAddMatch(MATCH_TUNNEL_VNI, "1100")); + ASSERT_FALSE(updatedRule2->validateAddAction(ACTION_INNER_SRC_MAC_REWRITE_ACTION, "")); + ASSERT_TRUE(orch->m_aclOrch->updateAclRule(updatedRule2)); + + // Verify if the match type is not disabled + updatedRule2->disableMatch(SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP); + ASSERT_TRUE(validateAclRuleCounter(*orch->m_aclOrch->getAclRule(aclTableName, aclRuleName), false)); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_PRIORITY), "900"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP), "3.3.3.3&mask:255.255.255.0"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI), "1100&mask:0xffffffff"); + ASSERT_EQ(getAclRuleSaiAttribute(*rule, SAI_ACL_ENTRY_ATTR_ACTION_SET_INNER_SRC_MAC), "60:30:34:AB:CD:EF"); + + // Re-enable counter + auto updatedRule3 = make_shared(*updatedRule2); + updatedRule3->setCounterEnabled(true); + ASSERT_TRUE(orch->m_aclOrch->updateAclRule(updatedRule3)); + ASSERT_TRUE(validateAclRuleCounter(*orch->m_aclOrch->getAclRule(aclTableName, aclRuleName), true)); + + // Remove rule + ASSERT_TRUE(orch->m_aclOrch->removeAclRule(rule->getTableId(), rule->getId())); + + orch->doAclTableTypeTask( + deque( + { + { + aclTableTypeName, + DEL_COMMAND, + {} + } + } + ) + ); + + // Table still exists + ASSERT_TRUE(orch->getAclTable(aclTableName)); + ASSERT_FALSE(orch->getAclTableType(aclTableTypeName)); + + orch->doAclTableTask( + deque( + { + { + aclTableName, + DEL_COMMAND, + {} + } + } + ) + ); + + // Table is removed + ASSERT_FALSE(orch->getAclTable(aclTableName)); + + } + + TEST_F(AclOrchTest, AclRule_TrimDisableAction) + { + const std::string aclTableTypeName = "TRIM_TYPE"; + const std::string aclTableName = "TRIM_TABLE"; + const std::string aclRuleName = "TRIM_RULE"; + + // Create ACL OA + + auto orch = createAclOrch(); + + // Create ACL table type + + auto tableTypeKofvt = std::deque({ + { + aclTableTypeName, + SET_COMMAND, + { + { ACL_TABLE_TYPE_MATCHES, MATCH_SRC_IP }, + { ACL_TABLE_TYPE_ACTIONS, ACTION_DISABLE_TRIM }, + { ACL_TABLE_TYPE_BPOINT_TYPES, BIND_POINT_TYPE_PORT }, + } + } + }); + orch->doAclTableTypeTask(tableTypeKofvt); + ASSERT_NE(orch->getAclTableType(aclTableTypeName), nullptr); + + // Create ACL table + + auto tableKofvt = std::deque({ + { + aclTableName, + SET_COMMAND, + { + { ACL_TABLE_DESCRIPTION, "Test trim table" }, + { ACL_TABLE_TYPE, aclTableTypeName }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + { ACL_TABLE_PORTS, "1,2" }, + } + } + }); + orch->doAclTableTask(tableKofvt); + ASSERT_NE(orch->getAclTable(aclTableName), nullptr); + + // Create ACL rule + + auto ruleKofvt = std::deque({ + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { RULE_PRIORITY, "999" }, + { MATCH_SRC_IP, "1.1.1.1/32" }, + { ACTION_PACKET_ACTION, PACKET_ACTION_DISABLE_TRIM }, + } + } + }); + orch->doAclRuleTask(ruleKofvt); + ASSERT_NE(orch->getAclRule(aclTableName, aclRuleName), nullptr); + } } // namespace nsAclOrchTest diff --git a/tests/mock_tests/buffermgrdyn_ut.cpp b/tests/mock_tests/buffermgrdyn_ut.cpp index 1c23a17410b..97c9d44df08 100644 --- a/tests/mock_tests/buffermgrdyn_ut.cpp +++ b/tests/mock_tests/buffermgrdyn_ut.cpp @@ -9,8 +9,8 @@ #include "mock_table.h" #define private public #include "buffermgrdyn.h" -#undef private #include "warm_restart.h" +#undef private extern string gMySwitchType; @@ -137,6 +137,11 @@ namespace buffermgrdyn_test {"size", "1024000"} }; + testBufferProfile["ingress_lossy_profile"] = { + {"dynamic_th", "7"}, + {"pool", "ingress_lossless_pool"}, + {"size", "0"} + }; testBufferProfile["ingress_lossless_profile"] = { {"dynamic_th", "7"}, {"pool", "ingress_lossless_pool"}, @@ -462,6 +467,46 @@ namespace buffermgrdyn_test } } + void VerifyPgExists(const string &port, const string &pg, bool shouldExist) + { + if (shouldExist) + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port].find(pg) != m_dynamicBuffer->m_portPgLookup[port].end()) + << "PG " << pg << " should exist for port " << port; + } + else + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port].find(pg) == m_dynamicBuffer->m_portPgLookup[port].end()) + << "PG " << pg << " should not exist for port " << port; + } + } + + void VerifyPgProfile(const string &port, const string &pg, const string &expectedProfile) + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup[port][pg].running_profile_name, expectedProfile) + << "PG " << pg << " should have profile " << expectedProfile; + } + + void VerifyPgProfileEmpty(const string &port, const string &pg) + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][pg].running_profile_name.empty()) + << "PG " << pg << " should have an empty profile"; + } + + void VerifyProfileExists(const string &profile, bool shouldExist) + { + if (shouldExist) + { + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find(profile) != m_dynamicBuffer->m_bufferProfileLookup.end()) + << "Profile " << profile << " should exist"; + } + else + { + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find(profile) == m_dynamicBuffer->m_bufferProfileLookup.end()) + << "Profile " << profile << " should not exist"; + } + } + void TearDown() override { delete m_dynamicBuffer; @@ -522,8 +567,8 @@ namespace buffermgrdyn_test InitDefaultBufferProfile(); appBufferProfileTable.getKeys(keys); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); for (auto i : testBufferProfile) { CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); @@ -647,7 +692,7 @@ namespace buffermgrdyn_test appBufferPoolTable.getKeys(keys); ASSERT_EQ(keys.size(), 3); ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); for (auto i : testBufferProfile) { CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); @@ -823,6 +868,54 @@ namespace buffermgrdyn_test } } + TEST_F(BufferMgrDynTest, BufferMgrDynTestReclaimingBufferProfileList) + { + vector fieldValues; + + SetUpReclaimingBuffer(); + shared_ptr> zero_profile = make_shared>(zeroProfile); + + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(zero_profile); + + stateBufferTable.set("Ethernet0", + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + m_dynamicBuffer->addExistingData(&stateBufferTable); + static_cast(m_dynamicBuffer)->doTask(); + + statePortTable.set("Ethernet0", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + InitPort("Ethernet0", "down"); + + InitBufferPool(); + InitDefaultBufferProfile(); + + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + // No profile lists in the database until buffer pools are ready + CheckProfileList("Ethernet0", true, "ingress_lossless_profile", false); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", false); + + // Make buffer pools ready + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + // Zero profile lists should be in the database + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet0", fieldValues)); + ASSERT_EQ(fvValue(fieldValues[0]), "ingress_lossless_zero_profile"); + fieldValues.clear(); + ASSERT_TRUE(appBufferEgrProfileListTable.get("Ethernet0", fieldValues)); + ASSERT_EQ(fvValue(fieldValues[0]), "egress_lossless_zero_profile,egress_lossy_zero_profile"); + } + /* * Clear qos with reclaiming buffer * @@ -885,8 +978,8 @@ namespace buffermgrdyn_test InitDefaultBufferProfile(); appBufferProfileTable.getKeys(keys); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); for (auto i : testBufferProfile) { CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); @@ -1219,8 +1312,8 @@ namespace buffermgrdyn_test ASSERT_EQ(keys.size(), 3); InitDefaultBufferProfile(); appBufferProfileTable.getKeys(keys); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); m_dynamicBuffer->m_bufferCompletelyInitialized = true; m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; @@ -1423,4 +1516,526 @@ namespace buffermgrdyn_test HandleTable(cableLengthTable); ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_READY); } + + /* + Purpose: To verify the behavior of the buffer mgr dynamic when the cable length is set to "0m". + Here set to 0m indicates no lossless profile will be created, can still create lossy profile. + Steps: + 1. Initialize default lossless parameters and MMU size + 2. Initialize port and verify initial state + 3. Set port initialization as done and process tasks + 4. Initialize buffer pools and verify + 5. Initialize buffer profiles and PGs with 5m cable length + 6. Verify PG configuration with 5m cable length + 7. Create a lossy PG and change cable length to 0m and verify lossy PG profile still there + 8. Verify that no 0m profile is created and existing profile is removed + 9. Verify that the running_profile_name is cleared for lossless PGs + 10. Verify that the 5m profile is removed + 11. Try to create a new lossless PG with 0m cable length + 12. Verify that the PG exists but has no profile assigned + 13. Change cable length back to 5m and verify profiles are restored correctly + 14. Verify that profiles are removed again when cable length is set back to 0m + 15. Additional verification of PG state + 16. MTU updates work correctly with non-zero cable length + 17. Create a lossy PG and change cable length to 0m + 18. Verify that lossy PG keeps its profile while lossless PGs have empty profiles + 19. Verify that lossless profiles are removed when cable length is set back to 0m + 20. Update cable length to 0m + 21. Verify that lossy PG keeps its profile while lossless PGs have empty profiles + */ + + TEST_F(BufferMgrDynTest, SkipProfileCreationForZeroCableLength) + { + vector fieldValues; + vector keys; + + // SETUP: Initialize the environment + // 1. Initialize default lossless parameters and MMU size + InitDefaultLosslessParameter(); + InitMmuSize(); + StartBufferManager(); + + // 2. Initialize port and verify initial state + InitPort(); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + // 3. Set port initialization as done and process tasks + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + // 4. Initialize buffer pools and verify + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + + // 5. Initialize buffer profiles and PGs with 5m cable length + InitBufferPg("Ethernet0|3-4"); + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); + InitCableLength("Ethernet0", "5m"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + // 6. Verify PG configuration with 5m cable length + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + + // TEST CASE 1: No new lossless profile is created when cable length is "0m" + // 7. Create a lossy PG and change cable length to 0m and verify lossy PG profile still there + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + cableLengthTable.set("AZURE", {{"Ethernet0", "0m"}}); + HandleTable(cableLengthTable); + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + + // 8. Verify that no 0m profile is created and existing profile is removed + auto zeroMProfile = "pg_lossless_100000_0m_profile"; + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find(zeroMProfile) == m_dynamicBuffer->m_bufferProfileLookup.end()) + << "No lossless profile should be created for 0m cable length"; + + // 9. Verify that the running_profile_name is cleared for lossless PGs + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name.empty()) + << "Running profile name should be empty for lossless PGs when cable length is 0m"; + + // 10. Verify that the 5m profile is removed + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find("pg_lossless_100000_5m_profile") == m_dynamicBuffer->m_bufferProfileLookup.end()) + << "Previous lossless profile should be removed when cable length is 0m"; + + // TEST CASE 2: No new lossless PG is created when cable length is "0m" + // 11. Try to create a new lossless PG with 0m cable length + InitBufferPg("Ethernet0|6"); + + // 12. Verify that the PG exists but has no profile assigned + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup["Ethernet0"].find("Ethernet0:6") != m_dynamicBuffer->m_portPgLookup["Ethernet0"].end()) + << "PG should be created even with 0m cable length"; + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup["Ethernet0"]["Ethernet0:6"].running_profile_name.empty()) + << "No profile should be assigned to lossless PG when cable length is 0m"; + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + + // TEST CASE 3: Profiles are restored when cable length is changed back to non-zero + // 13. Change cable length back to 5m + cableLengthTable.set("AZURE", {{"Ethernet0", "5m"}}); + HandleTable(cableLengthTable); + m_dynamicBuffer->doTask(); + + // 14. Verify that profiles are restored correctly + CheckPg("Ethernet0", "Ethernet0:3-4", "pg_lossless_100000_5m_profile"); + CheckPg("Ethernet0", "Ethernet0:6", "pg_lossless_100000_5m_profile"); + + // 15. Additional verification of PG state + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + VerifyPgExists("Ethernet0", "Ethernet0:3-4", true); + VerifyPgExists("Ethernet0", "Ethernet0:6", true); + VerifyPgProfile("Ethernet0", "Ethernet0:3-4", "pg_lossless_100000_5m_profile"); + VerifyPgProfile("Ethernet0", "Ethernet0:6", "pg_lossless_100000_5m_profile"); + + // TEST CASE 4: Profiles are removed again when cable length is set back to 0m + // 16. Change cable length back to 0m + cableLengthTable.set("AZURE", {{"Ethernet0", "0m"}}); + HandleTable(cableLengthTable); + m_dynamicBuffer->doTask(); + + // 17. Verify that profiles are removed but PGs remain + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + VerifyProfileExists("pg_lossless_100000_0m_profile", false); + VerifyProfileExists("pg_lossless_100000_5m_profile", false); + VerifyPgExists("Ethernet0", "Ethernet0:3-4", true); + VerifyPgExists("Ethernet0", "Ethernet0:6", true); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:3-4"); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:6"); + + // TEST CASE 5: MTU updates work correctly with non-zero cable length + // 18. Change cable length to 5m and update MTU + cableLengthTable.set("AZURE", {{"Ethernet0", "5m"}}); + HandleTable(cableLengthTable); + portTable.set("Ethernet0", {{"mtu", "4096"}}); + HandleTable(portTable); + + // 19. Verify profiles are created correctly with new MTU + CheckPg("Ethernet0", "Ethernet0:3-4", "pg_lossless_100000_5m_mtu4096_profile"); + CheckPg("Ethernet0", "Ethernet0:6", "pg_lossless_100000_5m_mtu4096_profile"); + + // 20. Update cable length to 0m + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + cableLengthTable.set("AZURE", {{"Ethernet0", "0m"}}); + HandleTable(cableLengthTable); + + // 21. Verify that lossy PG keeps its profile while lossless PGs have empty profiles + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgExists("Ethernet0", "Ethernet0:3-4", true); + VerifyPgExists("Ethernet0", "Ethernet0:6", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:3-4"); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:6"); + VerifyProfileExists("pg_lossless_100000_0m_profile", false); + VerifyProfileExists("pg_lossless_100000_5m_profile", false); + VerifyProfileExists("pg_lossless_100000_5m_mtu4096_profile", false); + } + + /* + * Test checkSharedBufferPoolSize execution logic + * This test verifies the condition logic for when recalculateSharedBufferPool should be executed + * Logic: + * - Non-warm start: execute as soon as MMU size is available. + * - Warm start: execute only if both buffer is completely initialized AND buffer pools are ready. + */ + TEST_F(BufferMgrDynTest, TestCheckSharedBufferPoolSizeExecutionLogic) + { + // Initialize basic setup + InitDefaultLosslessParameter(); + InitMmuSize(); + StartBufferManager(); + + InitPort(); + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + // TEST CASE 1: MMU size empty - should not execute + m_dynamicBuffer->m_mmuSize = ""; + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + m_dynamicBuffer->m_bufferPoolReady = false; + + // Verify the condition logic - should be false when MMU size empty + // New condition: !m_mmuSize.empty() && (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_bufferCompletelyInitialized || !m_bufferPoolReady))) + bool conditionShouldNotExecute = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_FALSE(conditionShouldNotExecute) << "Condition should evaluate to false when MMU size is empty"; + + // Call checkSharedBufferPoolSize - should not execute recalculateSharedBufferPool + m_dynamicBuffer->checkSharedBufferPoolSize(false); + + // TEST CASE 2: MMU size available, buffer not initialized, buffer pool not ready (non-warm-start) - should execute + m_dynamicBuffer->m_mmuSize = "136209408"; + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + m_dynamicBuffer->m_bufferPoolReady = false; + + // Verify initial state + EXPECT_FALSE(m_dynamicBuffer->m_bufferPoolReady) << "Initial state: m_bufferPoolReady should be false"; + + // Call checkSharedBufferPoolSize - should execute recalculateSharedBufferPool + // New condition: !m_mmuSize.empty() && (!WarmStart::isWarmStart() || (m_bufferCompletelyInitialized || !m_bufferPoolReady)) + // In non-warm-start: true && (!false || (false || !false)) = true && (true || true) = true && true = true + m_dynamicBuffer->checkSharedBufferPoolSize(false); + + // Verify the condition logic + EXPECT_FALSE(WarmStart::isWarmStart()) << "Test setup is non-warm-start"; + bool conditionShouldExecute = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_TRUE(conditionShouldExecute) << "Condition should evaluate to true for execution in non-warm-start"; + + // TEST CASE 3: MMU size available, buffer not initialized, buffer pool ready (non-warm-start) - should execute + // In new logic, non-warm start always executes as soon as MMU size is available + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + m_dynamicBuffer->m_bufferPoolReady = true; + + // Verify the condition logic - should be true in non-warm-start + // New condition: true && (!false || (false || !true)) = true && (true || false) = true && true = true + bool conditionShouldExecute3 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_TRUE(conditionShouldExecute3) << "Condition should evaluate to true in non-warm-start (true && true = true)"; + + // Call checkSharedBufferPoolSize - should execute + m_dynamicBuffer->checkSharedBufferPoolSize(false); + EXPECT_TRUE(m_dynamicBuffer->m_bufferPoolReady) << "m_bufferPoolReady should remain true"; + + // TEST CASE 4: MMU size available, buffer initialized, buffer pool ready (non-warm-start) - should execute + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + m_dynamicBuffer->m_bufferPoolReady = true; + + // Verify the condition logic - should be true (normal case after initialization) + // New condition: true && (!false || (true || !true)) = true && (true || true) = true && true = true + bool conditionShouldExecute4 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_TRUE(conditionShouldExecute4) << "Condition should evaluate to true (true && true = true)"; + + // Call checkSharedBufferPoolSize - should execute (normal case) + m_dynamicBuffer->checkSharedBufferPoolSize(false); + EXPECT_TRUE(m_dynamicBuffer->m_bufferPoolReady) << "m_bufferPoolReady should remain true after normal execution"; + + // TEST CASE 5: MMU size available, buffer initialized, buffer pool not ready (non-warm-start) - should execute + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + m_dynamicBuffer->m_bufferPoolReady = false; + + // Verify the condition logic - should be true in non-warm-start + // New condition: true && (!false || (true || !false)) = true && (true || true) = true && true = true + bool conditionShouldExecute5 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_TRUE(conditionShouldExecute5) << "Condition should evaluate to true in non-warm-start"; + + // Call checkSharedBufferPoolSize - should execute + m_dynamicBuffer->checkSharedBufferPoolSize(false); + + // TEST CASE 6: Warm start with buffer not initialized and pool not ready - should execute + // During warm start, execute when buffer is completely initialized OR buffer pools are not ready + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + m_dynamicBuffer->m_bufferPoolReady = false; + + // New condition: true && (false || (false || !false)) = true && (false || true) = true && true = true + EXPECT_FALSE(WarmStart::isWarmStart()) << "Default test setup is non-warm-start"; + // If it were warm start with buffer not initialized and pool not ready, the condition would evaluate to true, + // thus executing calculation during warm start to prepare the buffer pool + + // TEST CASE 7: Warm start with buffer initialized and pool ready - should execute + // This ensures consistency during warm start + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + m_dynamicBuffer->m_bufferPoolReady = true; + + // If it were warm start: true && (false || (true || !true)) = true && (false || true) = true && true = true + // This would execute during warm start when buffer is initialized or pool not ready + bool conditionShouldExecute7 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_TRUE(conditionShouldExecute7) << "Condition should evaluate to true when both buffer initialized and pool ready"; + } + + /* + * Test isHeadroomResourceValid startup optimization + * This test verifies the early return condition that skips validation during startup + * Logic: + * - Non-warm start: never skip validation. + * - Warm start: skip only if initialization has not completed. + */ + TEST_F(BufferMgrDynTest, TestIsHeadroomResourceValidFastStartOptimization) + { + // Initialize basic setup + InitDefaultLosslessParameter(); + InitMmuSize(); + StartBufferManager(); + + InitPort(); + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + InitBufferPool(); + InitDefaultBufferProfile(); + + // Create a test buffer profile + buffer_profile_t testProfile; + testProfile.name = "test_lossless_profile"; + testProfile.size = "1024"; + testProfile.xon = "512"; + testProfile.xoff = "512"; + testProfile.lossless = true; + testProfile.pool_name = "ingress_lossless_pool"; + + // TEST CASE 1: Buffer not initialized in non-warm-start - should NOT skip validation + // In new logic, non-warm start never skips validation + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + EXPECT_FALSE(WarmStart::isWarmStart()) << "Test setup should be non-warm-start"; + + // New condition: WarmStart::isWarmStart() && !m_bufferCompletelyInitialized + // In non-warm-start: false && !false = false && true = false (do not skip, proceed with validation) + bool shouldSkip = WarmStart::isWarmStart() && !m_dynamicBuffer->m_bufferCompletelyInitialized; + EXPECT_FALSE(shouldSkip) << "Should not skip validation in non-warm-start even when buffer not initialized"; + + // TEST CASE 2: Test with different profile types in non-warm-start + buffer_profile_t lossyProfile; + lossyProfile.name = "test_lossy_profile"; + lossyProfile.size = "0"; + lossyProfile.lossless = false; + lossyProfile.pool_name = "ingress_lossy_pool"; + + // For lossy profile with empty new_pg, should still return true (existing logic) + bool result = m_dynamicBuffer->isHeadroomResourceValid("Ethernet0", lossyProfile, ""); + EXPECT_TRUE(result) << "isHeadroomResourceValid should return true for lossy profile with empty new_pg"; + + // TEST CASE 3: Buffer completely initialized in non-warm-start - should NOT skip validation + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + + // New condition: false && !true = false && false = false (do not skip) + shouldSkip = WarmStart::isWarmStart() && !m_dynamicBuffer->m_bufferCompletelyInitialized; + EXPECT_FALSE(shouldSkip) << "Should not skip validation in non-warm-start when buffer initialized"; + + // For lossy profile with empty new_pg, should still return true (existing logic) + result = m_dynamicBuffer->isHeadroomResourceValid("Ethernet0", lossyProfile, ""); + EXPECT_TRUE(result) << "isHeadroomResourceValid should return true for lossy profile with empty new_pg even when initialized"; + + // TEST CASE 4: Verify non-warm-start always performs validation + // Create a profile that might fail normal validation + buffer_profile_t invalidProfile; + invalidProfile.name = "invalid_profile"; + invalidProfile.size = "999999999"; // Very large size + invalidProfile.xon = "999999999"; + invalidProfile.xoff = "999999999"; + invalidProfile.lossless = true; + invalidProfile.pool_name = "non_existent_pool"; + + // In non-warm start, should not skip validation regardless of buffer initialization state + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + // New condition: false && !false = false && true = false (do not skip, will perform validation) + shouldSkip = WarmStart::isWarmStart() && !m_dynamicBuffer->m_bufferCompletelyInitialized; + EXPECT_FALSE(shouldSkip) << "Should not skip validation in non-warm-start"; + + // TEST CASE 5: Verify condition logic in non-warm-start + // The new logic ensures validation always happens in non-warm-start + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + + // Test with lossless profile and new_pg (will trigger validation in non-warm-start) + // New condition: false && !false = false (do not skip) + shouldSkip = WarmStart::isWarmStart() && !m_dynamicBuffer->m_bufferCompletelyInitialized; + EXPECT_FALSE(shouldSkip) << "Non-warm-start should always perform validation"; + + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + shouldSkip = WarmStart::isWarmStart() && !m_dynamicBuffer->m_bufferCompletelyInitialized; + EXPECT_FALSE(shouldSkip) << "Non-warm-start should always perform validation regardless of initialization state"; + + // TEST CASE 6: Warm start (fast-reboot) scenario - skip only when buffer not initialized + // New logic: validation is skipped only during warm start while initialization is incomplete + // New condition: WarmStart::isWarmStart() && !m_bufferCompletelyInitialized + EXPECT_FALSE(WarmStart::isWarmStart()) << "Default test setup is non-warm-start"; + + // If it were warm start with buffer not initialized: + // true && !false = true && true = true (skip validation to save time) + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + // Simulated warm start condition: true && true = true (would skip) + + // If it were warm start with buffer initialized: + // true && !true = true && false = false (do not skip, perform validation for consistency) + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + // Simulated warm start condition: true && false = false (would not skip) + + // This ensures validation happens in warm start once initialization completes for consistency + } + + /* + * Test checkSharedBufferPoolSize with warm restart enabled + * This test verifies the warm restart code paths are covered + */ + TEST_F(BufferMgrDynTest, TestCheckSharedBufferPoolSizeWarmRestart) + { + // Initialize basic setup + InitDefaultLosslessParameter(); + InitMmuSize(); + + // Enable warm restart for buffermgrd + Table warmRestartEnableTable(m_state_db.get(), "WARM_RESTART_ENABLE_TABLE"); + warmRestartEnableTable.set("buffermgrd", + { + {"enable", "true"} + }); + + // Enable warm start in the WarmStart singleton + WarmStart::getInstance().m_enabled = true; + + StartBufferManager(); + InitPort(); + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + // Verify warm start is still enabled after initialization + EXPECT_TRUE(WarmStart::isWarmStart()) << "Warm start should still be enabled after initialization"; + + // TEST CASE 1: Warm start with buffer not initialized and pool not ready - should execute + // New condition: true && (true || (false || !false)) = true && (true || true) = true + m_dynamicBuffer->m_mmuSize = "136209408"; + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + m_dynamicBuffer->m_bufferPoolReady = false; + + // If warm start is enabled, the condition should still execute when pool not ready + bool conditionShouldExecute1 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + // In warm start: true && (false || (false || true)) = true && (false || true) = true + EXPECT_TRUE(conditionShouldExecute1) << "Condition should execute in warm start when pool not ready"; + + m_dynamicBuffer->checkSharedBufferPoolSize(false); + + // TEST CASE 2: Warm start with buffer initialized and pool not ready - should execute + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + m_dynamicBuffer->m_bufferPoolReady = false; + + // New condition: true && (!false || (true && (true || !false))) = true && (true || true) = true + bool conditionShouldExecute2 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_TRUE(conditionShouldExecute2) << "Condition should execute in warm start when buffer initialized"; + + m_dynamicBuffer->checkSharedBufferPoolSize(false); + + // TEST CASE 3: Warm start with buffer initialized and pool ready - should execute + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + m_dynamicBuffer->m_bufferPoolReady = true; + + // New condition: true && (!false || (true && (true || !true))) = true && (true || true) = true + bool conditionShouldExecute3 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + EXPECT_TRUE(conditionShouldExecute3) << "Condition should execute in warm start when both initialized and ready"; + + m_dynamicBuffer->checkSharedBufferPoolSize(false); + + // TEST CASE 4: Warm start with buffer not initialized and pool ready - should NOT execute + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + m_dynamicBuffer->m_bufferPoolReady = true; + + // New condition with explicit warm-start gating: + // true && (!false || (true && (false || false))) = true && (true || false) = true + // But with bufferCompletelyInitialized=false and bufferPoolReady=true: + // true && (false || (true && false)) = false -> should not execute + bool conditionShouldNotExecute4 = !m_dynamicBuffer->m_mmuSize.empty() && + (!WarmStart::isWarmStart() || (WarmStart::isWarmStart() && (m_dynamicBuffer->m_bufferCompletelyInitialized || !m_dynamicBuffer->m_bufferPoolReady))); + // In warm start: !WarmStart::isWarmStart() = false, m_bufferCompletelyInitialized = false, !m_bufferPoolReady = false + // So: true && (false || (false || false)) = true && false = false + EXPECT_FALSE(conditionShouldNotExecute4) << "Condition should not execute when warm start is enabled, buffer not initialized and pool ready"; + + m_dynamicBuffer->checkSharedBufferPoolSize(false); + } + + /* + * Test isHeadroomResourceValid with warm restart enabled + * This test verifies the warm restart skip logic is covered + */ + TEST_F(BufferMgrDynTest, TestIsHeadroomResourceValidWarmRestart) + { + // Initialize basic setup + InitDefaultLosslessParameter(); + InitMmuSize(); + + // Enable warm restart for buffermgrd + Table warmRestartEnableTable(m_state_db.get(), "WARM_RESTART_ENABLE_TABLE"); + warmRestartEnableTable.set("buffermgrd", + { + {"enable", "true"} + }); + + // CRITICAL: Enable warm start in WarmStart singleton so isWarmStart() returns true + WarmStart::getInstance().m_enabled = true; + + StartBufferManager(); + InitPort(); + SetPortInitDone(); + + // Create a lossless buffer profile for testing + buffer_profile_t testProfile; + testProfile.name = "test_lossless_profile"; + testProfile.size = "1024"; + testProfile.xon = "100"; + testProfile.xoff = "200"; + testProfile.threshold = "3"; + testProfile.pool_name = "ingress_lossless_pool"; + testProfile.lossless = true; + + // TEST CASE 1: Warm start with buffer not initialized - should skip validation + // This will execute the "return true;" at line 1065 + m_dynamicBuffer->m_bufferCompletelyInitialized = false; + + // Verify the condition is true + bool shouldSkip = WarmStart::isWarmStart() && !m_dynamicBuffer->m_bufferCompletelyInitialized; + EXPECT_TRUE(shouldSkip) << "Should skip validation in warm start when buffer not initialized"; + + // CRITICAL: Actually call isHeadroomResourceValid to execute line 1065 + bool result = m_dynamicBuffer->isHeadroomResourceValid("Ethernet0", testProfile, "3-4"); + EXPECT_TRUE(result) << "isHeadroomResourceValid should return true during warm start when buffer not initialized"; + + // TEST CASE 2: Warm start with buffer initialized - should NOT skip validation + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + + shouldSkip = WarmStart::isWarmStart() && !m_dynamicBuffer->m_bufferCompletelyInitialized; + EXPECT_FALSE(shouldSkip) << "Should NOT skip validation in warm start when buffer initialized"; + + // Cleanup: Disable warm start + WarmStart::getInstance().m_enabled = false; + } } diff --git a/tests/mock_tests/bufferorch_ut.cpp b/tests/mock_tests/bufferorch_ut.cpp index 2cd15ee549d..07432705623 100644 --- a/tests/mock_tests/bufferorch_ut.cpp +++ b/tests/mock_tests/bufferorch_ut.cpp @@ -29,6 +29,7 @@ namespace bufferorch_test shared_ptr m_config_db; shared_ptr m_state_db; shared_ptr m_chassis_app_db; + shared_ptr m_counters_db; uint32_t _ut_stub_expected_profile_count; uint32_t _ut_stub_port_profile_list_add_count; @@ -120,22 +121,67 @@ namespace bufferorch_test return pold_sai_queue_api->set_queue_attribute(queue_id, attr); } + sai_status_t _ut_stub_sai_set_ports_attribute( + uint32_t object_count, + const sai_object_id_t *object_id, + const sai_attribute_t *attr_list, + sai_bulk_op_error_mode_t mode, + sai_status_t *object_statuses) + { + for (size_t i = 0; i < object_count; i++) + { + object_statuses[i] = _ut_stub_sai_set_port_attribute(object_id[i], attr_list + i); + } + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_set_ingress_priority_groups_attribute( + uint32_t object_count, + const sai_object_id_t *object_id, + const sai_attribute_t *attr_list, + sai_bulk_op_error_mode_t mode, + sai_status_t *object_statuses) + { + for (size_t i = 0; i < object_count; i++) + { + object_statuses[i] = _ut_stub_sai_set_ingress_priority_group_attribute(object_id[i], attr_list + i); + } + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_set_queues_attribute( + uint32_t object_count, + const sai_object_id_t *object_id, + const sai_attribute_t *attr_list, + sai_bulk_op_error_mode_t mode, + sai_status_t *object_statuses) + { + for (size_t i = 0; i < object_count; i++) + { + object_statuses[i] = _ut_stub_sai_set_queue_attribute(object_id[i], attr_list + i); + } + return SAI_STATUS_SUCCESS; + } + void _hook_sai_apis() { ut_sai_port_api = *sai_port_api; pold_sai_port_api = sai_port_api; ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; + ut_sai_port_api.set_ports_attribute = _ut_stub_sai_set_ports_attribute; sai_port_api = &ut_sai_port_api; ut_sai_buffer_api = *sai_buffer_api; pold_sai_buffer_api = sai_buffer_api; ut_sai_buffer_api.set_ingress_priority_group_attribute = _ut_stub_sai_set_ingress_priority_group_attribute; + ut_sai_buffer_api.set_ingress_priority_groups_attribute = _ut_stub_sai_set_ingress_priority_groups_attribute; ut_sai_buffer_api.set_buffer_profile_attribute = _ut_stub_sai_set_buffer_profile_attribute; sai_buffer_api = &ut_sai_buffer_api; ut_sai_queue_api = *sai_queue_api; pold_sai_queue_api = sai_queue_api; ut_sai_queue_api.set_queue_attribute = _ut_stub_sai_set_queue_attribute; + ut_sai_queue_api.set_queues_attribute = _ut_stub_sai_set_queues_attribute; sai_queue_api = &ut_sai_queue_api; } @@ -199,6 +245,7 @@ namespace bufferorch_test m_config_db = make_shared("CONFIG_DB", 0); m_state_db = make_shared("STATE_DB", 0); m_app_state_db = make_shared("APPL_STATE_DB", 0); + m_counters_db = make_shared("COUNTERS_DB", 0); if(gMySwitchType == "voq") m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); @@ -254,11 +301,18 @@ namespace bufferorch_test }; vector flex_counter_tables = { - CFG_FLEX_COUNTER_TABLE_NAME + CFG_FLEX_COUNTER_TABLE_NAME, + CFG_DEVICE_METADATA_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); + const vector stel_tables = { + CFG_HIGH_FREQUENCY_TELEMETRY_PROFILE_TABLE_NAME, + CFG_HIGH_FREQUENCY_TELEMETRY_GROUP_TABLE_NAME + }; + gHFTOrch = new HFTelOrch(m_config_db.get(), m_state_db.get(), stel_tables); + ASSERT_EQ(gPortsOrch, nullptr); gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); @@ -373,11 +427,21 @@ namespace bufferorch_test i.second->clear(); } + // Clean up FlexCounterOrch + auto* flexCounterOrch = gDirectory.get(); + if (flexCounterOrch) + { + delete flexCounterOrch; + } + gDirectory.m_values.clear(); delete gCrmOrch; gCrmOrch = nullptr; + delete gBufferOrch; + gBufferOrch = nullptr; + delete gSwitchOrch; gSwitchOrch = nullptr; @@ -792,4 +856,119 @@ namespace bufferorch_test _ut_stub_buffer_profile_sanity_check = false; _unhook_sai_apis(); } + + TEST_F(BufferOrchTest, BufferOrchTestCreateOnlyConfigDbBuffersDynamicUpdate) + { + // Get FlexCounterOrch from directory + auto* flexCounterOrch = gDirectory.get(); + ASSERT_NE(flexCounterOrch, nullptr); + + // Test Phase 1: Initial State Verification + // Set up initial configuration with create_only_config_db_buffers = false and verify it + ASSERT_FALSE(flexCounterOrch->isCreateOnlyConfigDbBuffers()); + + // Test Phase 2: Enable Flex Counter and Verify All Counters Added + // Add configuration to enable flex counter for watermark + Table flexCounterTable = Table(m_config_db.get(), CFG_FLEX_COUNTER_TABLE_NAME); + flexCounterTable.set("PG_WATERMARK", { + {"FLEX_COUNTER_STATUS", "enable"}, + {"POLL_INTERVAL", "1000"} + }); + flexCounterTable.set("QUEUE_WATERMARK", { + {"FLEX_COUNTER_STATUS", "enable"}, + {"POLL_INTERVAL", "1000"} + }); + flexCounterOrch->addExistingData(&flexCounterTable); + static_cast(flexCounterOrch)->doTask(); + + // Verify all counters are added to counter database (because create_only_config_db_buffers = false) + Table countersPgTable = Table(m_counters_db.get(), COUNTERS_PG_NAME_MAP); + Table countersQueueTable = Table(m_counters_db.get(), COUNTERS_QUEUE_NAME_MAP); + + // Check that counter database has entries for all PGs and queues + // These tables use empty string "" as key and store all entries as fields + std::vector pgFields; + std::vector queueFields; + countersPgTable.get("", pgFields); + countersQueueTable.get("", queueFields); + ASSERT_GT(pgFields.size(), 0); + ASSERT_GT(queueFields.size(), 0); + + // Test Phase 3: Add Individual Buffer Configurations (Should NOT be added to map) + // Clear existing counter entries + countersPgTable.del(""); + countersQueueTable.del(""); + + // Add buffer PG and buffer queue configuration + Table bufferPgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table bufferQueueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + + bufferPgTable.set("Ethernet0:0", { + {"profile", "ingress_lossy_profile"} + }); + bufferQueueTable.set("Ethernet0:0", { + {"profile", "ingress_lossy_profile"} + }); + + gBufferOrch->addExistingData(&bufferPgTable); + gBufferOrch->addExistingData(&bufferQueueTable); + static_cast(gBufferOrch)->doTask(); + + // Verify they are NOT added to the counter database (because create_only_config_db_buffers = false) + // Individual buffer configurations should not create counter database entries again + pgFields.clear(); + queueFields.clear(); + countersPgTable.get("", pgFields); + countersQueueTable.get("", queueFields); + ASSERT_EQ(pgFields.size(), 0); + ASSERT_EQ(queueFields.size(), 0); + + // Test Phase 4: Dynamic Configuration Update to true + // Update DEVICE_METADATA table + Table deviceMetadataTable = Table(m_config_db.get(), CFG_DEVICE_METADATA_TABLE_NAME); + deviceMetadataTable.set("localhost", { + {"create_only_config_db_buffers", "true"} + }); + flexCounterOrch->addExistingData(&deviceMetadataTable); + static_cast(flexCounterOrch)->doTask(); + + // Verify configuration change + ASSERT_TRUE(flexCounterOrch->isCreateOnlyConfigDbBuffers()); + + // Test Phase 5: Add Individual Buffer Configurations (Should be added to map) + // Add buffer PG and buffer queue configuration for different objects + bufferPgTable.set("Ethernet0:1", { + {"profile", "ingress_lossy_profile"} + }); + bufferQueueTable.set("Ethernet0:1", { + {"profile", "ingress_lossy_profile"} + }); + + gBufferOrch->addExistingData(&bufferPgTable); + gBufferOrch->addExistingData(&bufferQueueTable); + static_cast(gBufferOrch)->doTask(); + + // Verify they ARE added to the counter database (because create_only_config_db_buffers = true) + // Individual buffer configurations should create counter database entries + pgFields.clear(); + queueFields.clear(); + countersPgTable.get("", pgFields); + countersQueueTable.get("", queueFields); + ASSERT_EQ(pgFields.size(), 1); + ASSERT_EQ(queueFields.size(), 1); + + // Verify the specific entries exist + { + std::string value; + bool found = countersPgTable.hget("", "Ethernet0:1", value); + ASSERT_TRUE(found); + ASSERT_FALSE(value.empty()); + } + { + std::string value; + bool found = countersQueueTable.hget("", "Ethernet0:1", value); + ASSERT_TRUE(found); + ASSERT_FALSE(value.empty()); + } + } } diff --git a/tests/mock_tests/bulker_ut.cpp b/tests/mock_tests/bulker_ut.cpp index 6210cc0969d..88d18993c18 100644 --- a/tests/mock_tests/bulker_ut.cpp +++ b/tests/mock_tests/bulker_ut.cpp @@ -2,6 +2,7 @@ #include "bulker.h" extern sai_route_api_t *sai_route_api; +extern sai_neighbor_api_t *sai_neighbor_api; namespace bulker_test { @@ -17,12 +18,18 @@ namespace bulker_test { ASSERT_EQ(sai_route_api, nullptr); sai_route_api = new sai_route_api_t(); + + ASSERT_EQ(sai_neighbor_api, nullptr); + sai_neighbor_api = new sai_neighbor_api_t(); } void TearDown() override { delete sai_route_api; sai_route_api = nullptr; + + delete sai_neighbor_api; + sai_neighbor_api = nullptr; } }; @@ -142,4 +149,246 @@ namespace bulker_test // Confirm route entry is not pending removal ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_non_remove)); } + + TEST_F(BulkerTest, NeighborBulker) + { + // Create bulker + EntityBulker gNeighBulker(sai_neighbor_api, 1000); + deque object_statuses; + + // Check max bulk size + ASSERT_EQ(gNeighBulker.max_bulk_size, 1000); + + // Create a dummy neighbor entry + sai_neighbor_entry_t neighbor_entry_remove; + neighbor_entry_remove.ip_address.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + neighbor_entry_remove.ip_address.addr.ip4 = 0x10000001; + neighbor_entry_remove.rif_id = 0x0; + neighbor_entry_remove.switch_id = 0x0; + + // Put neighbor entry into remove + object_statuses.emplace_back(); + gNeighBulker.remove_entry(&object_statuses.back(), &neighbor_entry_remove); + + // Confirm neighbor entry is pending removal + ASSERT_TRUE(gNeighBulker.bulk_entry_pending_removal(neighbor_entry_remove)); + } + + TEST_F(BulkerTest, BulkerPendingRemovalOrSet_OnlyRemoval) + { + // Create bulker + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Create a dummy route entry for removal + sai_route_entry_t route_entry_remove; + route_entry_remove.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_remove.destination.addr.ip4 = htonl(0x0a00000f); + route_entry_remove.destination.mask.ip4 = htonl(0xffffff00); + route_entry_remove.vr_id = 0x0; + route_entry_remove.switch_id = 0x0; + + // Put route entry into remove + object_statuses.emplace_back(); + gRouteBulker.remove_entry(&object_statuses.back(), &route_entry_remove); + + // Confirm route entry is pending removal + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal(route_entry_remove)); + + // Confirm route entry is detected by bulk_entry_pending_removal_or_set + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal_or_set(route_entry_remove)); + } + + TEST_F(BulkerTest, BulkerPendingRemovalOrSet_OnlySet) + { + // Create bulker + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Create a dummy route entry for setting + sai_route_entry_t route_entry_set; + route_entry_set.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_set.destination.addr.ip4 = htonl(0x0a00000f); + route_entry_set.destination.mask.ip4 = htonl(0xffffff00); + route_entry_set.vr_id = 0x0; + route_entry_set.switch_id = 0x0; + + // Set packet action for route (this adds to setting_entries) + sai_attribute_t route_attr; + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + route_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry_set, &route_attr); + + // Confirm route entry is NOT pending removal + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_set)); + + // Confirm route entry IS detected by bulk_entry_pending_removal_or_set (because it's in setting_entries) + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal_or_set(route_entry_set)); + } + + TEST_F(BulkerTest, BulkerPendingRemovalOrSet_BothRemovalAndSet) + { + // Create bulker + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Create two different route entries + sai_route_entry_t route_entry_remove; + route_entry_remove.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_remove.destination.addr.ip4 = htonl(0x0a00000f); + route_entry_remove.destination.mask.ip4 = htonl(0xffffff00); + route_entry_remove.vr_id = 0x0; + route_entry_remove.switch_id = 0x0; + + sai_route_entry_t route_entry_set; + route_entry_set.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_set.destination.addr.ip4 = htonl(0x0a00010f); + route_entry_set.destination.mask.ip4 = htonl(0xffffff00); + route_entry_set.vr_id = 0x0; + route_entry_set.switch_id = 0x0; + + // Put first route entry into remove + object_statuses.emplace_back(); + gRouteBulker.remove_entry(&object_statuses.back(), &route_entry_remove); + + // Set attribute for second route entry + sai_attribute_t route_attr; + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + route_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry_set, &route_attr); + + // Confirm both entries are detected by bulk_entry_pending_removal_or_set + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal_or_set(route_entry_remove)); + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal_or_set(route_entry_set)); + + // Confirm only the removal entry is detected by bulk_entry_pending_removal + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal(route_entry_remove)); + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_set)); + } + + TEST_F(BulkerTest, BulkerPendingRemovalOrSet_NeitherRemovalNorSet) + { + // Create bulker + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Create a dummy route entry that is not added to bulker + sai_route_entry_t route_entry_none; + route_entry_none.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry_none.destination.addr.ip4 = htonl(0x0a00000f); + route_entry_none.destination.mask.ip4 = htonl(0xffffff00); + route_entry_none.vr_id = 0x0; + route_entry_none.switch_id = 0x0; + + // Confirm route entry is NOT pending removal + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry_none)); + + // Confirm route entry is NOT detected by bulk_entry_pending_removal_or_set + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal_or_set(route_entry_none)); + } + + TEST_F(BulkerTest, BulkerPendingRemovalOrSet_DefaultRouteScenario) + { + // This test simulates the default route scenario described in the code comments: + // A DEL event occurs and automatically adds a DROP action (creating a setting_entry), + // then a subsequent SET operation needs to check for both pending removals AND pending sets. + + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Create a default route entry (0.0.0.0/0) + sai_route_entry_t default_route; + default_route.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + default_route.destination.addr.ip4 = 0; // 0.0.0.0 + default_route.destination.mask.ip4 = 0; // /0 + default_route.vr_id = 0x0; + default_route.switch_id = 0x0; + + // Simulate DEL event: Set DROP action for default route + sai_attribute_t route_attr; + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + route_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &default_route, &route_attr); + + // Verify the route is in setting_entries + ASSERT_EQ(gRouteBulker.setting_entries_count(), 1); + + // Verify bulk_entry_pending_removal returns false (not in removing_entries) + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(default_route)); + + // Verify bulk_entry_pending_removal_or_set returns true (in setting_entries) + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal_or_set(default_route)); + + // This ensures that when a subsequent SET operation checks if the route needs to be updated, + // it will correctly detect that there's a pending operation (the DROP action) + } + + TEST_F(BulkerTest, BulkerPendingRemovalOrSet_IPv6Route) + { + // Test with IPv6 route to ensure the function works with different address families + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Create an IPv6 route entry + sai_route_entry_t ipv6_route; + ipv6_route.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV6; + // Set IPv6 address 2001:db8::1/64 + uint8_t ipv6_addr[16] = {0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}; + uint8_t ipv6_mask[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0}; + memcpy(ipv6_route.destination.addr.ip6, ipv6_addr, 16); + memcpy(ipv6_route.destination.mask.ip6, ipv6_mask, 16); + ipv6_route.vr_id = 0x0; + ipv6_route.switch_id = 0x0; + + // Set packet action for IPv6 route + sai_attribute_t route_attr; + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + route_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &ipv6_route, &route_attr); + + // Verify the IPv6 route is detected by bulk_entry_pending_removal_or_set + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal_or_set(ipv6_route)); + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(ipv6_route)); + } + + TEST_F(BulkerTest, BulkerPendingRemovalOrSet_AfterClear) + { + // Test that bulk_entry_pending_removal_or_set returns false after clearing the bulker + EntityBulker gRouteBulker(sai_route_api, 1000); + deque object_statuses; + + // Create a route entry + sai_route_entry_t route_entry; + route_entry.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; + route_entry.destination.addr.ip4 = htonl(0x0a00000f); + route_entry.destination.mask.ip4 = htonl(0xffffff00); + route_entry.vr_id = 0x0; + route_entry.switch_id = 0x0; + + // Add to setting_entries + sai_attribute_t route_attr; + route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + route_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + object_statuses.emplace_back(); + gRouteBulker.set_entry_attribute(&object_statuses.back(), &route_entry, &route_attr); + + // Verify it's detected before clear + ASSERT_TRUE(gRouteBulker.bulk_entry_pending_removal_or_set(route_entry)); + + // Clear the bulker + gRouteBulker.clear(); + + // Verify it's NOT detected after clear + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal_or_set(route_entry)); + ASSERT_FALSE(gRouteBulker.bulk_entry_pending_removal(route_entry)); + } } diff --git a/tests/mock_tests/check.h b/tests/mock_tests/check.h index d1b095562de..a13f2abd3cf 100644 --- a/tests/mock_tests/check.h +++ b/tests/mock_tests/check.h @@ -42,40 +42,94 @@ struct Check std::cerr << "Expected: " << meta->attridname << "\n"; } } - continue; } - const int MAX_BUF_SIZE = 0x4000; - std::string act_str; - std::string exp_str; + const sai_attribute_t* act = &act_attr_list[i]; + const sai_attribute_t* exp = &exp_attr_list.get_attr_list()[i]; + if (!Check::AttrValue(objecttype, id, act, exp)) + { + return false; + } + } + + return true; + } + + static bool AttrValue(sai_object_type_t objecttype, sai_attr_id_t id, const sai_attribute_t* act, const sai_attribute_t* exp) + { + auto meta = sai_metadata_get_attr_metadata(objecttype, id); + assert(meta != nullptr); + + const int MAX_BUF_SIZE = 0x4000; + std::vector act_buf(MAX_BUF_SIZE); + std::vector exp_buf(MAX_BUF_SIZE); + + act_buf.reserve(MAX_BUF_SIZE); + exp_buf.reserve(MAX_BUF_SIZE); + + auto act_len = sai_serialize_attribute_value(act_buf.data(), meta, &act->value); + auto exp_len = sai_serialize_attribute_value(exp_buf.data(), meta, &exp->value); - act_str.reserve(MAX_BUF_SIZE); - exp_str.reserve(MAX_BUF_SIZE); + assert(act_len < act_str.size()); + assert(act_len < exp_str.size()); - auto act_len = sai_serialize_attribute_value(&act_str[0], meta, &act_attr_list[i].value); - auto exp_len = sai_serialize_attribute_value(&exp_str[0], meta, &exp_attr_list.get_attr_list()[i].value); + act_buf.resize(act_len); + exp_buf.resize(exp_len); - assert(act_len < act_str.size()); - assert(act_len < exp_str.size()); + std::string act_str(act_buf.begin(), act_buf.end()); + std::string exp_str(exp_buf.begin(), exp_buf.end()); + + if (act_len != exp_len) + { + std::cerr << "AttrValue length failed\n"; + std::cerr << "Actual: " << act_len << "," << act_str << "\n"; + std::cerr << "Expected: " << exp_len << "," << exp_str << "\n"; + return false; + } - if (act_len != exp_len) + if (act_str != exp_str) + { + std::cerr << "AttrValue string failed\n"; + std::cerr << "Actual: " << act_str << "\n"; + std::cerr << "Expected: " << exp_str << "\n"; + return false; + } + return true; + } + + static bool AttrListSubset(sai_object_type_t objecttype, const std::vector &act_attr_list, + saimeta::SaiAttributeList &exp_attr_list, const std::vector skip_check) + { + /* + Size of attributes should be equal and in the same order. + If the validation has to be skipped for certain attributes populate the skip_check. + */ + if (act_attr_list.size() != exp_attr_list.get_attr_count()) + { + std::cerr << "AttrListSubset size mismatch\n"; + return false; + } + if (act_attr_list.size() != skip_check.size()) + { + std::cerr << "AttrListSubset size mismatch\n"; + return false; + } + + for (uint32_t i = 0; i < exp_attr_list.get_attr_count(); ++i) + { + if (skip_check[i]) { - std::cerr << "AttrListEq failed\n"; - std::cerr << "Actual: " << act_str << "\n"; - std::cerr << "Expected: " << exp_str << "\n"; - return false; + continue; } - - if (act_str != exp_str) + sai_attr_id_t id = exp_attr_list.get_attr_list()[i].id; + const sai_attribute_t* act = &act_attr_list[i]; + const sai_attribute_t* exp = &exp_attr_list.get_attr_list()[i]; + if (!Check::AttrValue(objecttype, id, act, exp)) { - std::cerr << "AttrListEq failed\n"; - std::cerr << "Actual: " << act_str << "\n"; - std::cerr << "Expected: " << exp_str << "\n"; return false; } } - return true; } }; diff --git a/tests/mock_tests/consumer_ut.cpp b/tests/mock_tests/consumer_ut.cpp index 500bf458790..f0008a964b1 100644 --- a/tests/mock_tests/consumer_ut.cpp +++ b/tests/mock_tests/consumer_ut.cpp @@ -10,6 +10,25 @@ namespace consumer_test { using namespace std; + class TestOrch : public Orch + { + public: + TestOrch(swss::DBConnector *db, string tableName) + :Orch(db, tableName), + m_notification_count(0) + { + } + + void doTask(Consumer& consumer) + { + std::cout << "TestOrch::doTask " << consumer.m_toSync.size() << std::endl; + m_notification_count += consumer.m_toSync.size(); + consumer.m_toSync.clear(); + } + + long m_notification_count; + }; + struct ConsumerTest : public ::testing::Test { shared_ptr m_app_db; @@ -322,4 +341,31 @@ namespace consumer_test validate_syncmap(consumer->m_toSync, 1, key, exp_kofv); } + + TEST_F(ConsumerTest, ConsumerPops_notification_count) + { + int consumer_pops_batch_size = 10; + TestOrch test_orch(m_config_db.get(), "CFG_TEST_TABLE"); + Consumer test_consumer( + new swss::ConsumerStateTable(m_config_db.get(), "CFG_TEST_TABLE", consumer_pops_batch_size, 1), &test_orch, "CFG_TEST_TABLE"); + swss::ProducerStateTable producer_table(m_config_db.get(), "CFG_TEST_TABLE"); + + m_config_db->flushdb(); + for (int notification_count = 0; notification_count< consumer_pops_batch_size*2; notification_count++) + { + std::vector fields; + FieldValueTuple t("test_field", "test_value"); + fields.push_back(t); + producer_table.set(std::to_string(notification_count), fields); + + cout << "ConsumerPops_notification_count:: add key: " << notification_count << endl; + } + + // consumer should pops consumer_pops_batch_size notifications + test_consumer.execute(); + ASSERT_EQ(test_orch.m_notification_count, consumer_pops_batch_size); + + test_consumer.execute(); + ASSERT_EQ(test_orch.m_notification_count, consumer_pops_batch_size*2); + } } diff --git a/tests/mock_tests/copp_cfg.json b/tests/mock_tests/copp_cfg.json index 46d921b8276..f23228ed549 100644 --- a/tests/mock_tests/copp_cfg.json +++ b/tests/mock_tests/copp_cfg.json @@ -48,6 +48,16 @@ "cbs":"600", "red_action":"drop" }, + "queue1_group3": { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"200", + "cbs":"200", + "red_action":"drop" + }, "queue2_group1": { "cbs": "1000", "cir": "1000", @@ -106,6 +116,11 @@ "sflow": { "trap_group": "queue2_group1", "trap_ids": "sample_packet" + }, + "neighbor_miss": { + "trap_ids": "neighbor_miss", + "trap_group": "queue1_group3", + "always_enabled": "true" } } } diff --git a/tests/mock_tests/copp_ut.cpp b/tests/mock_tests/copp_ut.cpp index 1c3b766e1ca..f5d0b85cf5d 100644 --- a/tests/mock_tests/copp_ut.cpp +++ b/tests/mock_tests/copp_ut.cpp @@ -4,34 +4,14 @@ #include "warm_restart.h" #include "ut_helper.h" #include "coppmgr.h" -#include "coppmgr.cpp" #include #include + using namespace std; using namespace swss; -void create_init_file() -{ - int status = system("sudo mkdir /etc/sonic/"); - ASSERT_EQ(status, 0); - - status = system("sudo chmod 777 /etc/sonic/"); - ASSERT_EQ(status, 0); - - status = system("sudo cp copp_cfg.json /etc/sonic/"); - ASSERT_EQ(status, 0); -} - -void cleanup() -{ - int status = system("sudo rm -rf /etc/sonic/"); - ASSERT_EQ(status, 0); -} - TEST(CoppMgrTest, CoppTest) { - create_init_file(); - const vector cfg_copp_tables = { CFG_COPP_TRAP_TABLE_NAME, CFG_COPP_GROUP_TABLE_NAME, @@ -65,12 +45,10 @@ TEST(CoppMgrTest, CoppTest) {"trap_ids", "ip2me"} }); - CoppMgr coppmgr(&cfgDb, &appDb, &stateDb, cfg_copp_tables); + CoppMgr coppmgr(&cfgDb, &appDb, &stateDb, cfg_copp_tables, "./copp_cfg.json"); string overide_val; coppTable.hget("queue1_group1", "cbs",overide_val); EXPECT_EQ( overide_val, "6000"); - - cleanup(); } diff --git a/tests/mock_tests/copporch_ut.cpp b/tests/mock_tests/copporch_ut.cpp index fa7c360f01b..8e91df04c06 100644 --- a/tests/mock_tests/copporch_ut.cpp +++ b/tests/mock_tests/copporch_ut.cpp @@ -33,6 +33,18 @@ namespace copporch_test static_cast(this->coppOrch.get())->doTask(*consumer); } + task_process_status doProcessCoppRule(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_COPP_TABLE_NAME, 1, 1), + this->coppOrch.get(), APP_COPP_TABLE_NAME + )); + + consumer->addToSync(entries); + return Portal::CoppOrchInternal::processCoppRule(*coppOrch, *consumer); + } + CoppOrch& get() { return *coppOrch; @@ -281,6 +293,15 @@ namespace copporch_test std::vector resourcesList; }; + TEST_F(CoppOrchTest, VerifySupportedTrapIds) + { + MockCoppOrch coppOrch; + + const auto &supportedTrapIds = Portal::CoppOrchInternal::getSupportedTrapIds(coppOrch.get()); + EXPECT_TRUE(supportedTrapIds.find(SAI_HOSTIF_TRAP_TYPE_IP2ME) != supportedTrapIds.end()); + EXPECT_TRUE(supportedTrapIds.find(SAI_HOSTIF_TRAP_TYPE_NEIGHBOR_MISS) != supportedTrapIds.end()); + } + TEST_F(CoppOrchTest, TrapGroup_AddRemove) { const std::string trapGroupName = "queue4_group1"; @@ -322,7 +343,7 @@ namespace copporch_test } } - TEST_F(CoppOrchTest, TrapGroupWithPolicer_AddRemove) + TEST_F(CoppOrchTest, TrapGroupWithPolicer_AddUpdateRemove) { const std::string trapGroupName = "queue4_group2"; @@ -341,6 +362,7 @@ namespace copporch_test { copp_queue_field, "4" }, { copp_policer_meter_type_field, "packets" }, { copp_policer_mode_field, "sr_tcm" }, + { copp_policer_color_field, "aware" }, { copp_policer_cir_field, "600" }, { copp_policer_cbs_field, "600" }, { copp_policer_action_red_field, "drop" } @@ -358,8 +380,27 @@ namespace copporch_test const auto &trapGroupOid = cit1->second; const auto &cit2 = trapGroupPolicerMap.find(trapGroupOid); EXPECT_TRUE(cit2 != trapGroupPolicerMap.end()); + EXPECT_TRUE(cit2->second.meter == SAI_METER_TYPE_PACKETS); + EXPECT_TRUE(cit2->second.mode == SAI_POLICER_MODE_SR_TCM); + + /* Update the non create only attributes */ + auto tableKofvt2 = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_policer_cir_field, "1000" }, + { copp_policer_cbs_field, "1000" }, + { copp_policer_action_red_field, "drop" } + } + } + } + ); + ASSERT_EQ(coppOrch.doProcessCoppRule(tableKofvt2), task_process_status::task_success); } + // Delete CoPP Trap Group { auto tableKofvt = std::deque( @@ -376,13 +417,60 @@ namespace copporch_test } } + TEST_F(CoppOrchTest, TrapGroupWithPolicer_nothrowExec) + { + const std::string trapGroupName = "queue4_group2"; + + MockCoppOrch coppOrch; + + { + // Create CoPP Trap Group + auto tableKofvt = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_trap_action_field, "copy" }, + { copp_trap_priority_field, "4" }, + { copp_queue_field, "4" }, + { copp_policer_meter_type_field, "packets" }, + { copp_policer_mode_field, "sr_tcm" }, + { copp_policer_cir_field, "600" }, + { copp_policer_cbs_field, "600" }, + { copp_policer_action_red_field, "drop" } + } + } + } + ); + coppOrch.doCoppTableTask(tableKofvt); + + // Update create-only Policer Attributes + auto tableKofvt2 = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_policer_meter_type_field, "bytes" }, + { copp_policer_mode_field, "tr_tcm" }, + { copp_policer_color_field, "blind" }, + } + } + } + ); + EXPECT_NO_THROW(coppOrch.doProcessCoppRule(tableKofvt2)); + } + } + TEST_F(CoppOrchTest, Trap_AddRemove) { const std::string trapGroupName = "queue4_group1"; - const std::string trapNameList = "bgp,bgpv6"; + const std::string trapNameList = "bgp,bgpv6,neighbor_miss"; const std::set trapIDSet = { SAI_HOSTIF_TRAP_TYPE_BGP, - SAI_HOSTIF_TRAP_TYPE_BGPV6 + SAI_HOSTIF_TRAP_TYPE_BGPV6, + SAI_HOSTIF_TRAP_TYPE_NEIGHBOR_MISS }; MockCoppOrch coppOrch; @@ -412,6 +500,8 @@ namespace copporch_test const auto &tgOid = cit->second; const auto &tidList = Portal::CoppOrchInternal::getTrapIdsFromTrapGroup(coppOrch.get(), tgOid); const auto &tidSet = std::set(tidList.begin(), tidList.end()); + + // Verify that bgp, bgpv6 and neighbor_miss are installed EXPECT_TRUE(trapIDSet == tidSet); } diff --git a/tests/mock_tests/dashenifwdorch_ut.cpp b/tests/mock_tests/dashenifwdorch_ut.cpp new file mode 100644 index 00000000000..83cda7cfb29 --- /dev/null +++ b/tests/mock_tests/dashenifwdorch_ut.cpp @@ -0,0 +1,624 @@ +#include "mock_orch_test.h" +#include "gtest/gtest.h" +#include "gmock/gmock.h" +#include "mock_table.h" +#define protected public +#define private public +#include "dash/dashenifwdorch.h" +#undef public +#undef protected + +using namespace ::testing; + +namespace dashenifwdorch_ut +{ + /* Mock API Calls to other orchagents */ + class MockEniFwdCtx : public EniFwdCtxBase { + public: + using EniFwdCtxBase::EniFwdCtxBase; + + void initialize() override {} + MOCK_METHOD(std::string, getRouterIntfsAlias, (const swss::IpAddress&, const string& vrf), (override)); + MOCK_METHOD(bool, isNeighborResolved, (const NextHopKey&), (override)); + MOCK_METHOD(void, resolveNeighbor, (const NextHopKey&), (override)); + MOCK_METHOD(bool, findVnetVni, (const std::string&, uint64_t&), (override)); + MOCK_METHOD(bool, findVnetTunnel, (const std::string&, std::string&), (override)); + MOCK_METHOD((std::map&), getAllPorts, (), (override)); + }; + + class DashEniFwdOrchTest : public Test + { + public: + unique_ptr cfgDb; + unique_ptr applDb; + unique_ptr chassisApplDb; + unique_ptr
dpuTable; + unique_ptr
remoteDpuTable; + unique_ptr
vdpuTable; + + unique_ptr
eniFwdTable; + unique_ptr
aclRuleTable; + unique_ptr eniOrch; + shared_ptr ctx; + + /* Test values */ + string alias_dpu = "Vlan1000"; + string test_vip = "10.2.0.1/32"; + string vnet_name = "Vnet_1000"; + string tunnel_name = "mock_tunnel"; + string test_mac = "aa:bb:cc:dd:ee:ff"; + string test_mac2 = "ff:ee:dd:cc:bb:aa"; + string test_mac_key = "AABBCCDDEEFF"; + string test_mac2_key = "FFEEDDCCBBAA"; + string local_pav4 = "10.0.0.1"; + string remote_pav4 = "10.0.0.2"; + string remote_2_pav4 = "10.0.0.3"; + string local_npuv4 = "20.0.0.1"; + string remote_npuv4 = "20.0.0.2"; + string remote_2_npuv4 = "20.0.0.3"; + + std::map allPorts; + uint64_t test_vni = 1000; + int BASE_PRIORITY = 9996; + + void populateDpuTable() + { + /* Add 1 local and 1 cluster DPU */ + dpuTable->set("local_dpu", + { + { DashEniFwd::PA_V4, local_pav4 }, + { DashEniFwd::STATE, "up" }, + { "gnmi_port", "50051" }, + { "local_port", "8080" }, + }, SET_COMMAND); + + dpuTable->set("local_down_dpu", + { + { DashEniFwd::PA_V4, local_pav4 }, + { DashEniFwd::STATE, "down" }, + }, SET_COMMAND); + + remoteDpuTable->set("remote_dpu", + { + { DashEniFwd::PA_V4, remote_pav4 }, + { DashEniFwd::NPU_V4, remote_npuv4 }, + }, SET_COMMAND); + + remoteDpuTable->set("remote_dpu2", + { + { DashEniFwd::PA_V4, remote_2_pav4 }, + { DashEniFwd::NPU_V4, remote_2_npuv4 }, + }, SET_COMMAND); + + vdpuTable->set("vdpu0", + { + { DashEniFwd::DPU_IDS, "local_dpu" }, + }, SET_COMMAND); + + vdpuTable->set("vdpu1", + { + { DashEniFwd::DPU_IDS, "remote_dpu" }, + }, SET_COMMAND); + + vdpuTable->set("vdpu2", + { + { DashEniFwd::DPU_IDS, "remote_dpu2" }, + }, SET_COMMAND); + + vdpuTable->set("vdpu3", + { + { DashEniFwd::DPU_IDS, "invalid_dpu" }, + }, SET_COMMAND); + + vdpuTable->set("vdpu4", + { + { DashEniFwd::DPU_IDS, "local_down_dpu" }, + }, SET_COMMAND); + } + + void populateVip() + { + Table vipTable(cfgDb.get(), DashEniFwd::VIP_TABLE); + vipTable.set(test_vip, {{}}); + } + + void doDashEniFwdTableTask(DBConnector* applDb, const deque &entries) + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(applDb, APP_DASH_ENI_FORWARD_TABLE, 1, 1), + eniOrch.get(), APP_DASH_ENI_FORWARD_TABLE)); + + consumer->addToSync(entries); + eniOrch->doTask(*consumer); + } + + void checkKFV(Table* m_table, const std::string& key, const std::vector& expectedValues) { + std::string val; + for (const auto& fv : expectedValues) { + const std::string& field = fvField(fv); + const std::string& expectedVal = fvValue(fv); + EXPECT_TRUE(m_table->hget(key, field, val)) + << "Failed to retrieve field " << field << " from key " << key; + EXPECT_EQ(val, expectedVal) + << "Mismatch for field " << field << " for key " << key + << ": expected " << expectedVal << ", got " << val; + } + } + + void checkRuleUninstalled(string key) + { + std::string val; + EXPECT_FALSE(aclRuleTable->hget(key, MATCH_DST_IP, val)) + << key << ": Still Exist"; + } + + void checkNoKeyExists(Table* m_table, string expected_key) + { + std::string val; + std::vector keys; + m_table->getKeys(keys); + for (auto& key : keys) { + if (key == expected_key) + { + EXPECT_FALSE(true) << expected_key << ": Still Exist"; + } + } + } + + void SetUp() override { + testing_db::reset(); + cfgDb = make_unique("CONFIG_DB", 0); + applDb = make_unique("APPL_DB", 0); + chassisApplDb = make_unique("CHASSIS_APP_DB", 0); + /* Initialize tables */ + dpuTable = make_unique
(cfgDb.get(), DashEniFwd::DPU_TABLE); + remoteDpuTable = make_unique
(cfgDb.get(), DashEniFwd::REMOTE_DPU_TABLE); + vdpuTable = make_unique
(cfgDb.get(), DashEniFwd::VDPU_TABLE); + + eniFwdTable = make_unique
(applDb.get(), APP_DASH_ENI_FORWARD_TABLE); + aclRuleTable = make_unique
(applDb.get(), APP_ACL_RULE_TABLE_NAME); + /* Populate DPU Configuration */ + populateDpuTable(); + populateVip(); + eniOrch = make_unique(cfgDb.get(), applDb.get(), APP_DASH_ENI_FORWARD_TABLE, nullptr); + + /* Clear the default context and Patch with the Mock */ + ctx = make_shared(cfgDb.get(), applDb.get()); + /* Create a set of ports */ + allPorts["Ethernet0"] = Port("Ethernet0", Port::PHY); + allPorts["Ethernet4"] = Port("Ethernet4", Port::PHY); + allPorts["Ethernet8"] = Port("Ethernet8", Port::PHY); + allPorts["Ethernet16"] = Port("Ethernet16", Port::PHY); + allPorts["PortChannel1011"] = Port("PortChannel1012", Port::LAG); + allPorts["PortChannel1012"] = Port("Ethernet16", Port::LAG); + allPorts["PortChannel1011"].m_members.insert("Ethernet8"); + allPorts["PortChannel1012"].m_members.insert("Ethernet16"); + ON_CALL(*ctx, getAllPorts()).WillByDefault(ReturnRef(allPorts)); + + eniOrch->ctx.reset(); + eniOrch->ctx = ctx; + eniOrch->ctx->populateDpuRegistry(); + eniOrch->ctx_initialized_ = true; + } + }; + + /* + Test getting the PA, NPU address of a DPU and dpuType + */ + TEST_F(DashEniFwdOrchTest, TestDpuRegistry) + { + dpu_type_t type; + swss::IpAddress pa_v4; + swss::IpAddress npu_v4; + + EniFwdCtx ctx(cfgDb.get(), applDb.get()); + ctx.populateDpuRegistry(); + + EXPECT_TRUE(ctx.dpu_info.getType("vdpu0", type)); + EXPECT_EQ(type, dpu_type_t::LOCAL); + EXPECT_TRUE(ctx.dpu_info.getPaV4("vdpu0", pa_v4)); + EXPECT_EQ(pa_v4.to_string(), local_pav4); + + EXPECT_TRUE(ctx.dpu_info.getType("vdpu1", type)); + EXPECT_EQ(type, dpu_type_t::CLUSTER); + EXPECT_TRUE(ctx.dpu_info.getPaV4("vdpu1", pa_v4)); + EXPECT_EQ(pa_v4.to_string(), remote_pav4); + EXPECT_TRUE(ctx.dpu_info.getNpuV4("vdpu1", npu_v4)); + EXPECT_EQ(npu_v4.to_string(), remote_npuv4); + + EXPECT_TRUE(ctx.dpu_info.getNpuV4("vdpu2", npu_v4)); + EXPECT_EQ(npu_v4.to_string(), remote_2_npuv4); + + /* Invalid DPU */ + EXPECT_FALSE(ctx.dpu_info.getNpuV4("vdpu3", npu_v4)); + EXPECT_FALSE(ctx.dpu_info.getType("vdpu3", type)); + EXPECT_FALSE(ctx.dpu_info.getPaV4("vdpu3", pa_v4)); + + /* Down DPU */ + EXPECT_FALSE(ctx.dpu_info.getNpuV4("vdpu4", npu_v4)); + EXPECT_FALSE(ctx.dpu_info.getType("vdpu4", type)); + EXPECT_FALSE(ctx.dpu_info.getPaV4("vdpu4", pa_v4)); + + vector exp_ids = {"vdpu0", "vdpu1", "vdpu2"}; + auto ids = ctx.dpu_info.getIds(); + std::sort(ids.begin(), ids.end()); + EXPECT_EQ(ids, exp_ids); + } + + /* + VNI is provided by HaMgrd, Resolve Neighbor + */ + TEST_F(DashEniFwdOrchTest, LocalNeighbor) + { + auto nh_ip = swss::IpAddress(local_pav4); + NextHopKey nh = {nh_ip, alias_dpu}; + /* Mock calls to intfsOrch and neighOrch + If neighbor is already resolved, resolveNeighbor is not called */ + EXPECT_CALL(*ctx, getRouterIntfsAlias(nh_ip, _)).WillOnce(Return(alias_dpu)); /* Once per local endpoint */ + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(*ctx, resolveNeighbor(nh)).Times(0); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { DashEniFwd::VDPU_IDS, "vdpu0,vdpu1" }, + { DashEniFwd::PRIMARY, "vdpu0" }, // Local endpoint is the primary + } + } + } + ) + ); + + /* Check ACL Rules */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key, { + { ACTION_REDIRECT_ACTION , local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY) }, + { MATCH_INNER_DST_MAC, test_mac } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY + rule_type_t::TUNNEL_TERM) }, + { MATCH_INNER_DST_MAC, test_mac }, + { MATCH_TUNNEL_TERM, "true"} + }); + } + + /* + VNI is provided by HaMgrd, UnResolved Neighbor + */ + TEST_F(DashEniFwdOrchTest, LocalNeighbor_Unresolved) + { + auto nh_ip = swss::IpAddress(local_pav4); + NextHopKey nh = {nh_ip, alias_dpu}; + /* 1 for initLocalEndpoints */ + EXPECT_CALL(*ctx, getRouterIntfsAlias(nh_ip, _)).WillOnce(Return(alias_dpu)); + + /* Neighbor is not resolved, 1 per rule + 1 for initLocalEndpoints */ + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(5).WillRepeatedly(Return(false)); + /* resolveNeighbor is called because the neigh is not resolved */ + EXPECT_CALL(*ctx, resolveNeighbor(nh)).Times(5); /* 1 per rule + 1 for initLocalEndpoints */ + + eniOrch->initLocalEndpoints(); + + /* Populate 2 ENI's */ + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { DashEniFwd::VDPU_IDS, "vdpu0,vdpu1" }, + { DashEniFwd::PRIMARY, "vdpu0" }, // Local endpoint is the primary + } + }, + { + vnet_name + ":" + test_mac2, + SET_COMMAND, + { + { DashEniFwd::VDPU_IDS, "vdpu0,vdpu1" }, + { DashEniFwd::PRIMARY, "vdpu0" }, // Local endpoint is the primary + } + } + } + ) + ); + + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_TERM"); + + /* Neighbor is resolved, Trigger a nexthop update (1 for Neigh Update) * 4 for Types of Rules */ + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(4).WillRepeatedly(Return(true)); + + NeighborEntry temp_entry = nh; + NeighborUpdate update = { temp_entry, MacAddress(), true }; + eniOrch->update(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + + /* Check ACL Rules */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key, { + { ACTION_REDIRECT_ACTION, local_pav4 } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_TUNNEL_TERM, "true"} + }); + } + + /* + Remote Endpoint + */ + TEST_F(DashEniFwdOrchTest, RemoteNeighbor) + { + EXPECT_CALL(*ctx, getRouterIntfsAlias(_, _)).WillOnce(Return(alias_dpu)); + /* calls to neighOrch expected for tunn termination entries */ + EXPECT_CALL(*ctx, isNeighborResolved(_)).Times(2).WillRepeatedly(Return(true)); + + EXPECT_CALL(*ctx, findVnetTunnel(vnet_name, _)).Times(2) // Once per non-tunnel term rules + .WillRepeatedly(DoAll( + SetArgReferee<1>(tunnel_name), + Return(true) + )); + + EXPECT_CALL(*ctx, findVnetVni(vnet_name, _)).Times(2) // Called once per ENI + .WillRepeatedly(DoAll( + SetArgReferee<1>(test_vni), + Return(true) + )); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { DashEniFwd::VDPU_IDS, "vdpu0,vdpu1" }, + { DashEniFwd::PRIMARY, "vdpu1" }, // Remote endpoint is the primary + } + }, + { + vnet_name + ":" + test_mac2, + SET_COMMAND, + { + { DashEniFwd::VDPU_IDS, "vdpu0,vdpu1" }, + { DashEniFwd::PRIMARY, "vdpu1" }, // Remote endpoint is the primary + } + } + } + ) + ); + + /* Check ACL Rules */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key, { + { ACTION_REDIRECT_ACTION, remote_npuv4 + "@" + tunnel_name + "," + to_string(test_vni) } + }); + + /* Delete all ENI's */ + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac2, + DEL_COMMAND, + { } + }, + { + vnet_name + ":" + test_mac, + DEL_COMMAND, + { } + } + } + ) + ); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key ); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key + "_TERM"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_TERM"); + } + + /* + Remote Endpoint with an update to switch to Local Endpoint + */ + TEST_F(DashEniFwdOrchTest, RemoteNeighbor_SwitchToLocal) + { + EXPECT_CALL(*ctx, getRouterIntfsAlias(_, _)).WillOnce(Return(alias_dpu)); + /* 1 calls made for tunnel termination rules */ + EXPECT_CALL(*ctx, isNeighborResolved(_)).Times(1).WillRepeatedly(Return(true)); + EXPECT_CALL(*ctx, findVnetTunnel(vnet_name, _)).Times(1) // Once per non-tunnel term rules + .WillRepeatedly(DoAll( + SetArgReferee<1>(tunnel_name), + Return(true) + )); + EXPECT_CALL(*ctx, findVnetVni(vnet_name, _)).Times(1) // Called once per ENI + .WillRepeatedly(DoAll( + SetArgReferee<1>(test_vni), + Return(true) + )); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { DashEniFwd::VDPU_IDS, "vdpu0,vdpu1" }, + { DashEniFwd::PRIMARY, "vdpu1" }, // Remote endpoint is the primary + } + } + } + ) + ); + + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key, { + { ACTION_REDIRECT_ACTION, remote_npuv4 + "@" + tunnel_name + ',' + to_string(test_vni) } + }); + + /* 1 calls will be made for non tunnel termination rules after primary switch */ + EXPECT_CALL(*ctx, isNeighborResolved(_)).Times(1).WillRepeatedly(Return(true)); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { DashEniFwd::PRIMARY, "vdpu0" }, // Primary is Local now + } + } + } + ) + ); + } + + /* + T1 doesn't host the ENI, Both the enndpoints are Remote. + No Tunnel Termination Rules expected + */ + TEST_F(DashEniFwdOrchTest, RemoteNeighbor_NoTunnelTerm) + { + EXPECT_CALL(*ctx, findVnetTunnel(vnet_name, _)).Times(1) // Only 1 rule is created + .WillRepeatedly(DoAll( + SetArgReferee<1>(tunnel_name), + Return(true) + )); + EXPECT_CALL(*ctx, findVnetVni(vnet_name, _)).Times(1) // Called once per ENI + .WillRepeatedly(DoAll( + SetArgReferee<1>(test_vni), + Return(true) + )); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { DashEniFwd::VDPU_IDS, "vdpu1,vdpu2" }, + { DashEniFwd::PRIMARY, "vdpu2" }, // Remote endpoint is the primary + } + } + } + ) + ); + + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key, { + { ACTION_REDIRECT_ACTION, remote_2_npuv4 + "@" + tunnel_name + ',' + to_string(test_vni) } + }); + + /* Tunnel termination rules are not installed */ + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_TERM"); + } + + /* + Test ACL Table and Table Type config with reference counting + */ + TEST_F(DashEniFwdOrchTest, TestAclTableConfig) + { + Table aclTableType(applDb.get(), APP_ACL_TABLE_TYPE_TABLE_NAME); + Table aclTable(applDb.get(), APP_ACL_TABLE_TABLE_NAME); + Table portTable(cfgDb.get(), CFG_PORT_TABLE_NAME); + + portTable.set("Ethernet0", + { + { "lanes", "0,1,2,3" } + }, SET_COMMAND); + + portTable.set("Ethernet4", + { + { "lanes", "4,5,6,7" }, + { PORT_ROLE, PORT_ROLE_DPC } + }, SET_COMMAND); + + // Initially no ACL table should exist + checkNoKeyExists(&aclTable, "ENI"); + checkNoKeyExists(&aclTableType, "ENI_REDIRECT"); + + // Create first ACL rule - should create the table + vector fv1 = { + { RULE_PRIORITY, "9996" }, + { MATCH_DST_IP, test_vip }, + { MATCH_INNER_DST_MAC, test_mac }, + { ACTION_REDIRECT_ACTION, local_pav4 } + }; + eniOrch->ctx->createAclRule("ENI:rule1", fv1); + + // Verify ACL table and table type were created after first rule + checkKFV(&aclTableType, "ENI_REDIRECT", { + { ACL_TABLE_TYPE_MATCHES, "DST_IP,INNER_DST_MAC,TUNNEL_TERM" }, + { ACL_TABLE_TYPE_ACTIONS, "REDIRECT_ACTION" }, + { ACL_TABLE_TYPE_BPOINT_TYPES, "PORT,PORTCHANNEL" } + }); + + checkKFV(&aclTable, "ENI", { + { ACL_TABLE_TYPE, "ENI_REDIRECT" }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + { ACL_TABLE_PORTS, "Ethernet0,PortChannel1011,PortChannel1012" } + }); + + // Create second and third ACL rules - table should still exist + vector fv2 = { + { RULE_PRIORITY, "9997" }, + { MATCH_DST_IP, test_vip }, + { MATCH_INNER_DST_MAC, test_mac2 }, + { ACTION_REDIRECT_ACTION, local_pav4 } + }; + eniOrch->ctx->createAclRule("ENI:rule2", fv2); + + vector fv3 = { + { RULE_PRIORITY, "9998" }, + { MATCH_DST_IP, test_vip }, + { MATCH_INNER_DST_MAC, test_mac }, + { ACTION_REDIRECT_ACTION, remote_pav4 } + }; + eniOrch->ctx->createAclRule("ENI:rule3", fv3); + + // Verify rule count is 3 + EXPECT_EQ(eniOrch->ctx->acl_rule_count_, 3); + + // Delete first two rules - table should still exist + eniOrch->ctx->deleteAclRule("ENI:rule1"); + EXPECT_EQ(eniOrch->ctx->acl_rule_count_, 2); + + eniOrch->ctx->deleteAclRule("ENI:rule2"); + EXPECT_EQ(eniOrch->ctx->acl_rule_count_, 1); + + // Table should still exist + checkKFV(&aclTable, "ENI", { + { ACL_TABLE_TYPE, "ENI_REDIRECT" } + }); + + // Delete last rule - table should be removed + eniOrch->ctx->deleteAclRule("ENI:rule3"); + EXPECT_EQ(eniOrch->ctx->acl_rule_count_, 0); + + // Verify ACL table and table type were deleted after last rule + checkNoKeyExists(&aclTable, "ENI"); + checkNoKeyExists(&aclTableType, "ENI_REDIRECT"); + } +} + +namespace mock_orch_test +{ + TEST_F(MockOrchTest, EniFwdCtx) + { + EniFwdCtx ctx(m_config_db.get(), m_app_db.get()); + ASSERT_NO_THROW(ctx.initialize()); + + NextHopKey nh(IpAddress("10.0.0.1"), "Ethernet0"); + ASSERT_NO_THROW(ctx.isNeighborResolved(nh)); + ASSERT_NO_THROW(ctx.resolveNeighbor(nh)); + ASSERT_NO_THROW(ctx.getRouterIntfsAlias(IpAddress("10.0.0.1"))); + + uint64_t vni; + ASSERT_NO_THROW(ctx.findVnetVni("Vnet_1000", vni)); + string tunnel; + ASSERT_NO_THROW(ctx.findVnetTunnel("Vnet_1000", tunnel)); + ASSERT_NO_THROW(ctx.getAllPorts()); + } +} diff --git a/tests/mock_tests/dashhaorch_ut.cpp b/tests/mock_tests/dashhaorch_ut.cpp new file mode 100644 index 00000000000..1bf09c4886c --- /dev/null +++ b/tests/mock_tests/dashhaorch_ut.cpp @@ -0,0 +1,1070 @@ +#include "mock_orch_test.h" +#include "mock_table.h" +#include "mock_sai_api.h" +#include "gtest/gtest.h" +#include "gmock/gmock.h" +#include "dash/dashhaorch.h" +#include "pbutils.h" +using namespace ::testing; + +extern redisReply *mockReply; +extern sai_redis_communication_mode_t gRedisCommunicationMode; + +EXTERN_MOCK_FNS + +namespace dashhaorch_ut +{ + DEFINE_SAI_GENERIC_APIS_MOCK(dash_ha, ha_set, ha_scope); + + using namespace mock_orch_test; + + class MockBfdOrch : public BfdOrch + { + public: + + MockBfdOrch(DBConnector* db, DBConnector* state_db) + : BfdOrch(db, APP_BFD_SESSION_TABLE_NAME, TableConnector(state_db, STATE_BFD_SESSION_TABLE_NAME)) {} + + void createSoftwareBfdSession( + const std::string& key, + const std::vector& data) override + { + createSoftwareBfdSession_invoked_times++; + } + + void removeSoftwareBfdSession( + const std::string& key) override + { + removeSoftwareBfdSession_invoked_times++; + } + + void removeAllSoftwareBfdSessions() override + { + removeAllSoftwareBfdSessions_invoked_times++; + } + + uint32_t createSoftwareBfdSession_invoked_times = 0; + uint32_t removeSoftwareBfdSession_invoked_times = 0; + uint32_t removeAllSoftwareBfdSessions_invoked_times = 0; + + }; + + class DashHaOrchTestable : public DashHaOrch + { + public: + void doTask(swss::NotificationConsumer &consumer) { DashHaOrch::doTask(consumer); } + }; + + class DashHaOrchTest : public MockOrchTest + { + protected: + std::unique_ptr m_mockBfdOrch; + + void PostSetUp() override + { + m_mockBfdOrch = std::make_unique(m_app_db.get(), m_state_db.get()); + + vector dash_ha_tables = { + APP_DASH_HA_SET_TABLE_NAME, + APP_DASH_HA_SCOPE_TABLE_NAME + }; + m_dashHaOrch = new DashHaOrch(m_dpu_app_db.get(), dash_ha_tables, m_DashOrch, m_mockBfdOrch.get(), m_dpu_app_state_db.get(), nullptr); + gDirectory.set(m_dashHaOrch); + ut_orch_list.push_back((Orch **)&m_dashHaOrch); + } + + void ApplySaiMock() + { + INIT_SAI_API_MOCK(dash_ha); + MockSaiApis(); + } + + void PreTearDown() override + { + RestoreSaiApis(); + DEINIT_SAI_API_MOCK(dash_ha); + } + + dash::ha_set::HaSet HaSetPbObject() + { + dash::ha_set::HaSet ha_set = dash::ha_set::HaSet(); + swss::IpAddress vip_v4("1.1.1.1"); + swss::IpAddress vip_v6("::1"); + swss::IpAddress npu_ip("2.2.2.2"); + swss::IpAddress local_ip("3.3.3.3"); + // swss::IpAddress peer_ip("4.4.4.4"); + swss::IpAddress peer_ip("::2"); + + ha_set.set_version("1"); + ha_set.set_scope(dash::types::HA_SCOPE_DPU); + ha_set.mutable_vip_v4()->set_ipv4(vip_v4.getV4Addr()); + ha_set.mutable_vip_v6()->set_ipv6(reinterpret_cast(vip_v6.getV6Addr())); + ha_set.mutable_local_npu_ip()->set_ipv4(npu_ip.getV4Addr()); + ha_set.mutable_local_ip()->set_ipv4(local_ip.getV4Addr()); + // ha_set.mutable_peer_ip()->set_ipv4(peer_ip.getV4Addr()); + ha_set.mutable_peer_ip()->set_ipv6(reinterpret_cast(peer_ip.getV6Addr())); + ha_set.set_cp_data_channel_port(100); + ha_set.set_dp_channel_dst_port(200); + ha_set.set_dp_channel_src_port_min(0); + ha_set.set_dp_channel_src_port_max(1000); + ha_set.set_dp_channel_probe_interval_ms(1000); + ha_set.set_dp_channel_probe_fail_threshold(3); + + return ha_set; + } + + void CreateHaSet() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"vip_v4", "10.0.0.1"}, + {"vip_v6", "3:2::1:0"}, + {"owner", "dpu"}, + {"scope", "dpu"}, + {"local_npu_ip", "192.168.1.10"}, + {"local_ip", "192.168.2.1"}, + {"peer_ip", "192.168.2.2"}, + {"cp_data_channel_port", "4789"}, + {"dp_channel_dst_port", "4790"}, + {"dp_channel_src_port_min", "5000"}, + {"dp_channel_src_port_max", "6000"}, + {"dp_channel_probe_interval_ms", "1000"}, + {"dp_channel_probe_fail_threshold", "3"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void UpdatePeerIp(std::string peer_ip) + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + std::vector> fields = {{"version", "2"}}; + if (!peer_ip.empty()) { + fields.push_back({"peer_ip", peer_ip}); + } + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + fields + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void InvalidIpAddresses() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"vip_v4", "invalid_ip"}, + {"vip_v6", ""}, + {"owner", "dpu"}, + {"scope", "dpu"}, + {"local_npu_ip", "192.168.1.10"}, + {"local_ip", "3:2::1:0"}, + {"peer_ip", "300:300:300:300"}, + {"cp_data_channel_port", "4789"}, + {"dp_channel_dst_port", "4790"}, + {"dp_channel_src_port_min", "5000"}, + {"dp_channel_src_port_max", "6000"}, + {"dp_channel_probe_interval_ms", "1000"}, + {"dp_channel_probe_fail_threshold", "3"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void InvalidField() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"vip_v4", "10.0.0.1"}, + {"vip_v6", "3:2::1:0"}, + {"owner", "dpu"}, + {"scope", "dpu"}, + {"local_npu_ip", "192.168.1.10"}, + {"local_ip", "192.168.2.1"}, + {"peer_ip", "192.168.2.2"}, + {"cp_data_channel_port", "4789"}, + {"dp_channel_dst_port", "4790"}, + {"dp_channel_src_port_min", "5000"}, + {"dp_channel_src_port_max", "6000"}, + {"dp_channel_probe_interval_ms", "1000"}, + {"dp_channel_probe_fail_threshold", "3"}, + {"invalid_field", "invalid_value"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void CreateEniScopeHaSet() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"vip_v4", "10.0.0.1"}, + {"vip_v6", "fc00::1"}, + {"owner", "switch"}, + {"scope", "eni"}, + {"local_npu_ip", "192.168.1.10"}, + {"local_ip", "192.168.2.1"}, + {"peer_ip", "192.168.2.2"}, + {"cp_data_channel_port", "4789"}, + {"dp_channel_dst_port", "4790"}, + {"dp_channel_src_port_min", "5000"}, + {"dp_channel_src_port_max", "6000"}, + {"dp_channel_probe_interval_ms", "1000"}, + {"dp_channel_probe_fail_threshold", "3"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void RemoveHaSet() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + DEL_COMMAND, + { } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void CreateHaScope() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SCOPE_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SCOPE_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"ha_role", "dead"}, + {"ha_set_id", "HA_SET_1"}, + {"vip_v4", "10.0.0.1"}, + {"vip_v6", "3:2::1:0"}, + {"disabled", "true"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void CreateHaScopeLessFields() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SCOPE_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SCOPE_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"ha_role", "dead"}, + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void RemoveHaScope() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SCOPE_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SCOPE_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + DEL_COMMAND, + { } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void SetHaScopeHaRole(std::string role="active") + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SCOPE_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SCOPE_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"ha_role", role}, + {"disabled", "false"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void SetHaScopeActivateRoleRequest() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SCOPE_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SCOPE_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"ha_role", "active"}, + {"activate_role_requested", "true"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void SetHaScopeFlowReconcileRequest() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SCOPE_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SCOPE_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + {"version", "1"}, + {"ha_role", "active"}, + {"flow_reconcile_requested", "true"} + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void RandomTable() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), "RANDOM_TABLE", 1, 1), + m_dashHaOrch, "RANDOM_TABLE")); + + consumer->addToSync( + deque( + { + { + "random_key", + SET_COMMAND, + { + { "pb", "random" } + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void InvalidHaScopePbString() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SCOPE_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SCOPE_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + { "pb", "invalid" } + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void InvalidHaSetPbString() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + { "pb", "invalid" } + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void HaSetScopeUnspecified() + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_DASH_HA_SET_TABLE_NAME, 1, 1), + m_dashHaOrch, APP_DASH_HA_SET_TABLE_NAME)); + + dash::ha_set::HaSet ha_set = dash::ha_set::HaSet(); + ha_set.set_version("1"); + ha_set.set_scope(dash::types::HA_SCOPE_UNSPECIFIED); + + consumer->addToSync( + deque( + { + { + "HA_SET_1", + SET_COMMAND, + { + { "pb", ha_set.SerializeAsString() } + } + } + } + ) + ); + static_cast(m_dashHaOrch)->doTask(*consumer.get()); + } + + void CreateSoftwareBfdSession(string bfd_session_key = "default:default:192.168.1.100") + { + auto bfd_consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_BFD_SESSION_TABLE_NAME , 1, 1), + m_dashHaOrch, APP_BFD_SESSION_TABLE_NAME )); + + vector bfd_session_data = { + {"local_addr", "192.168.1.1"}, + {"tx_interval", "1000"}, + {"rx_interval", "1000"}, + {"multiplier", "3"}, + {"type", "async_active"}, + {"multihop", "true"} + }; + + bfd_consumer->addToSync( + deque( + { + { + bfd_session_key, + SET_COMMAND, + bfd_session_data + } + } + ) + ); + + static_cast(m_dashHaOrch)->doTask(*bfd_consumer.get()); + } + + void deleteSoftwareBfdSession(string bfd_session_key = "default:default:192.168.1.100") + { + auto bfd_consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(m_dpu_app_db.get(), APP_BFD_SESSION_TABLE_NAME , 1, 1), + m_dashHaOrch, APP_BFD_SESSION_TABLE_NAME )); + + bfd_consumer->addToSync( + deque( + { + { + bfd_session_key, + DEL_COMMAND, + {} + } + } + ) + ); + + static_cast(m_dashHaOrch)->doTask(*bfd_consumer.get()); + } + + void HaSetEvent(sai_ha_set_event_t event_type) + { + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + + sai_ha_set_event_data_t event; + memset(&event, 0, sizeof(event)); + event.ha_set_id = m_dashHaOrch->getHaScopeEntries().begin()->second.ha_scope_id; + event.event_type = event_type; + + std::string data = sai_serialize_ha_set_event_ntf(1, &event); + + std::vector notifyValues; + FieldValueTuple opdata(SAI_SWITCH_NOTIFICATION_NAME_HA_SET_EVENT, data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + auto exec = static_cast(m_dashHaOrch->getExecutor("HA_SET_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + consumer->readData(); + static_cast(m_dashHaOrch)->doTask(*consumer); + mockReply = nullptr; + + sai_redis_communication_mode_t old_mode = gRedisCommunicationMode; + gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC; + on_ha_set_event(1, &event); + gRedisCommunicationMode = old_mode; + } + + void HaScopeEvent(sai_ha_scope_event_t event_type, + sai_dash_ha_role_t ha_role, + sai_dash_ha_state_t ha_state) + { + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + + sai_ha_scope_event_data_t event; + memset(&event, 0, sizeof(event)); + event.ha_scope_id = m_dashHaOrch->getHaScopeEntries().begin()->second.ha_scope_id; + event.event_type = event_type; + event.ha_role = ha_role; + event.ha_state = ha_state; + event.flow_version = sai_uint32_t(0); + + ASSERT_EQ(to_string(event.flow_version), "0"); + + std::string data = sai_serialize_ha_scope_event_ntf(1, &event); + + std::vector notifyValues; + FieldValueTuple opdata(SAI_SWITCH_NOTIFICATION_NAME_HA_SCOPE_EVENT, data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + auto exec = static_cast(m_dashHaOrch->getExecutor("HA_SCOPE_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + consumer->readData(); + static_cast(m_dashHaOrch)->doTask(*consumer); + mockReply = nullptr; + + sai_redis_communication_mode_t old_mode = gRedisCommunicationMode; + gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC; + on_ha_scope_event(1, &event); + gRedisCommunicationMode = old_mode; + } + }; + + TEST_F(DashHaOrchTest, AddRemoveHaSet) + { + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_set) + .Times(1) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + CreateHaSet(); + + auto ha_set_entry = m_dashHaOrch->getHaSetEntries().find("HA_SET_1"); + sai_ip_address_t sai_vip_v4 = {}; + sai_ip_address_t sai_vip_v6 = {}; + + EXPECT_TRUE(to_sai(ha_set_entry->second.metadata.vip_v4(), sai_vip_v4)); + EXPECT_TRUE(to_sai(ha_set_entry->second.metadata.vip_v6(), sai_vip_v6)); + + EXPECT_EQ(sai_vip_v4.addr_family, SAI_IP_ADDR_FAMILY_IPV4); + uint32_t expected_v4 = htonl((10 << 24) | (0 << 16) | (0 << 8) | 1); + EXPECT_EQ(sai_vip_v4.addr.ip4, expected_v4); + + // Expected bytes for IPv6 address "3:2::1:0" + // 0003:0002:0000:0000:0000:0000:0001:0000 + uint8_t expected_v6[16] = { + 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00 + }; + + for (int i = 0; i < 16; i++) { + EXPECT_EQ(sai_vip_v6.addr.ip6[i], expected_v6[i]) + << "IPv6 VIP byte " << i << " mismatch. Expected: " + << std::hex << (int)expected_v6[i] << ", Got: " + << std::hex << (int)sai_vip_v6.addr.ip6[i]; + } + + HaSetEvent(SAI_HA_SET_EVENT_DP_CHANNEL_UP); + + EXPECT_CALL(*mock_sai_dash_ha_api, remove_ha_set) + .Times(1) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, UpdatePeerIp) + { + CreateHaSet(); + UpdatePeerIp("192.168.2.100"); + + auto ha_set_entry = m_dashHaOrch->getHaSetEntries().find("HA_SET_1"); + dash::types::IpAddress peer_ip; + to_pb("192.168.2.100", peer_ip); + EXPECT_EQ(to_string(ha_set_entry->second.metadata.peer_ip()), to_string(peer_ip)); + + UpdatePeerIp("invalid_ip"); + EXPECT_EQ(to_string(ha_set_entry->second.metadata.peer_ip()), to_string(peer_ip)); + + UpdatePeerIp(""); + EXPECT_EQ(to_string(ha_set_entry->second.metadata.peer_ip()), to_string(peer_ip)); + } + + TEST_F(DashHaOrchTest, InvalidIpAddresses) + { + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_set) + .Times(0); + + InvalidIpAddresses(); + } + + TEST_F(DashHaOrchTest, InvalidField) + { + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_set) + .Times(1); + + InvalidField(); + } + + TEST_F(DashHaOrchTest, HaSetAlreadyExists) + { + CreateHaSet(); + + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_set) + .Times(0); + + CreateHaSet(); + + EXPECT_CALL(*mock_sai_dash_ha_api, remove_ha_set) + .Times(1) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, AddRemoveHaScope) + { + CreateHaSet(); + + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_scope) + .Times(1) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + CreateHaScope(); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().size() == 1); + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1") != m_dashHaOrch->getHaScopeEntries().end()); + + // HA Scope already exists + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_scope) + .Times(0); + CreateHaScope(); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().size() == 1); + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1") != m_dashHaOrch->getHaScopeEntries().end()); + + EXPECT_CALL(*mock_sai_dash_ha_api, remove_ha_scope) + .Times(1) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + RemoveHaScope(); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().size() == 0); + } + + TEST_F(DashHaOrchTest, AddRemoveHaScopeLessFields) + { + CreateHaSet(); + + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_scope) + .Times(1); + + CreateHaScopeLessFields(); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().size() == 1); + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1") != m_dashHaOrch->getHaScopeEntries().end()); + + // HA Scope already exists + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_scope) + .Times(0); + CreateHaScope(); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().size() == 1); + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1") != m_dashHaOrch->getHaScopeEntries().end()); + + EXPECT_CALL(*mock_sai_dash_ha_api, remove_ha_scope) + .Times(1); + + RemoveHaScope(); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().size() == 0); + } + + + TEST_F(DashHaOrchTest, AddRemoveEniHaScope) + { + CreateEniScopeHaSet(); + + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_scope) + .Times(1) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + CreateHaScope(); + + EXPECT_CALL(*mock_sai_dash_ha_api, remove_ha_scope) + .Times(1) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + RemoveHaScope(); + } + + TEST_F(DashHaOrchTest, NoHaSetFound) + { + EXPECT_CALL(*mock_sai_dash_ha_api, create_ha_scope) + .Times(0); + + CreateHaScope(); + + EXPECT_CALL(*mock_sai_dash_ha_api, remove_ha_scope) + .Times(0); + + RemoveHaScope(); + + EXPECT_CALL(*mock_sai_dash_ha_api, remove_ha_set) + .Times(0); + + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, SetHaScopeHaRole) + { + CreateHaSet(); + CreateHaScope(); + + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_DEAD); + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.disabled()); + + SetHaScopeHaRole(); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_ACTIVE); + EXPECT_FALSE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.disabled()); + + SetHaScopeHaRole(""); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_DEAD, SAI_DASH_HA_STATE_DEAD); + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_DEAD); + + SetHaScopeHaRole("dead"); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_DEAD, SAI_DASH_HA_STATE_DEAD); + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_DEAD); + + SetHaScopeHaRole("standby"); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_STANDBY, SAI_DASH_HA_STATE_STANDBY); + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_STANDBY); + + SetHaScopeHaRole("standalone"); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_STANDALONE, SAI_DASH_HA_STATE_STANDALONE); + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_STANDALONE); + + SetHaScopeHaRole("switching_to_active"); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_SWITCHING_TO_ACTIVE, SAI_DASH_HA_STATE_PENDING_ACTIVE_ACTIVATION); + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_SWITCHING_TO_ACTIVE); + + RemoveHaScope(); + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, LastRoleStartTime) + { + CreateHaSet(); + CreateHaScope(); + + std::time_t last_role_start_time = m_dashHaOrch->getHaScopeEntries().begin()->second.last_role_start_time; + + sleep(1); // Ensure time difference + + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().begin()->second.last_role_start_time > last_role_start_time); + + last_role_start_time = m_dashHaOrch->getHaScopeEntries().begin()->second.last_role_start_time; + + sleep(1); + + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().begin()->second.last_role_start_time == last_role_start_time); + + RemoveHaScope(); + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, HaScopeActivateRoleRequest) + { + CreateHaSet(); + CreateHaScope(); + + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_SWITCHING_TO_ACTIVE, SAI_DASH_HA_STATE_PENDING_ACTIVE_ACTIVATION); + + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_SWITCHING_TO_ACTIVE); + + EXPECT_CALL(*mock_sai_dash_ha_api, set_ha_scope_attribute) + .Times(2); // Set ha_role and activate_role_requested + + SetHaScopeActivateRoleRequest(); + + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + + EXPECT_EQ(to_sai(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.ha_role()), SAI_DASH_HA_ROLE_ACTIVE); + + RemoveHaScope(); + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, HaScopeFlowReconcileRequest) + { + CreateHaSet(); + CreateHaScope(); + + HaScopeEvent(SAI_HA_SCOPE_EVENT_FLOW_RECONCILE_NEEDED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.disabled()); + + EXPECT_CALL(*mock_sai_dash_ha_api, set_ha_scope_attribute) + .Times(1); + + SetHaScopeFlowReconcileRequest(); + + EXPECT_TRUE(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1")->second.metadata.disabled()); + + RemoveHaScope(); + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, HaScopeSplitBrain) + { + CreateHaSet(); + CreateHaScope(); + + HaScopeEvent(SAI_HA_SCOPE_EVENT_SPLIT_BRAIN_DETECTED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + + RemoveHaScope(); + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, InvalidInput) + { + RandomTable(); + InvalidHaScopePbString(); + InvalidHaSetPbString(); + + EXPECT_EQ(m_dashHaOrch->getHaScopeEntries().find("HA_SET_1"), m_dashHaOrch->getHaScopeEntries().end()); + EXPECT_EQ(m_dashHaOrch->getHaSetEntries().find("HA_SET_1"), m_dashHaOrch->getHaSetEntries().end()); + + HaSetScopeUnspecified(); + CreateHaScope(); + } + + TEST_F(DashHaOrchTest, BfdSessionHandlingEni) + { + CreateEniScopeHaSet(); + CreateHaScope(); + + CreateSoftwareBfdSession(); + + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 1); + EXPECT_EQ(m_dashHaOrch->getBfdSessionPendingCreation().size(), 0); + + SetHaScopeHaRole(); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + + SetHaScopeHaRole("dead"); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_DEAD, SAI_DASH_HA_STATE_DEAD); + EXPECT_EQ(m_mockBfdOrch->removeAllSoftwareBfdSessions_invoked_times, 0); + + RemoveHaScope(); + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, BfdSessionHandlingDpu) + { + CreateHaSet(); + CreateHaScope(); + + CreateSoftwareBfdSession(); + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 0); + + SetHaScopeHaRole(); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_PENDING_ACTIVE_ACTIVATION); + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 0); + + // bfd sessions should be created when ha_state is set to active + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 1); + + CreateSoftwareBfdSession("default:default:192.168.1.101"); + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 2); + EXPECT_EQ(m_dashHaOrch->getBfdSessionPendingCreation().size(), 2); + + deleteSoftwareBfdSession("default:default:192.168.1.101"); + EXPECT_EQ(m_mockBfdOrch->removeSoftwareBfdSession_invoked_times, 1); + EXPECT_EQ(m_dashHaOrch->getBfdSessionPendingCreation().size(), 1); + + // bfd sessions should be removed immediately when ha_role is set to dead + SetHaScopeHaRole("dead"); + EXPECT_EQ(m_mockBfdOrch->removeAllSoftwareBfdSessions_invoked_times, 1); + + RemoveHaScope(); + RemoveHaSet(); + } + + TEST_F(DashHaOrchTest, BfdSessionHandlingNoHaScope) + { + CreateHaSet(); + + CreateSoftwareBfdSession(); + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 0); + + CreateHaScope(); + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 0); + + SetHaScopeHaRole(); + HaScopeEvent(SAI_HA_SCOPE_EVENT_STATE_CHANGED, + SAI_DASH_HA_ROLE_ACTIVE, SAI_DASH_HA_STATE_ACTIVE); + EXPECT_EQ(m_mockBfdOrch->createSoftwareBfdSession_invoked_times, 1); + + RemoveHaScope(); + RemoveHaSet(); + } +} diff --git a/tests/mock_tests/dashorch_ut.cpp b/tests/mock_tests/dashorch_ut.cpp new file mode 100644 index 00000000000..924e0883e80 --- /dev/null +++ b/tests/mock_tests/dashorch_ut.cpp @@ -0,0 +1,565 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_dash_orch_test.h" +#include "dash_api/appliance.pb.h" +#include "dash_api/route_type.pb.h" +#include "dash_api/eni.pb.h" +#include "dash_api/qos.pb.h" +#include "dash_api/eni_route.pb.h" +#include "dash_api/types.pb.h" + + +EXTERN_MOCK_FNS + +namespace dashorch_test +{ + class MockDashHaOrch : public DashHaOrch + { + public: + MockDashHaOrch(DBConnector *db, const std::vector &tableNames, DashOrch *dash_orch, BfdOrch *bfd_orch, DBConnector *app_state_db, ZmqServer *zmqServer) + : DashHaOrch(db, tableNames, dash_orch, bfd_orch, app_state_db, zmqServer) {} + + HaScopeEntry getHaScopeForEni(const std::string& eni) override + { + HaScopeEntry entry; + + entry.ha_scope_id = 0x123456789ABCDEF0ULL; + entry.metadata.set_ha_role(dash::types::HA_ROLE_ACTIVE); + entry.metadata.set_disabled(false); + + return entry; + } + }; + + DEFINE_SAI_GENERIC_APIS_MOCK(dash_eni, eni) + DEFINE_SAI_ENTRY_APIS_MOCK(dash_trusted_vni, global_trusted_vni, eni_trusted_vni) + DEFINE_SAI_ENTRY_APIS_MOCK(dash_direction_lookup, direction_lookup) + using namespace mock_orch_test; + using ::testing::DoAll; + using ::testing::Return; + using ::testing::SetArgPointee; + using ::testing::SaveArg; + using ::testing::SaveArgPointee; + using ::testing::Invoke; + using ::testing::InSequence; + using dash::types::ValueOrRange; + + ValueOrRange GenVni(int value) + { + ValueOrRange vni; + vni.set_value(value); + return vni; + } + ValueOrRange GenVni(int min, int max) + { + ValueOrRange vni; + vni.mutable_range()->set_min(min); + vni.mutable_range()->set_max(max); + return vni; + } + + ValueOrRange vni_value1 = GenVni(1000); + ValueOrRange vni_value2 = GenVni(2000); + ValueOrRange vni_range1 = GenVni(3000, 4000); + ValueOrRange vni_range2 = GenVni(5000, 6000); + + std::string GetVniString(const ValueOrRange &vni) + { + if (vni.has_value()) { + return std::to_string(vni.value()); + } else if (vni.has_range()) { + return std::to_string(vni.range().min()) + "_" + std::to_string(vni.range().max()); + } else { + return "Invalid VNI"; + } + } + class DashOrchTest : public MockDashOrchTest, public ::testing::WithParamInterface> { + private: + std::unique_ptr m_mock_dash_ha_orch; + + void ApplySaiMock() + { + INIT_SAI_API_MOCK(dash_eni); + INIT_SAI_API_MOCK(dash_trusted_vni); + INIT_SAI_API_MOCK(dash_direction_lookup); + MockSaiApis(); + } + + void PostSetUp() + { + m_mock_dash_ha_orch = std::make_unique(m_dpu_app_db.get(), std::vector{APP_DASH_HA_SET_TABLE_NAME, APP_DASH_HA_SCOPE_TABLE_NAME}, m_DashOrch, nullptr, m_dpu_app_state_db.get(), nullptr); + + m_DashOrch->setDashHaOrch(m_mock_dash_ha_orch.get()); + } + + void PreTearDown() override + { + RestoreSaiApis(); + DEINIT_SAI_API_MOCK(dash_direction_lookup); + DEINIT_SAI_API_MOCK(dash_trusted_vni); + DEINIT_SAI_API_MOCK(dash_eni); + } + + public: + void VerifyTrustedVniEntry(sai_u32_range_t &actual_entry, const ValueOrRange &expected_vni) + { + if (expected_vni.has_value()) { + EXPECT_EQ(actual_entry.min, expected_vni.value()); + EXPECT_EQ(actual_entry.max, expected_vni.value()); + } else if (expected_vni.has_range()) { + EXPECT_EQ(actual_entry.min, expected_vni.range().min()); + EXPECT_EQ(actual_entry.max, expected_vni.range().max()); + } else { + FAIL() << "Invalid ValueOrRange provided"; + } + } + void VerifyEniMode(std::vector &actual_attrs, sai_dash_eni_mode_t expected_mode) + { + for (auto attr : actual_attrs) { + if (attr.id == SAI_ENI_ATTR_DASH_ENI_MODE) { + EXPECT_EQ(attr.value.u32, expected_mode); + return; + } + } + FAIL() << "SAI_ENI_ATTR_DASH_ENI_MODE not found in attributes"; + } + void VerifyDirectionLookup(std::vector &actual_attrs, sai_direction_lookup_entry_action_t expected_lookup) + { + for (auto attr : actual_attrs) { + if (attr.id == SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION) { + EXPECT_EQ(attr.value.u32, expected_lookup); + return; + } + } + FAIL() << "SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION not found in attributes"; + } + void VerifyNoAttribute(std::vector &actual_attrs, sai_object_id_t attr_id) + { + for (auto attr : actual_attrs) { + if (attr.id == attr_id) { + FAIL() << "Unexpected attribute found in attributes"; + } + } + return ; + } + }; + + TEST_F(DashOrchTest, GetNonExistRoutingType) + { + dash::route_type::RouteType route_type; + bool success = m_DashOrch->getRouteTypeActions(dash::route_type::RoutingType::ROUTING_TYPE_DIRECT, route_type); + EXPECT_FALSE(success); + } + + TEST_F(DashOrchTest, DuplicateRoutingTypeEntry) + { + dash::route_type::RouteType route_type1; + dash::route_type::RouteTypeItem *item1 = route_type1.add_items(); + item1->set_action_type(dash::route_type::ActionType::ACTION_TYPE_STATICENCAP); + bool success = m_DashOrch->addRoutingTypeEntry(dash::route_type::RoutingType::ROUTING_TYPE_VNET, route_type1); + EXPECT_TRUE(success); + EXPECT_EQ(m_DashOrch->routing_type_entries_.size(), 1); + EXPECT_EQ(m_DashOrch->routing_type_entries_[dash::route_type::RoutingType::ROUTING_TYPE_VNET].items()[0].action_type(), item1->action_type()); + + dash::route_type::RouteType route_type2; + dash::route_type::RouteTypeItem *item2 = route_type2.add_items(); + item2->set_action_type(dash::route_type::ActionType::ACTION_TYPE_DECAP); + success = m_DashOrch->addRoutingTypeEntry(dash::route_type::RoutingType::ROUTING_TYPE_VNET, route_type2); + EXPECT_TRUE(success); + EXPECT_EQ(m_DashOrch->routing_type_entries_[dash::route_type::RoutingType::ROUTING_TYPE_VNET].items()[0].action_type(), item1->action_type()); + } + + TEST_F(DashOrchTest, RemoveNonExistRoutingType) + { + bool success = m_DashOrch->removeRoutingTypeEntry(dash::route_type::RoutingType::ROUTING_TYPE_DROP); + EXPECT_TRUE(success); + } + + TEST_F(DashOrchTest, SetEniMode) + { + CreateApplianceEntry(); + CreateVnet(); + + Table eni_table = Table(m_app_db.get(), APP_DASH_ENI_TABLE_NAME); + std::vector actual_attrs; + + dash::eni::Eni eni = BuildEniEntry(); + + EXPECT_CALL(*mock_sai_dash_eni_api, create_eni).Times(3) + .WillRepeatedly( + DoAll( + [&actual_attrs](sai_object_id_t *eni_id, sai_object_id_t switch_id, uint32_t attr_count, const sai_attribute_t *attr_list) { + actual_attrs.assign(attr_list, attr_list + attr_count); + }, + Invoke(old_sai_dash_eni_api, &sai_dash_eni_api_t::create_eni) // Call the original function + ) + ); + + SetDashTable(APP_DASH_ENI_TABLE_NAME, "eni1", eni); + VerifyEniMode(actual_attrs, SAI_DASH_ENI_MODE_VM); + SetDashTable(APP_DASH_ENI_TABLE_NAME, "eni1", eni, false); + + eni.set_eni_mode(dash::eni::MODE_FNIC); + SetDashTable(APP_DASH_ENI_TABLE_NAME, "eni1", eni); + VerifyEniMode(actual_attrs, SAI_DASH_ENI_MODE_FNIC); + SetDashTable(APP_DASH_ENI_TABLE_NAME, "eni1", eni, false); + + eni.set_eni_mode(dash::eni::MODE_UNSPECIFIED); + SetDashTable(APP_DASH_ENI_TABLE_NAME, "eni1", eni); + VerifyEniMode(actual_attrs, SAI_DASH_ENI_MODE_VM); // Default + SetDashTable(APP_DASH_ENI_TABLE_NAME, "eni1", eni, false); + } + + TEST_F(DashOrchTest, CreateRemoveApplianceTrustedVnisSingle) + { + int trusted_vni = 100; + dash::appliance::Appliance appliance = BuildApplianceEntry(); + appliance.mutable_trusted_vnis()->set_value(trusted_vni); + + sai_global_trusted_vni_entry_t actual_entry; + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_global_trusted_vni_entry))); + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_global_trusted_vni_entry))); + + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, appliance); + EXPECT_EQ(actual_entry.vni_range.min, trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, trusted_vni); + + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, dash::appliance::Appliance(), false); + EXPECT_EQ(actual_entry.vni_range.min, trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, trusted_vni); + } + + TEST_F(DashOrchTest, CreateRemoveApplianceTrustedVnisRange) + { + int min_trusted_vni = 500; + int max_trusted_vni = 600; + dash::appliance::Appliance appliance = BuildApplianceEntry(); + appliance.mutable_trusted_vnis()->mutable_range()->set_min(min_trusted_vni); + appliance.mutable_trusted_vnis()->mutable_range()->set_max(max_trusted_vni); + + sai_global_trusted_vni_entry_t actual_entry; + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_global_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_global_trusted_vni_entry))); + + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, appliance); + EXPECT_EQ(actual_entry.vni_range.min, min_trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, max_trusted_vni); + + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, dash::appliance::Appliance(), false); + EXPECT_EQ(actual_entry.vni_range.min, min_trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, max_trusted_vni); + } + + TEST_F(DashOrchTest, CreateRemoveEniTrustedVnisSingle) + { + CreateApplianceEntry(); + CreateVnet(); + + int trusted_vni = 200; + dash::eni::Eni eni = BuildEniEntry(); + eni.mutable_trusted_vnis()->set_value(trusted_vni); + + sai_eni_trusted_vni_entry_t actual_entry; + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_eni_trusted_vni_entry))); + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_eni_trusted_vni_entry))); + + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + EXPECT_EQ(actual_entry.vni_range.min, trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, trusted_vni); + + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, dash::eni::Eni(), false); + EXPECT_EQ(actual_entry.vni_range.min, trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, trusted_vni); + } + + TEST_F(DashOrchTest, CreateRemoveEniTrustedVnisRange) + { + CreateApplianceEntry(); + CreateVnet(); + + int min_trusted_vni = 700; + int max_trusted_vni = 800; + dash::eni::Eni eni = BuildEniEntry(); + eni.mutable_trusted_vnis()->mutable_range()->set_min(min_trusted_vni); + eni.mutable_trusted_vnis()->mutable_range()->set_max(max_trusted_vni); + + sai_eni_trusted_vni_entry_t actual_entry; + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_eni_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_eni_trusted_vni_entry))); + + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + EXPECT_EQ(actual_entry.vni_range.min, min_trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, max_trusted_vni); + + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, dash::eni::Eni(), false); + EXPECT_EQ(actual_entry.vni_range.min, min_trusted_vni); + EXPECT_EQ(actual_entry.vni_range.max, max_trusted_vni); + } + + TEST_F(DashOrchTest, DuplicateSetEniTrustedVniSingle) + { + CreateApplianceEntry(); + CreateVnet(); + + int trusted_vni = 300; + dash::eni::Eni eni = BuildEniEntry(); + eni.mutable_trusted_vnis()->set_value(trusted_vni); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry).Times(1); + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_eni_trusted_vni_entry).Times(0); + + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + } + + TEST_F(DashOrchTest, DuplicateSetEniTrustedVniRange) + { + CreateApplianceEntry(); + CreateVnet(); + + int min_trusted_vni = 900; + int max_trusted_vni = 1000; + dash::eni::Eni eni = BuildEniEntry(); + eni.mutable_trusted_vnis()->mutable_range()->set_min(min_trusted_vni); + eni.mutable_trusted_vnis()->mutable_range()->set_max(max_trusted_vni); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry).Times(1); + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_eni_trusted_vni_entry).Times(0); + + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + } + + TEST_P(DashOrchTest, ChangeEniTrustedVni) + { + CreateApplianceEntry(); + CreateVnet(); + + ValueOrRange orig_vni, changed_vni; + std::tie(orig_vni, changed_vni) = GetParam(); + + dash::eni::Eni eni = BuildEniEntry(); + sai_eni_trusted_vni_entry_t actual_entry; + sai_eni_trusted_vni_entry_t removed_entry; + to_sai(changed_vni, removed_entry.vni_range); + + { + InSequence seq; + + // Initial set + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_eni_trusted_vni_entry))); + + // We expect 3 additional changes, orig->changed, changed->orig, and orig->changed + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&removed_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_eni_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_eni_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&removed_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_eni_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_eni_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&removed_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_eni_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_eni_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_eni_trusted_vni_entry))); + } + + for (int i = 0; i < 2; i++) + { + eni.mutable_trusted_vnis()->CopyFrom(orig_vni); + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + VerifyTrustedVniEntry(removed_entry.vni_range, changed_vni); + VerifyTrustedVniEntry(actual_entry.vni_range, orig_vni); + + eni.mutable_trusted_vnis()->CopyFrom(changed_vni); + SetDashTable(APP_DASH_ENI_TABLE_NAME, eni1, eni); + VerifyTrustedVniEntry(removed_entry.vni_range, orig_vni); + VerifyTrustedVniEntry(actual_entry.vni_range, changed_vni); + } + } + + TEST_P(DashOrchTest, ChangeApplianceTrustedVni) + { + ValueOrRange orig_vni, changed_vni; + std::tie(orig_vni, changed_vni) = GetParam(); + + dash::appliance::Appliance appliance = BuildApplianceEntry(); + sai_global_trusted_vni_entry_t actual_entry; + sai_global_trusted_vni_entry_t removed_entry; + to_sai(changed_vni, removed_entry.vni_range); + + { + InSequence seq; + + // Initial set + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_global_trusted_vni_entry))); + + // We expect 3 additional changes, orig->changed, changed->orig, and orig->changed + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&removed_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_global_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_global_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&removed_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_global_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_global_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, remove_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&removed_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::remove_global_trusted_vni_entry))); + + EXPECT_CALL(*mock_sai_dash_trusted_vni_api, create_global_trusted_vni_entry) + .WillOnce( + DoAll( + SaveArgPointee<0>(&actual_entry), + Invoke(old_sai_dash_trusted_vni_api, &sai_dash_trusted_vni_api_t::create_global_trusted_vni_entry))); + } + + for (int i = 0; i < 2; i++) + { + appliance.mutable_trusted_vnis()->CopyFrom(orig_vni); + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, appliance); + VerifyTrustedVniEntry(removed_entry.vni_range, changed_vni); + VerifyTrustedVniEntry(actual_entry.vni_range, orig_vni); + + appliance.mutable_trusted_vnis()->CopyFrom(changed_vni); + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, appliance); + VerifyTrustedVniEntry(removed_entry.vni_range, orig_vni); + VerifyTrustedVniEntry(actual_entry.vni_range, changed_vni); + } + } + + INSTANTIATE_TEST_SUITE_P( + DashOrchChangeTrustedVniTest, + DashOrchTest, + ::testing::Combine( + ::testing::Values(vni_value1, vni_range1), + ::testing::Values(vni_value2, vni_range2)), + [](const testing::TestParamInfo &info) { + const auto &vni1 = std::get<0>(info.param); + const auto &vni2 = std::get<1>(info.param); + return "EniTrustedVni_" + GetVniString(vni1) + "_to_" + GetVniString(vni2); + }); + + TEST_F(DashOrchTest, SetApplianceOutboundLookup) + { + dash::appliance::Appliance appliance = BuildApplianceEntry(); + appliance.set_outbound_direction_lookup("dst_mac"); + + std::vector actual_attrs; + actual_attrs.clear(); + + EXPECT_CALL(*mock_sai_dash_direction_lookup_api, create_direction_lookup_entry).Times(2) + .WillRepeatedly( + DoAll( + [&actual_attrs](const sai_direction_lookup_entry_t *entry, uint32_t count, const sai_attribute_t *attr_list) { + actual_attrs.assign(attr_list, attr_list + count); + }, + Invoke(old_sai_dash_direction_lookup_api, &sai_dash_direction_lookup_api_t::create_direction_lookup_entry) // Call the original function + ) + ); + + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, appliance); + VerifyDirectionLookup(actual_attrs, SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_INBOUND_DIRECTION); + VerifyNoAttribute(actual_attrs, SAI_DIRECTION_LOOKUP_ENTRY_ATTR_DASH_ENI_MAC_OVERRIDE_TYPE); + actual_attrs.clear(); + + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, dash::appliance::Appliance(), false); + appliance.set_outbound_direction_lookup("src_mac"); + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, appliance); + VerifyDirectionLookup(actual_attrs, SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION); + } +} \ No newline at end of file diff --git a/tests/mock_tests/dashportmaporch_ut.cpp b/tests/mock_tests/dashportmaporch_ut.cpp new file mode 100644 index 00000000000..7ba6d50f07e --- /dev/null +++ b/tests/mock_tests/dashportmaporch_ut.cpp @@ -0,0 +1,210 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_dash_orch_test.h" + +#include "dash_api/outbound_port_map.pb.h" + +EXTERN_MOCK_FNS + +namespace dashportmaporch_test +{ + DEFINE_SAI_API_COMBINED_MOCK(dash_outbound_port_map, outbound_port_map, outbound_port_map_port_range) + using namespace mock_orch_test; + using ::testing::DoAll; + using ::testing::Return; + using ::testing::SetArgPointee; + using ::testing::SetArrayArgument; + using ::testing::SaveArg; + using ::testing::SaveArgPointee; + using ::testing::Invoke; + using ::testing::InSequence; + + class DashPortMapOrchTest : public MockDashOrchTest + { + void ApplySaiMock() + { + INIT_SAI_API_MOCK(dash_outbound_port_map); + MockSaiApis(); + } + + void PreTearDown() override + { + RestoreSaiApis(); + DEINIT_SAI_API_MOCK(dash_outbound_port_map); + } + + protected: + std::string port_map1 = "PORT_MAP_1"; + int port_map1_start_port = 1000; + int port_map1_end_port = 2000; + int port_map1_backend_port_base = 5000; + swss::IpAddress port_map1_backend_ip = swss::IpAddress("1.2.3.4"); + dash::outbound_port_map_range::OutboundPortMapRange BuildOutboundPortMapRange() + { + dash::outbound_port_map_range::OutboundPortMapRange port_map_range; + port_map_range.mutable_backend_ip()->set_ipv4(port_map1_backend_ip.getV4Addr()); + port_map_range.set_action(dash::outbound_port_map_range::PortMapRangeAction::ACTION_MAP_PRIVATE_LINK_SERVICE); + port_map_range.set_backend_port_base(port_map1_backend_port_base); + return port_map_range; + } + }; + + TEST_F(DashPortMapOrchTest, AddRemovePortMapEntry) + { + dash::outbound_port_map::OutboundPortMap port_map; + + std::vector exp_status = { SAI_STATUS_SUCCESS }; + sai_object_id_t fake_oid = 0x1234; + sai_object_id_t actual_removed_oid; + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_maps).WillOnce(DoAll(SetArgPointee<5>(fake_oid), SetArrayArgument<6>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, remove_outbound_port_maps).WillOnce(DoAll(SaveArgPointee<1>(&actual_removed_oid), SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, port_map); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, port_map, false); + + EXPECT_EQ(actual_removed_oid, fake_oid); + } + + TEST_F(DashPortMapOrchTest, AddDuplicatePortMap) + { + dash::outbound_port_map::OutboundPortMap port_map; + + std::vector exp_status = { SAI_STATUS_SUCCESS }; + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_maps).Times(1); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, port_map); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, port_map); + } + + TEST_F(DashPortMapOrchTest, RemoveNonexistPortMap) + { + dash::outbound_port_map::OutboundPortMap port_map; + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, remove_outbound_port_maps).Times(0); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, port_map, false); + } + + TEST_F(DashPortMapOrchTest, AddRemovePortMapRange) + { + uint32_t num_attrs; + const sai_attribute_t *attr_start; + std::vector actual_attrs; + sai_outbound_port_map_port_range_entry_t actual_entry; + sai_outbound_port_map_port_range_entry_t removed_entry; + sai_object_id_t fake_oid = 0x1234; + std::vector success_status = { SAI_STATUS_SUCCESS }; + + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_maps) + .WillOnce(DoAll( + SetArgPointee<5>(fake_oid), + SetArrayArgument<6>(success_status.begin(), success_status.end()), + Return(SAI_STATUS_SUCCESS))); + + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_map_port_range_entries) + .WillOnce(DoAll( + SaveArgPointee<1>(&actual_entry), + SaveArgPointee<2>(&num_attrs), + SaveArgPointee<3>(&attr_start), + Return(SAI_STATUS_SUCCESS))); + + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, remove_outbound_port_map_port_range_entries) + .WillOnce(DoAll( + SaveArgPointee<1>(&removed_entry), + SetArrayArgument<3>(success_status.begin(), success_status.end()), + Return(SAI_STATUS_SUCCESS))); + + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, dash::outbound_port_map::OutboundPortMap()); + + std::stringstream key_stream; + key_stream << port_map1 << ":" << port_map1_start_port << "-" << port_map1_end_port; + std::string key = key_stream.str(); + + dash::outbound_port_map_range::OutboundPortMapRange port_map_range = BuildOutboundPortMapRange(); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, key, port_map_range); + + EXPECT_EQ(actual_entry.outbound_port_map_id, fake_oid); + EXPECT_EQ(actual_entry.dst_port_range.min, 1000); + EXPECT_EQ(actual_entry.dst_port_range.max, 2000); + + actual_attrs.assign(attr_start, attr_start + num_attrs); + + for (auto attr : actual_attrs) + { + if (attr.id == SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_ACTION) + { + EXPECT_EQ(attr.value.s32, SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ACTION_MAP_TO_PRIVATE_LINK_SERVICE); + } + else if (attr.id == SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_BACKEND_IP) + { + EXPECT_EQ(attr.value.ipaddr.addr.ip4, port_map1_backend_ip.getV4Addr()); + } + else if (attr.id == SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_MATCH_PORT_BASE) + { + EXPECT_EQ(attr.value.u32, port_map1_start_port); + } + else if (attr.id == SAI_OUTBOUND_PORT_MAP_PORT_RANGE_ENTRY_ATTR_BACKEND_PORT_BASE) + { + EXPECT_EQ(attr.value.u32, port_map1_backend_port_base); + } + } + + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, key, port_map_range, false); + EXPECT_EQ(removed_entry.outbound_port_map_id, fake_oid); + EXPECT_EQ(removed_entry.dst_port_range.min, 1000); + EXPECT_EQ(removed_entry.dst_port_range.max, 2000); + } + + TEST_F(DashPortMapOrchTest, AddDuplicatePortMapRange) + { + auto port_map_range = BuildOutboundPortMapRange(); + std::stringstream key_stream; + key_stream << port_map1 << ":" << port_map1_start_port << "-" << port_map1_end_port; + std::string key = key_stream.str(); + + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_map_port_range_entries).Times(2); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, dash::outbound_port_map::OutboundPortMap()); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, key, port_map_range); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, key, port_map_range); + } + + TEST_F(DashPortMapOrchTest, RemoveNonexistPortMapRange) + { + std::stringstream key_stream; + key_stream << port_map1 << ":" << port_map1_start_port << "-" << port_map1_end_port; + std::string port_map_range_key = key_stream.str(); + + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, remove_outbound_port_map_port_range_entries); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, dash::outbound_port_map::OutboundPortMap()); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, port_map_range_key, dash::outbound_port_map_range::OutboundPortMapRange(), false); + } + + TEST_F(DashPortMapOrchTest, AddPortRangeWithoutPortMap) + { + auto port_map_range = BuildOutboundPortMapRange(); + std::stringstream key_stream; + key_stream << port_map1 << ":" << port_map1_start_port << "-" << port_map1_end_port; + std::string key = key_stream.str(); + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_map_port_range_entries).Times(0); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, key, port_map_range, true, false); + } + + TEST_F(DashPortMapOrchTest, RemoveInUsePortMap) + { + auto port_map_range = BuildOutboundPortMapRange(); + std::stringstream key_stream; + key_stream << port_map1 << ":" << port_map1_start_port << "-" << port_map1_end_port; + std::string port_map_range_key = key_stream.str(); + + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_maps); + EXPECT_CALL(*mock_sai_dash_outbound_port_map_api, create_outbound_port_map_port_range_entries); + + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, dash::outbound_port_map::OutboundPortMap()); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, port_map_range_key, port_map_range); + + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, port_map1, dash::outbound_port_map::OutboundPortMap(), false, false); + } +} diff --git a/tests/mock_tests/dashrouteorch_ut.cpp b/tests/mock_tests/dashrouteorch_ut.cpp new file mode 100644 index 00000000000..390aedc5d38 --- /dev/null +++ b/tests/mock_tests/dashrouteorch_ut.cpp @@ -0,0 +1,57 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_dash_orch_test.h" +#include "dash_api/appliance.pb.h" +#include "dash_api/route_type.pb.h" +#include "dash_api/eni.pb.h" +#include "dash_api/qos.pb.h" +#include "dash_api/eni_route.pb.h" + +EXTERN_MOCK_FNS +namespace dashrouteorch_test +{ + DEFINE_SAI_API_MOCK(dash_outbound_routing, outbound_routing); + using namespace mock_orch_test; + using ::testing::InSequence; + class DashRouteOrchTest : public MockDashOrchTest + { + void PostSetUp() + { + CreateApplianceEntry(); + CreateVnet(); + } + + void ApplySaiMock() + { + INIT_SAI_API_MOCK(dash_outbound_routing); + MockSaiApis(); + } + + void PreTearDown() + { + RestoreSaiApis(); + DEINIT_SAI_API_MOCK(dash_outbound_routing); + } + }; + + TEST_F(DashRouteOrchTest, RouteWithMissingTunnelNotAdded) + { + { + InSequence seq; + EXPECT_CALL(*mock_sai_dash_outbound_routing_api, create_outbound_routing_entries).Times(0); + EXPECT_CALL(*mock_sai_dash_outbound_routing_api, create_outbound_routing_entries).Times(1); + } + AddOutboundRoutingGroup(); + AddOutboundRoutingEntry(false); + + AddTunnel(); + AddOutboundRoutingEntry(); + } +} \ No newline at end of file diff --git a/tests/mock_tests/dashvnetorch_ut.cpp b/tests/mock_tests/dashvnetorch_ut.cpp new file mode 100644 index 00000000000..473e79a6196 --- /dev/null +++ b/tests/mock_tests/dashvnetorch_ut.cpp @@ -0,0 +1,167 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_dash_orch_test.h" +#include "dash_api/appliance.pb.h" +#include "dash_api/route_type.pb.h" +#include "dash_api/eni.pb.h" +#include "dash_api/qos.pb.h" +#include "dash_api/eni_route.pb.h" +#include "gtest/gtest.h" +#include "crmorch.h" + +EXTERN_MOCK_FNS + +namespace dashvnetorch_test +{ + DEFINE_SAI_API_MOCK(dash_outbound_ca_to_pa, outbound_ca_to_pa); + DEFINE_SAI_API_MOCK(dash_pa_validation, pa_validation); + DEFINE_SAI_GENERIC_API_OBJECT_BULK_MOCK(dash_vnet, vnet) + using namespace mock_orch_test; + using ::testing::Return; + using ::testing::Throw; + using ::testing::DoAll; + using ::testing::SetArrayArgument; + using ::testing::SetArgPointee; + using ::testing::InSequence; + + class DashVnetOrchTest : public MockDashOrchTest + { + protected: + int GetCrmUsedCount(CrmResourceType type) + { + CrmOrch::CrmResourceEntry entry = CrmOrch::CrmResourceEntry("", CrmThresholdType::CRM_PERCENTAGE, 0, 1); + gCrmOrch->getResAvailability(type, entry); + return entry.countersMap["STATS"].usedCounter; + } + + void ApplySaiMock() override + { + INIT_SAI_API_MOCK(dash_vnet); + INIT_SAI_API_MOCK(dash_outbound_ca_to_pa); + INIT_SAI_API_MOCK(dash_pa_validation); + MockSaiApis(); + } + + void PostSetUp() override + { + CreateApplianceEntry(); + } + void PreTearDown() override + { + RestoreSaiApis(); + DEINIT_SAI_API_MOCK(dash_outbound_ca_to_pa); + DEINIT_SAI_API_MOCK(dash_pa_validation); + DEINIT_SAI_API_MOCK(dash_vnet); + } + + }; + + TEST_F(DashVnetOrchTest, AddRemoveVnet) + { + std::vector exp_status = {SAI_STATUS_SUCCESS}; + AddVnetEncapRoutingType(dash::route_type::ENCAP_TYPE_VXLAN); + AddPLRoutingType(); + { + InSequence seq; + EXPECT_CALL(*mock_sai_dash_vnet_api, create_vnets).Times(1); + EXPECT_CALL(*mock_sai_dash_outbound_ca_to_pa_api, create_outbound_ca_to_pa_entries).Times(1); + EXPECT_CALL(*mock_sai_dash_pa_validation_api, create_pa_validation_entries).Times(1); + EXPECT_CALL(*mock_sai_dash_outbound_ca_to_pa_api, create_outbound_ca_to_pa_entries).Times(1); + EXPECT_CALL(*mock_sai_dash_outbound_ca_to_pa_api, remove_outbound_ca_to_pa_entries).Times(2); + EXPECT_CALL(*mock_sai_dash_pa_validation_api, remove_pa_validation_entries).Times(1); + EXPECT_CALL(*mock_sai_dash_vnet_api, remove_vnets).Times(1); + } + + CreateVnet(); + AddVnetMap(); + AddPortMap(); + AddVnetMapPL(); + + RemoveVnetMap(); + RemoveVnetMapPL(); + RemoveVnet(); + } + + TEST_F(DashVnetOrchTest, AddVnetMapMissingVnetFails) + { + EXPECT_CALL(*mock_sai_dash_outbound_ca_to_pa_api, create_outbound_ca_to_pa_entries) + .Times(0); + EXPECT_CALL(*mock_sai_dash_pa_validation_api, create_pa_validation_entries) + .Times(0); + AddVnetEncapRoutingType(dash::route_type::ENCAP_TYPE_VXLAN); + AddVnetMap(false); + } + + TEST_F(DashVnetOrchTest, AddExistingOutboundCaToPaSuccessful) + { + AddVnetEncapRoutingType(dash::route_type::ENCAP_TYPE_VXLAN); + CreateVnet(); + AddVnetMap(); + std::vector exp_status = {SAI_STATUS_ITEM_ALREADY_EXISTS}; + + int expectedUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA); + EXPECT_CALL(*mock_sai_dash_outbound_ca_to_pa_api, create_outbound_ca_to_pa_entries) + .Times(1).WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + AddVnetMap(); + int actualUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA); + EXPECT_EQ(expectedUsed, actualUsed); + } + + TEST_F(DashVnetOrchTest, RemoveNonexistVnetMapFails) + { + int expectedUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA); + std::vector exp_status = {SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_dash_outbound_ca_to_pa_api, remove_outbound_ca_to_pa_entries) + .Times(1).WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + RemoveVnetMap(); + int actualUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_OUTBOUND_CA_TO_PA); + EXPECT_EQ(expectedUsed, actualUsed); + } + + TEST_F(DashVnetOrchTest, InvalidEncapVnetMapFails) + { + AddVnetEncapRoutingType(dash::route_type::ENCAP_TYPE_UNSPECIFIED); + CreateVnet(); + AddVnetMap(); + EXPECT_CALL(*mock_sai_dash_outbound_ca_to_pa_api, create_outbound_ca_to_pa_entries) + .Times(0); + AddVnetMap(); + } + + TEST_F(DashVnetOrchTest, AddExistPaValidationSuccessful) + { + AddVnetEncapRoutingType(dash::route_type::ENCAP_TYPE_VXLAN); + CreateVnet(); + std::vector exp_status = {SAI_STATUS_ITEM_ALREADY_EXISTS}; + int expectedUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION); + EXPECT_CALL(*mock_sai_dash_pa_validation_api, create_pa_validation_entries) + .Times(1).WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + AddVnetMap(); + int actualUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION); + EXPECT_EQ(expectedUsed, actualUsed); + } + + TEST_F(DashVnetOrchTest, RemovePaValidationInUseFails) + { + AddVnetEncapRoutingType(dash::route_type::ENCAP_TYPE_VXLAN); + CreateVnet(); + AddVnetMap(); + + int expectedUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION); + std::vector exp_status = {SAI_STATUS_OBJECT_IN_USE}; + + EXPECT_CALL(*mock_sai_dash_pa_validation_api, remove_pa_validation_entries) + .Times(1).WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + RemoveVnet(false); + + int actualUsed = GetCrmUsedCount(CrmResourceType::CRM_DASH_IPV4_PA_VALIDATION); + EXPECT_EQ(expectedUsed, actualUsed); + } +} diff --git a/tests/mock_tests/database_config.json b/tests/mock_tests/database_config.json index baf705ea230..3d66647af80 100644 --- a/tests/mock_tests/database_config.json +++ b/tests/mock_tests/database_config.json @@ -76,6 +76,21 @@ "id" : 14, "separator": ":", "instance" : "redis" + }, + "DPU_APPL_DB" : { + "id" : 15, + "separator": ":", + "instance" : "redis" + }, + "DPU_APPL_STATE_DB" : { + "id" : 16, + "separator": "|", + "instance" : "redis" + }, + "DPU_STATE_DB": { + "id": 17, + "separator": "|", + "instance": "redis" } }, "VERSION" : "1.0" diff --git a/tests/mock_tests/fake_netlink.cpp b/tests/mock_tests/fake_netlink.cpp index 2370e13129a..54eb197493c 100644 --- a/tests/mock_tests/fake_netlink.cpp +++ b/tests/mock_tests/fake_netlink.cpp @@ -1,5 +1,6 @@ #include #include +#include static rtnl_link* g_fakeLink = [](){ auto fakeLink = rtnl_link_alloc(); @@ -7,6 +8,8 @@ static rtnl_link* g_fakeLink = [](){ return fakeLink; }(); +extern int rt_build_ret; +extern bool nlmsg_alloc_ret; extern "C" { @@ -15,4 +18,34 @@ struct rtnl_link* rtnl_link_get_by_name(struct nl_cache *cache, const char *name return g_fakeLink; } +static int build_route_msg(struct rtnl_route *tmpl, int cmd, int flags, + struct nl_msg **result) +{ + struct nl_msg *msg; + int err; + if (!(msg = nlmsg_alloc_simple(cmd, flags))) + return -NLE_NOMEM; + if ((err = rtnl_route_build_msg(msg, tmpl)) < 0) { + nlmsg_free(msg); + return err; + } + *result = msg; + return 0; +} + +int rtnl_route_build_add_request(struct rtnl_route *tmpl, int flags, + struct nl_msg **result) +{ + if (rt_build_ret != 0) + { + return rt_build_ret; + } + else if (!nlmsg_alloc_ret) + { + *result = NULL; + return 0; + } + return build_route_msg(tmpl, RTM_NEWROUTE, NLM_F_CREATE | flags, + result); +} } diff --git a/tests/mock_tests/fake_producerstatetable.cpp b/tests/mock_tests/fake_producerstatetable.cpp index 6221556f63c..33fab17ecf4 100644 --- a/tests/mock_tests/fake_producerstatetable.cpp +++ b/tests/mock_tests/fake_producerstatetable.cpp @@ -4,8 +4,13 @@ using namespace std; namespace swss { + ProducerStateTable::ProducerStateTable(RedisPipeline *pipeline, const string &tableName, bool buffered) - : TableBase(tableName, SonicDBConfig::getSeparator(pipeline->getDBConnector())), TableName_KeySet(tableName) {} + : TableBase(tableName, SonicDBConfig::getSeparator(pipeline->getDBConnector())), TableName_KeySet(tableName), m_buffered(buffered) + , m_pipeowned(false) + , m_tempViewActive(false) + , m_pipe(pipeline) {} ProducerStateTable::~ProducerStateTable() {} + } diff --git a/tests/mock_tests/fake_response_publisher.cpp b/tests/mock_tests/fake_response_publisher.cpp index 29a28d23607..5e6aa0f2a09 100644 --- a/tests/mock_tests/fake_response_publisher.cpp +++ b/tests/mock_tests/fake_response_publisher.cpp @@ -8,7 +8,10 @@ * when needed to test code that uses response publisher. */ std::unique_ptr gMockResponsePublisher; -ResponsePublisher::ResponsePublisher(bool buffered) : m_db(std::make_unique("APPL_STATE_DB", 0)), m_buffered(buffered) {} +ResponsePublisher::ResponsePublisher(const std::string& dbName, bool buffered, bool db_write_thread) : + m_db(std::make_unique(dbName, 0)), m_buffered(buffered) {} + +ResponsePublisher::~ResponsePublisher() {} void ResponsePublisher::publish( const std::string& table, const std::string& key, diff --git a/tests/mock_tests/fake_warmstarthelper.cpp b/tests/mock_tests/fake_warmstarthelper.cpp index 147227df15b..5e29eb0b08c 100644 --- a/tests/mock_tests/fake_warmstarthelper.cpp +++ b/tests/mock_tests/fake_warmstarthelper.cpp @@ -2,6 +2,11 @@ static swss::DBConnector gDb("APPL_DB", 0); +// Mock-specific static variables for testing warm restart state +static std::unordered_map g_mockRefreshMap; +static swss::WarmStart::WarmStartState g_mockState = swss::WarmStart::RECONCILED; +static bool g_mockEnabled = true; + namespace swss { WarmStartHelper::WarmStartHelper(RedisPipeline *pipeline, @@ -19,11 +24,12 @@ WarmStartHelper::~WarmStartHelper() void WarmStartHelper::setState(WarmStart::WarmStartState state) { + g_mockState = state; } WarmStart::WarmStartState WarmStartHelper::getState() const { - return WarmStart::WarmStartState::INITIALIZED; + return g_mockState; } bool WarmStartHelper::checkAndStart() @@ -33,12 +39,13 @@ bool WarmStartHelper::checkAndStart() bool WarmStartHelper::isReconciled() const { - return false; + return (g_mockState == WarmStart::RECONCILED); } bool WarmStartHelper::inProgress() const { - return false; + // Match real implementation: return true when enabled and not reconciled + return (g_mockEnabled && g_mockState != WarmStart::RECONCILED); } uint32_t WarmStartHelper::getRestartTimer() const @@ -53,6 +60,9 @@ bool WarmStartHelper::runRestoration() void WarmStartHelper::insertRefreshMap(const KeyOpFieldsValuesTuple &kfv) { + // Store the entry - in real implementation this would be used during reconciliation + const std::string key = kfvKey(kfv); + g_mockRefreshMap[key] = kfv; } void WarmStartHelper::reconcile() @@ -77,3 +87,11 @@ bool WarmStartHelper::compareOneFV(const std::string &v1, const std::string &v2) } } + +// Test utility function to reset mock state between tests +void resetMockWarmStartHelper() +{ + g_mockRefreshMap.clear(); + g_mockState = swss::WarmStart::RECONCILED; // Default to not in progress + g_mockEnabled = true; +} diff --git a/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp index e6bd8bea1cd..eab62450f45 100644 --- a/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp +++ b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp @@ -77,7 +77,20 @@ namespace fdb_syncd_flush_test m_asic_db = std::make_shared("ASIC_DB", 0); // Construct dependencies - // 1) Portsorch + // 1) SwitchOrch + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // 2) Portsorch const int portsorch_base_pri = 40; vector ports_tables = { @@ -90,7 +103,7 @@ namespace fdb_syncd_flush_test m_portsOrch = std::make_shared(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - // 2) Crmorch + // 3) Crmorch ASSERT_EQ(gCrmOrch, nullptr); gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); VxlanTunnelOrch *vxlan_tunnel_orch_1 = new VxlanTunnelOrch(m_state_db.get(), m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME); @@ -114,6 +127,8 @@ namespace fdb_syncd_flush_test } virtual void TearDown() override { + delete gSwitchOrch; + gSwitchOrch = nullptr; delete gCrmOrch; gCrmOrch = nullptr; gDirectory.m_values.clear(); diff --git a/tests/mock_tests/flexcounter_ut.cpp b/tests/mock_tests/flexcounter_ut.cpp new file mode 100644 index 00000000000..80a400a0fbc --- /dev/null +++ b/tests/mock_tests/flexcounter_ut.cpp @@ -0,0 +1,1162 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private + +#include "json.h" +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_orch_test.h" +#include "dashorch.h" +#include "dashmeterorch.h" +#include "mock_table.h" +#include "notifier.h" +#define private public +#include "pfcactionhandler.h" +#include "switchorch.h" +#include +#undef private +#define private public +#include "warm_restart.h" +#undef private + +#include + +extern bool gTraditionalFlexCounter; + +namespace flexcounter_test +{ + using namespace std; + + // SAI default ports + std::map> defaultPortList; + + shared_ptr mockFlexCounterDb; + shared_ptr mockFlexCounterGroupTable; + shared_ptr mockFlexCounterTable; + sai_set_switch_attribute_fn mockOldSaiSetSwitchAttribute; + + void mock_counter_init(sai_set_switch_attribute_fn old) + { + mockFlexCounterDb = make_shared("FLEX_COUNTER_DB", 0); + mockFlexCounterGroupTable = make_shared(mockFlexCounterDb.get(), "FLEX_COUNTER_GROUP_TABLE"); + mockFlexCounterTable = make_shared(mockFlexCounterDb.get(), "FLEX_COUNTER_TABLE"); + + mockOldSaiSetSwitchAttribute = old; + } + + uint32_t mockFlexCounterOperationCallCount; + sai_status_t mockFlexCounterOperation(sai_object_id_t objectId, const sai_attribute_t *attr) + { + if (objectId != gSwitchId) + { + return SAI_STATUS_FAILURE; + } + + auto *param = reinterpret_cast(attr->value.ptr); + std::vector entries; + auto serializedObjectId = sai_serialize_object_id(objectId); + auto keys = tokenize(string((const char*)param->counter_key.list), ','); + bool first = true; + string groupName; + string key; + + for(auto key : keys) + { + if (first) + { + groupName = tokenize(key, ':')[0]; + first = false; + } + else + { + key = groupName + ":" + key; + } + + if (param->stats_mode.list != nullptr) + { + entries.push_back({STATS_MODE_FIELD, (const char*)param->stats_mode.list}); + } + + if (param->counter_ids.list != nullptr) + { + entries.push_back({(const char*)param->counter_field_name.list, (const char*)param->counter_ids.list}); + mockFlexCounterTable->set(key, entries); + entries.clear(); + } + else + { + mockFlexCounterTable->del(key); + } + } + + mockFlexCounterOperationCallCount++; + + return SAI_STATUS_SUCCESS; + } + + sai_status_t mockFlexCounterGroupOperation(sai_object_id_t objectId, const sai_attribute_t *attr) + { + if (objectId != gSwitchId) + { + return SAI_STATUS_FAILURE; + } + + std::vector entries; + sai_redis_flex_counter_group_parameter_t *flexCounterGroupParam = reinterpret_cast(attr->value.ptr); + + std::string key((const char*)flexCounterGroupParam->counter_group_name.list); + + if (flexCounterGroupParam->poll_interval.list != nullptr) + { + entries.push_back({POLL_INTERVAL_FIELD, (const char*)flexCounterGroupParam->poll_interval.list}); + } + + if (flexCounterGroupParam->stats_mode.list != nullptr) + { + entries.push_back({STATS_MODE_FIELD, (const char*)flexCounterGroupParam->stats_mode.list}); + } + + if (flexCounterGroupParam->plugin_name.list != nullptr) + { + entries.push_back({(const char*)flexCounterGroupParam->plugin_name.list, ""}); + } + + if (flexCounterGroupParam->operation.list != nullptr) + { + entries.push_back({FLEX_COUNTER_STATUS_FIELD, (const char*)flexCounterGroupParam->operation.list}); + } + + if (entries.size() > 0) + { + mockFlexCounterGroupTable->set(key, entries); + } + else + { + if (flexCounterGroupParam->bulk_chunk_size.list != nullptr || flexCounterGroupParam->bulk_chunk_size_per_prefix.list != nullptr) + { + return SAI_STATUS_SUCCESS; + } + mockFlexCounterGroupTable->del(key); + } + + return SAI_STATUS_SUCCESS; + } + + bool _checkFlexCounterTableContent(std::shared_ptr table, const std::string key, std::vector entries) + { + vector fieldValues; + + if (table->get(key, fieldValues)) + { + if (entries.size() == 1 && fieldValues.size() == 1 && fvField(entries[0]).find("COUNTER_ID_LIST") != std::string::npos) + { + auto counterIds = tokenize(fvValue(entries[0]), ','); + auto expectedCounterIds = tokenize(fvValue(fieldValues[0]), ','); + set counterIdSet(counterIds.begin(), counterIds.end()); + set expectedCounterSet(expectedCounterIds.begin(), expectedCounterIds.end()); + return (counterIdSet == expectedCounterSet); + } + set fvSet(fieldValues.begin(), fieldValues.end()); + set expectedSet(entries.begin(), entries.end()); + + bool result = (fvSet == expectedSet); + if (!result && gTraditionalFlexCounter && !entries.empty()) + { + // We can not mock plugin when counter model is traditional and plugin is empty string. + // As a result, the plugin field will not be inserted into the database. + // We add it into the entries fetched from database manually and redo comparing + // The plugin field must be the last one in entries vector + fvSet.insert(entries.back()); + result = (fvSet == expectedSet); + } + + return result; + } + + return entries.empty(); + } + + bool checkFlexCounterGroup(const std::string group, std::vector entries) + { + return _checkFlexCounterTableContent(mockFlexCounterGroupTable, group, entries); + } + + bool checkFlexCounter(const std::string group, sai_object_id_t oid, const std::string counter_field_name="", const std::string mode="") + { + std::vector entries; + + if (!mockFlexCounterTable->get(group + ":" + sai_serialize_object_id(oid), entries)) + { + return counter_field_name.empty(); + } + + if (fvField(entries[0]) == counter_field_name) + { + if (mode == "") + { + // only 1 item: counter IDs + return true; + } + else + { + // 1st item: counter ID, 2nd item: mode + return (fvField(entries[1]) == "mode") && (fvValue(entries[1]) == mode); + } + } + else if (mode != "") + { + // 1st item: mode, 2nd item: counter ID + return (fvField(entries[0]) == "mode") && (fvValue(entries[0]) == mode) && (fvField(entries[1]) == counter_field_name); + } + + return false; + } + + bool checkFlexCounter(const std::string group, sai_object_id_t oid, std::vector entries) + { + return _checkFlexCounterTableContent(mockFlexCounterTable, group + ":" + sai_serialize_object_id(oid), entries); + } + + void isNoPendingCounterObjects() + { + std::vector*> queueCounterManagers({ + &gPortsOrch->queue_stat_manager, + &gPortsOrch->queue_watermark_manager + }); + std::vector*> pgCounterManagers({ + &gPortsOrch->pg_drop_stat_manager, + &gPortsOrch->pg_watermark_manager + }); + + for (auto pgCounterManager : pgCounterManagers) + { + ASSERT_TRUE(pgCounterManager->cached_objects.pending_objects_map.empty()); + } + + for (auto queueCounterManager : queueCounterManagers) + { + for (auto it : queueCounterManager->cached_objects) + { + ASSERT_TRUE(it.second.pending_objects_map.empty()); + } + } + } + + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *pold_sai_switch_api; + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER_GROUP) + { + mockFlexCounterGroupOperation(switch_id, attr); + } + else if (attr[0].id == SAI_REDIS_SWITCH_ATTR_FLEX_COUNTER) + { + mockFlexCounterOperation(switch_id, attr); + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_switch_api() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + mock_counter_init(nullptr); + } + + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + } + + struct FlexCounterTest : public ::testing::TestWithParam> + { + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_counters_db; + shared_ptr m_chassis_app_db; + shared_ptr m_asic_db; + shared_ptr m_flex_counter_db; + bool create_only_config_db_buffers; + + FlexCounterTest() + { + // FIXME: move out from constructor + m_app_db = make_shared( + "APPL_DB", 0); + m_counters_db = make_shared( + "COUNTERS_DB", 0); + m_config_db = make_shared( + "CONFIG_DB", 0); + m_state_db = make_shared( + "STATE_DB", 0); + m_chassis_app_db = make_shared( + "CHASSIS_APP_DB", 0); + m_asic_db = make_shared( + "ASIC_DB", 0); + m_flex_counter_db = make_shared( + "FLEX_COUNTER_DB", 0); + } + + virtual void SetUp() override + { + ::testing_db::reset(); + + gTraditionalFlexCounter = get<0>(GetParam()); + create_only_config_db_buffers = get<1>(GetParam()); + gFlexCounterDelaySec = get<2>(GetParam()); + + if (gTraditionalFlexCounter) + { + initFlexCounterTables(); + } + + _hook_sai_switch_api(); + + // Create dependencies ... + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + if (create_only_config_db_buffers) + { + Table deviceMetadata(m_config_db.get(), CFG_DEVICE_METADATA_TABLE_NAME); + deviceMetadata.set("localhost", { { "create_only_config_db_buffers", "true" } }); + } + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_SEND_TO_INGRESS_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + ASSERT_EQ(gPortsOrch, nullptr); + + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + + gDirectory.set(flexCounterOrch); + + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + ASSERT_EQ(gBufferOrch, nullptr); + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + } + + virtual void TearDown() override + { + ::testing_db::reset(); + + auto buffer_maps = BufferOrch::m_buffer_type_maps; + for (auto &i : buffer_maps) + { + i.second->clear(); + } + + delete gNeighOrch; + gNeighOrch = nullptr; + delete gFdbOrch; + gFdbOrch = nullptr; + delete gIntfsOrch; + gIntfsOrch = nullptr; + delete gPortsOrch; + gPortsOrch = nullptr; + delete gBufferOrch; + gBufferOrch = nullptr; + delete gQosOrch; + gQosOrch = nullptr; + delete gSwitchOrch; + gSwitchOrch = nullptr; + + // clear orchs saved in directory + gDirectory.m_values.clear(); + + _unhook_sai_switch_api(); + + // reset flex counter delay sec + gFlexCounterDelaySec = 0; + } + + static void SetUpTestCase() + { + // Init switch and create dependencies + + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + auto status = ut_helper::initSaiApi(profile); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + // Get SAI default ports + defaultPortList = ut_helper::getInitialSaiPorts(); + ASSERT_TRUE(!defaultPortList.empty()); + } + + static void TearDownTestCase() + { + auto status = sai_switch_api->remove_switch(gSwitchId); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + gSwitchId = 0; + + ut_helper::uninitSaiApi(); + } + + }; + + TEST_P(FlexCounterTest, CounterTest) + { + // Check flex counter database after system initialization + ASSERT_TRUE(checkFlexCounterGroup(SWITCH_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {STATS_MODE_FIELD, STATS_MODE_READ}, + {POLL_INTERVAL_FIELD, "60000"}, + {FLEX_COUNTER_STATUS_FIELD, "disable"} + })); + ASSERT_TRUE(checkFlexCounterGroup(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR}, + {POLL_INTERVAL_FIELD, QUEUE_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS}, + {FLEX_COUNTER_STATUS_FIELD, "disable"}, + {QUEUE_PLUGIN_FIELD, ""} + })); + ASSERT_TRUE(checkFlexCounterGroup(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR}, + {POLL_INTERVAL_FIELD, PG_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS}, + {FLEX_COUNTER_STATUS_FIELD, "disable"}, + {PG_PLUGIN_FIELD, ""} + })); + ASSERT_TRUE(checkFlexCounterGroup(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {STATS_MODE_FIELD, STATS_MODE_READ}, + {POLL_INTERVAL_FIELD, PORT_RATE_FLEX_COUNTER_POLLING_INTERVAL_MS}, + {FLEX_COUNTER_STATUS_FIELD, "disable"}, + {PORT_PLUGIN_FIELD, ""} + })); + ASSERT_TRUE(checkFlexCounterGroup(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {STATS_MODE_FIELD, STATS_MODE_READ}, + {POLL_INTERVAL_FIELD, PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS}, + {FLEX_COUNTER_STATUS_FIELD, "disable"} + })); + ASSERT_TRUE(checkFlexCounterGroup(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {STATS_MODE_FIELD, STATS_MODE_READ}, + {POLL_INTERVAL_FIELD, "1000"}, + {RIF_PLUGIN_FIELD, ""}, + })); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table sendToIngressPortTable = Table(m_app_db.get(), APP_SEND_TO_INGRESS_PORT_TABLE_NAME); + Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table pgTableCfg = Table(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + Table queueTableCfg = Table(m_config_db.get(), CFG_BUFFER_QUEUE_TABLE_NAME); + Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table flexCounterCfg = Table(m_config_db.get(), CFG_FLEX_COUNTER_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + auto firstPortName = ports.begin()->first; + + // Create test buffer pool + poolTable.set( + "ingress_lossless_pool", + { + { "type", "ingress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); + poolTable.set( + "egress_lossless_pool", + { + { "type", "egress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); + + if (create_only_config_db_buffers) + { + // Create test buffer profile + profileTable.set("ingress_lossless_profile", { { "pool", "ingress_lossless_pool" }, + { "xon", "14832" }, + { "xoff", "14832" }, + { "size", "35000" }, + { "dynamic_th", "0" } }); + profileTable.set("egress_lossless_profile", { { "pool", "egress_lossless_pool" }, + { "size", "0" }, + { "dynamic_th", "7" } }); + + // Apply profile on PGs 3-4 all ports + auto appdbKey = firstPortName + ":3-4"; + auto cfgdbKey = firstPortName + "|3-4"; + pgTable.set(appdbKey, { { "profile", "ingress_lossless_profile" } }); + pgTableCfg.set(cfgdbKey, { { "profile", "ingress_lossless_profile" } }); + queueTable.set(appdbKey, { { "profile", "egress_lossless_profile" } }); + queueTableCfg.set(cfgdbKey, { { "profile", "egress_lossless_profile" } }); + } + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + // Populate send to ingresss port table + sendToIngressPortTable.set("SEND_TO_INGRESS", {{"NULL", "NULL"}}); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + gBufferOrch->addExistingData(&pgTable); + gBufferOrch->addExistingData(&queueTable); + gBufferOrch->addExistingData(&poolTable); + gBufferOrch->addExistingData(&profileTable); + + // Apply configuration : + // create ports + static_cast(gBufferOrch)->doTask(); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + // configure buffers + // ports + static_cast(gPortsOrch)->doTask(); + + // Since init done is set now, apply buffers + static_cast(gBufferOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // Enable and check counters + const std::vector values({ {FLEX_COUNTER_STATUS_FIELD, "enable"} }); + flexCounterCfg.set("SWITCH", values); + flexCounterCfg.set("PG_WATERMARK", values); + flexCounterCfg.set("QUEUE_WATERMARK", values); + flexCounterCfg.set("QUEUE", values); + flexCounterCfg.set("PORT_BUFFER_DROP", values); + flexCounterCfg.set("PG_DROP", values); + flexCounterCfg.set("PORT", values); + flexCounterCfg.set("BUFFER_POOL_WATERMARK", values); + flexCounterCfg.set("PFCWD", values); + + auto flexCounterOrch = gDirectory.get(); + flexCounterOrch->addExistingData(&flexCounterCfg); + static_cast(flexCounterOrch)->doTask(); + + if (gFlexCounterDelaySec > 0) + { + // Expire timer + flexCounterOrch->doTask(*flexCounterOrch->m_delayTimer); + static_cast(flexCounterOrch)->doTask(); + } + + isNoPendingCounterObjects(); + + ASSERT_TRUE(checkFlexCounterGroup(SWITCH_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "60000"}, + {STATS_MODE_FIELD, STATS_MODE_READ}, + {FLEX_COUNTER_STATUS_FIELD, "enable"} + })); + ASSERT_TRUE(checkFlexCounterGroup(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "60000"}, + {STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR}, + {FLEX_COUNTER_STATUS_FIELD, "enable"}, + {BUFFER_POOL_PLUGIN_FIELD, ""} + })); + ASSERT_TRUE(checkFlexCounterGroup(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "60000"}, + {STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR}, + {FLEX_COUNTER_STATUS_FIELD, "enable"}, + {QUEUE_PLUGIN_FIELD, ""} + })); + ASSERT_TRUE(checkFlexCounterGroup(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "60000"}, + {STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR}, + {FLEX_COUNTER_STATUS_FIELD, "enable"}, + {PG_PLUGIN_FIELD, ""} + })); + ASSERT_TRUE(checkFlexCounterGroup(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "60000"}, + {STATS_MODE_FIELD, STATS_MODE_READ}, + {FLEX_COUNTER_STATUS_FIELD, "enable"} + })); + ASSERT_TRUE(checkFlexCounterGroup(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "10000"}, + {STATS_MODE_FIELD, STATS_MODE_READ}, + {FLEX_COUNTER_STATUS_FIELD, "enable"} + })); + ASSERT_TRUE(checkFlexCounterGroup(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "1000"}, + {STATS_MODE_FIELD, STATS_MODE_READ}, + {FLEX_COUNTER_STATUS_FIELD, "enable"}, + {PORT_PLUGIN_FIELD, ""} + })); + ASSERT_TRUE(checkFlexCounterGroup(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "10000"}, + {STATS_MODE_FIELD, STATS_MODE_READ}, + {FLEX_COUNTER_STATUS_FIELD, "enable"}, + })); + + sai_object_id_t pool_oid; + pool_oid = (*BufferOrch::m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME])["ingress_lossless_pool"].m_saiObjectId; + ASSERT_TRUE(checkFlexCounter(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, pool_oid, BUFFER_POOL_COUNTER_ID_LIST)); + Port firstPort; + gPortsOrch->getPort(firstPortName, firstPort); + auto pgOid = firstPort.m_priority_group_ids[3]; + ASSERT_TRUE(checkFlexCounter(SWITCH_STAT_COUNTER_FLEX_COUNTER_GROUP, gSwitchId, + { + {SWITCH_COUNTER_ID_LIST, + "SAI_SWITCH_STAT_TX_TRIM_PACKETS," + "SAI_SWITCH_STAT_DROPPED_TRIM_PACKETS" + } + })); + ASSERT_TRUE(checkFlexCounter(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, pgOid, + { + {PG_COUNTER_ID_LIST, + "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" + } + })); + ASSERT_TRUE(checkFlexCounter(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, pgOid, + { + {PG_COUNTER_ID_LIST, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES," + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES" + } + })); + auto queueOid = firstPort.m_queue_ids[3]; + ASSERT_TRUE(checkFlexCounter(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, queueOid, + { + {QUEUE_COUNTER_ID_LIST, + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES" + } + })); + ASSERT_TRUE(checkFlexCounter(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, queueOid, + { + {QUEUE_COUNTER_ID_LIST, + "SAI_QUEUE_STAT_TX_TRIM_PACKETS," + "SAI_QUEUE_STAT_DROPPED_TRIM_PACKETS," + "SAI_QUEUE_STAT_TRIM_PACKETS," + "SAI_QUEUE_STAT_DROPPED_BYTES,SAI_QUEUE_STAT_DROPPED_PACKETS," + "SAI_QUEUE_STAT_BYTES,SAI_QUEUE_STAT_PACKETS" + } + })); + auto oid = firstPort.m_port_id; + ASSERT_TRUE(checkFlexCounter(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP, oid, + { + {PORT_COUNTER_ID_LIST, + "SAI_PORT_STAT_OUT_DROPPED_PKTS,SAI_PORT_STAT_IN_DROPPED_PKTS" + } + })); + // Do not check the content of port counter since it's large and varies among platforms. + ASSERT_TRUE(checkFlexCounter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, oid, PORT_COUNTER_ID_LIST)); + + // create a routing interface + std::deque entries; + entries.push_back({firstPort.m_alias, "SET", { {"mtu", "9100"}}}); + auto consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + + // Check flex counter database + auto rifOid = gIntfsOrch->m_rifsToAdd[0].m_rif_id; + Table vid2rid = Table(m_asic_db.get(), "VIDTORID"); + if (gTraditionalFlexCounter) + { + const auto id = sai_serialize_object_id(rifOid); + vid2rid.set("", { {id, ""} }); + } + (gIntfsOrch)->doTask(*gIntfsOrch->m_updateMapsTimer); + ASSERT_TRUE(checkFlexCounter(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP, rifOid, + { + {RIF_COUNTER_ID_LIST, + "SAI_ROUTER_INTERFACE_STAT_IN_PACKETS,SAI_ROUTER_INTERFACE_STAT_IN_OCTETS," + "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_PACKETS,SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS," + "SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS,SAI_ROUTER_INTERFACE_STAT_OUT_OCTETS," + "SAI_ROUTER_INTERFACE_STAT_OUT_ERROR_PACKETS,SAI_ROUTER_INTERFACE_STAT_OUT_ERROR_OCTETS," + } + })); + + // remove the dependency, expect delete and create a new one + entries.clear(); + entries.push_back({firstPort.m_alias, "DEL", { {} }}); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + + // Check flex counter database + ASSERT_TRUE(checkFlexCounter(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP, rifOid)); + + // PFC watchdog counter test + vector pfc_wd_tables = { + CFG_PFC_WD_TABLE_NAME + }; + + static const vector portStatIds = + { + SAI_PORT_STAT_PFC_0_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_1_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_2_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_3_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_4_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_5_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_6_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_7_RX_PAUSE_DURATION_US, + SAI_PORT_STAT_PFC_0_RX_PKTS, + SAI_PORT_STAT_PFC_1_RX_PKTS, + SAI_PORT_STAT_PFC_2_RX_PKTS, + SAI_PORT_STAT_PFC_3_RX_PKTS, + SAI_PORT_STAT_PFC_4_RX_PKTS, + SAI_PORT_STAT_PFC_5_RX_PKTS, + SAI_PORT_STAT_PFC_6_RX_PKTS, + SAI_PORT_STAT_PFC_7_RX_PKTS, + }; + + static const vector queueStatIds = + { + SAI_QUEUE_STAT_PACKETS, + SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES, + }; + + static const vector queueAttrIds = + { + SAI_QUEUE_ATTR_PAUSE_STATUS, + }; + + gPfcwdOrch = new PfcWdSwOrch( + m_config_db.get(), + pfc_wd_tables, + portStatIds, + queueStatIds, + queueAttrIds, + 100); + gPfcwdOrch->m_platform = MLNX_PLATFORM_SUBSTRING; + + vector qos_tables = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + entries.clear(); + entries.push_back({firstPort.m_alias, "SET", + { + {"pfc_enable", "3,4"}, + {"pfcwd_sw_enable", "3,4"} + }}); + auto portQosMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + portQosMapConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + + // create pfcwd entry for first port with drop action + entries.clear(); + entries.push_back({"GLOBAL", "SET", + { + {POLL_INTERVAL_FIELD, "200"}, + }}); + entries.push_back({firstPort.m_alias, "SET", + { + {"action", "drop"}, + {"detection_time", "200"}, + {"restoration_time", "200"} + }}); + + consumer = dynamic_cast(gPfcwdOrch->getExecutor(CFG_PFC_WD_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + static_cast(gPfcwdOrch)->doTask(); + + ASSERT_TRUE(checkFlexCounterGroup(PFC_WD_FLEX_COUNTER_GROUP, + { + {POLL_INTERVAL_FIELD, "200"}, + {STATS_MODE_FIELD, STATS_MODE_READ}, + {FLEX_COUNTER_STATUS_FIELD, "enable"}, + {QUEUE_PLUGIN_FIELD, ""} + })); + + ASSERT_TRUE(checkFlexCounter(PFC_WD_FLEX_COUNTER_GROUP, firstPort.m_port_id, + { + {PORT_COUNTER_ID_LIST, "SAI_PORT_STAT_PFC_3_RX_PAUSE_DURATION_US,SAI_PORT_STAT_PFC_4_RX_PAUSE_DURATION_US,SAI_PORT_STAT_PFC_3_RX_PKTS,SAI_PORT_STAT_PFC_4_RX_PKTS"} + })); + + ASSERT_TRUE(checkFlexCounter(PFC_WD_FLEX_COUNTER_GROUP, firstPort.m_queue_ids[3], + { + {QUEUE_COUNTER_ID_LIST, "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES,SAI_QUEUE_STAT_PACKETS"}, + {QUEUE_ATTR_ID_LIST, "SAI_QUEUE_ATTR_PAUSE_STATUS"} + })); + + entries.push_back({firstPort.m_alias, "DEL", { {}}}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + ASSERT_TRUE(checkFlexCounter(PFC_WD_FLEX_COUNTER_GROUP, firstPort.m_port_id)); + ASSERT_TRUE(checkFlexCounter(PFC_WD_FLEX_COUNTER_GROUP, firstPort.m_queue_ids[3])); + + delete gPfcwdOrch; + gPfcwdOrch = nullptr; + std::vector pfcValues; + ASSERT_TRUE(checkFlexCounterGroup(PFC_WD_FLEX_COUNTER_GROUP, pfcValues)); + + if (create_only_config_db_buffers) + { + auto appdbKey = firstPortName + ":3-4"; + // Remove buffer PGs/queues + entries.push_back({appdbKey, "DEL", { {} }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); + consumer->addToSync(entries); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + + isNoPendingCounterObjects(); + + ASSERT_TRUE(checkFlexCounter(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, pgOid)); + ASSERT_TRUE(checkFlexCounter(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, pgOid)); + ASSERT_TRUE(checkFlexCounter(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, queueOid)); + ASSERT_TRUE(checkFlexCounter(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, queueOid)); + + if (!gTraditionalFlexCounter) + { + // Create and remove without flushing counters + auto oldMockFlexCounterCallCount = mockFlexCounterOperationCallCount; + gPortsOrch->createPortBufferQueueCounters(firstPort, "3"); + gPortsOrch->removePortBufferQueueCounters(firstPort, "3"); + gPortsOrch->createPortBufferPgCounters(firstPort, "3"); + gPortsOrch->removePortBufferPgCounters(firstPort, "3"); + ASSERT_EQ(oldMockFlexCounterCallCount, mockFlexCounterOperationCallCount); + isNoPendingCounterObjects(); + } + + // Remove buffer profiles + entries.push_back({"ingress_lossless_profile", "DEL", { {} }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + + if (!gTraditionalFlexCounter) + { + // Verify bulk chunk size fields which can be verified in any combination of parameters. + // We verify it here just for convenience. + consumer = dynamic_cast(flexCounterOrch->getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"}, + {"BULK_CHUNK_SIZE", "64"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_TRUE(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT") != flexCounterOrch->m_groupsWithBulkChunkSize.end()); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_EQ(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT"), flexCounterOrch->m_groupsWithBulkChunkSize.end()); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"}, + {"BULK_CHUNK_SIZE_PER_PREFIX", "SAI_PORT_STAT_IF_OUT_QLEN:0;SAI_PORT_STAT_IF_IN_FEC:32"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_TRUE(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT") != flexCounterOrch->m_groupsWithBulkChunkSize.end()); + + entries.push_back({"PORT", "SET", { + {"FLEX_COUNTER_STATUS", "enable"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(flexCounterOrch)->doTask(); + ASSERT_EQ(flexCounterOrch->m_groupsWithBulkChunkSize.find("PORT"), flexCounterOrch->m_groupsWithBulkChunkSize.end()); + } + } + + // Remove buffer pools + entries.push_back({"ingress_lossless_pool", "DEL", { {} }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + ASSERT_TRUE(checkFlexCounter(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, pool_oid)); + + // Warm/fast-boot case - no FC processing done before APPLY_VIEW + std::vector ts; + + gDirectory.get()->bake(); + gDirectory.get()->dumpPendingTasks(ts); + + ASSERT_TRUE(ts.empty()); + } + + INSTANTIATE_TEST_CASE_P( + FlexCounterTests, + FlexCounterTest, + ::testing::Values( + // traditional_flex_counter, create_only_config_db_buffers, flex_counter_delay_sec + std::make_tuple(false, true, 0), + std::make_tuple(false, false, 0), + std::make_tuple(true, true, 0), + std::make_tuple(true, false, 0), + std::make_tuple(false, true, 120), + std::make_tuple(false, false, 120), + std::make_tuple(true, true, 120), + std::make_tuple(true, false, 120) + ) + ); + + using namespace mock_orch_test; + class StandaloneFCTest : public MockOrchTest + { + virtual void PostSetUp() { + _hook_sai_switch_api(); + } + + virtual void PreTearDown() { + _unhook_sai_switch_api(); + } + }; + + TEST_F(StandaloneFCTest, TestEniStatusUpdate) + { + /* Add a mock ENI */ + EniEntry tmp_entry; + tmp_entry.eni_id = 0x7008000000020; + m_DashOrch->eni_entries_["497f23d7-f0ac-4c99-a98f-59b470e8c7b"] = tmp_entry; + + /* Should create ENI Counter stats for existing ENI's */ + m_DashOrch->handleFCStatusUpdate(true); + m_DashOrch->doTask(*(m_DashOrch->m_fc_update_timer)); + ASSERT_TRUE(checkFlexCounter(ENI_STAT_COUNTER_FLEX_COUNTER_GROUP, tmp_entry.eni_id, ENI_COUNTER_ID_LIST)); + + /* This should delete the STATS */ + m_DashOrch->handleFCStatusUpdate(false); + ASSERT_FALSE(checkFlexCounter(ENI_STAT_COUNTER_FLEX_COUNTER_GROUP, tmp_entry.eni_id, ENI_COUNTER_ID_LIST)); + } + + TEST_F(StandaloneFCTest, TestCaching) + { + mockFlexCounterOperationCallCount = 0; + + /* Disable traditional FC since caching is only used for FC config through SAIREDIS channel */ + gTraditionalFlexCounter = false; + FlexCounterTaggedCachedManager port_stat_manager(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, 1000, false); + + // Create two port OIDs + sai_object_id_t port1_oid = 0x100000000000d; + sai_object_id_t port2_oid = 0x100000000000e; + sai_object_id_t port3_oid = 0x100000000000f; + sai_object_id_t port4_oid = 0x1000000000010; + sai_object_id_t port5_oid = 0x100000000000a; + sai_object_id_t port6_oid = 0x100000000000b; + // Different counter stats for each port + std::unordered_set type1_stats = { + "SAI_PORT_STAT_IF_IN_OCTETS", + "SAI_PORT_STAT_IF_IN_ERRORS" + }; + std::unordered_set type2_stats = { + "SAI_PORT_STAT_IF_OUT_OCTETS", + "SAI_PORT_STAT_IF_OUT_ERRORS" + }; + std::unordered_set type3_stats = { + "SAI_PORT_STAT_IF_IN_OCTETS", + "SAI_PORT_STAT_IF_OUT_ERRORS" + }; + + // Set counter IDs for both ports + port_stat_manager.setCounterIdList(port1_oid, CounterType::PORT, type1_stats); + port_stat_manager.setCounterIdList(port2_oid, CounterType::PORT, type1_stats); + port_stat_manager.setCounterIdList(port6_oid, CounterType::PORT, type3_stats); + port_stat_manager.setCounterIdList(port3_oid, CounterType::PORT, type2_stats); + port_stat_manager.setCounterIdList(port4_oid, CounterType::PORT, type2_stats); + port_stat_manager.setCounterIdList(port5_oid, CounterType::PORT, type1_stats); + + // Flush the counters + port_stat_manager.flush(); + + /* SAIREDIS channel should have been called thrice, once for port1&port2&port5, port3&port4 and port6*/ + ASSERT_EQ(mockFlexCounterOperationCallCount, 3); + + ASSERT_TRUE(checkFlexCounter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, port6_oid, + { + {PORT_COUNTER_ID_LIST, + "SAI_PORT_STAT_IF_IN_OCTETS," + "SAI_PORT_STAT_IF_OUT_ERRORS" + } + })); + ASSERT_TRUE(checkFlexCounter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, port5_oid, + { + {PORT_COUNTER_ID_LIST, + "SAI_PORT_STAT_IF_IN_OCTETS," + "SAI_PORT_STAT_IF_IN_ERRORS" + } + })); + ASSERT_TRUE(checkFlexCounter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, port1_oid, + { + {PORT_COUNTER_ID_LIST, + "SAI_PORT_STAT_IF_IN_OCTETS," + "SAI_PORT_STAT_IF_IN_ERRORS" + } + })); + + ASSERT_TRUE(checkFlexCounter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, port2_oid, + { + {PORT_COUNTER_ID_LIST, + "SAI_PORT_STAT_IF_IN_OCTETS," + "SAI_PORT_STAT_IF_IN_ERRORS" + } + })); + + ASSERT_TRUE(checkFlexCounter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, port3_oid, + { + {PORT_COUNTER_ID_LIST, + "SAI_PORT_STAT_IF_OUT_OCTETS," + "SAI_PORT_STAT_IF_OUT_ERRORS" + } + })); + + ASSERT_TRUE(checkFlexCounter(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, port4_oid, + { + {PORT_COUNTER_ID_LIST, + "SAI_PORT_STAT_IF_OUT_OCTETS," + "SAI_PORT_STAT_IF_OUT_ERRORS" + } + })); + } + + class MeterStatFlexCounterTest : public MockOrchTest + { + virtual void PostSetUp() { + _hook_sai_switch_api(); + } + + virtual void PreTearDown() { + _unhook_sai_switch_api(); + } + }; + + TEST_F(MeterStatFlexCounterTest, TestStatusUpdate) + { + /* Add a mock ENI */ + EniEntry tmp_entry; + tmp_entry.eni_id = 0x7008000000021; + m_DashOrch->eni_entries_["497f23d7-f0ac-4c99-a98f-59b470e8c7c"] = tmp_entry; + + /* Should create Meter Counter stats for existing ENI's */ + m_DashMeterOrch->handleMeterFCStatusUpdate(true); + m_DashMeterOrch->doTask(*(m_DashMeterOrch->m_meter_fc_update_timer)); + ASSERT_TRUE(checkFlexCounter(METER_STAT_COUNTER_FLEX_COUNTER_GROUP, tmp_entry.eni_id, DASH_METER_COUNTER_ID_LIST)); + + /* This should delete the STATS */ + m_DashMeterOrch->handleMeterFCStatusUpdate(false); + ASSERT_FALSE(checkFlexCounter(METER_STAT_COUNTER_FLEX_COUNTER_GROUP, tmp_entry.eni_id, DASH_METER_COUNTER_ID_LIST)); + } +} diff --git a/tests/mock_tests/flowcounterrouteorch_ut.cpp b/tests/mock_tests/flowcounterrouteorch_ut.cpp index cf80bda5bf6..089d681b31e 100644 --- a/tests/mock_tests/flowcounterrouteorch_ut.cpp +++ b/tests/mock_tests/flowcounterrouteorch_ut.cpp @@ -135,6 +135,7 @@ namespace flowcounterrouteorch_test ASSERT_EQ(gPortsOrch, nullptr); gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + gDirectory.set(gPortsOrch); vector vnet_tables = { APP_VNET_RT_TABLE_NAME, @@ -175,12 +176,18 @@ namespace flowcounterrouteorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); - auto* tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + ASSERT_EQ(gTunneldecapOrch, nullptr); + vector tunnel_tables = { + APP_TUNNEL_DECAP_TABLE_NAME, + APP_TUNNEL_DECAP_TERM_TABLE_NAME + }; + gTunneldecapOrch = new TunnelDecapOrch(m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + vector mux_tables = { CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME }; - auto* mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + auto* mux_orch = new MuxOrch(m_config_db.get(), mux_tables, gTunneldecapOrch, gNeighOrch, gFdbOrch); gDirectory.set(mux_orch); ASSERT_EQ(gFgNhgOrch, nullptr); @@ -194,11 +201,16 @@ namespace flowcounterrouteorch_test gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); ASSERT_EQ(gSrv6Orch, nullptr); - vector srv6_tables = { - APP_SRV6_SID_LIST_TABLE_NAME, - APP_SRV6_MY_SID_TABLE_NAME + TableConnector srv6_sid_list_table(m_app_db.get(), APP_SRV6_SID_LIST_TABLE_NAME); + TableConnector srv6_my_sid_table(m_app_db.get(), APP_SRV6_MY_SID_TABLE_NAME); + TableConnector srv6_my_sid_cfg_table(m_config_db.get(), CFG_SRV6_MY_SID_TABLE_NAME); + + vector srv6_tables = { + srv6_sid_list_table, + srv6_my_sid_table, + srv6_my_sid_cfg_table }; - gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + gSrv6Orch = new Srv6Orch(m_config_db.get(), m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); // Start FlowCounterRouteOrch static const vector route_pattern_tables = { @@ -234,6 +246,7 @@ namespace flowcounterrouteorch_test for (const auto &it : ports) { portTable.set(it.first, it.second); + portTable.set(it.first, {{ "oper_status", "up" }}); } // Set PortConfigDone @@ -305,6 +318,9 @@ namespace flowcounterrouteorch_test delete gNeighOrch; gNeighOrch = nullptr; + delete gTunneldecapOrch; + gTunneldecapOrch = nullptr; + delete gFdbOrch; gFdbOrch = nullptr; @@ -397,4 +413,4 @@ namespace flowcounterrouteorch_test vrf_consumer->addToSync(entries); static_cast(gVrfOrch)->doTask(); } -} \ No newline at end of file +} diff --git a/tests/mock_tests/fpmsyncd/receive_srv6_mysids_ut.cpp b/tests/mock_tests/fpmsyncd/receive_srv6_mysids_ut.cpp new file mode 100644 index 00000000000..daa45a77593 --- /dev/null +++ b/tests/mock_tests/fpmsyncd/receive_srv6_mysids_ut.cpp @@ -0,0 +1,1349 @@ +#include "ut_helpers_fpmsyncd.h" +#include "gtest/gtest.h" +#include +#include "mock_table.h" +#include +#include +#include +#include "ipaddress.h" + +#define private public // Need to modify internal cache +#include "fpmlink.h" +#include "routesync.h" +#undef private + +using namespace swss; +using namespace testing; + +#define MY_SID_KEY_DELIMITER ':' + +/* +Test Fixture +*/ +namespace ut_fpmsyncd +{ + struct FpmSyncdSRv6MySIDsTest : public ::testing::Test + { + std::shared_ptr m_app_db; + std::shared_ptr pipeline; + std::shared_ptr m_routeSync; + std::shared_ptr m_fpmLink; + std::shared_ptr m_srv6MySidTable; + + virtual void SetUp() override + { + testing_db::reset(); + + m_app_db = std::make_shared("APPL_DB", 0); + + /* Construct dependencies */ + + /* 1) RouteSync */ + pipeline = std::make_shared(m_app_db.get()); + m_routeSync = std::make_shared(pipeline.get()); + + /* 2) FpmLink */ + m_fpmLink = std::make_shared(m_routeSync.get()); + + /* 3) SRV6_MY_SID_TABLE in APP_DB */ + m_srv6MySidTable = std::make_shared(m_app_db.get(), APP_SRV6_MY_SID_TABLE_NAME); + } + + virtual void TearDown() override + { + } + }; +} + +string get_srv6_my_sid_table_key(IpAddress *mysid, int8_t block_len, int8_t node_len, int8_t func_len, int8_t arg_len) +{ + string my_sid_table_key; + + my_sid_table_key += to_string(block_len); + my_sid_table_key += MY_SID_KEY_DELIMITER; + my_sid_table_key += to_string(node_len); + my_sid_table_key += MY_SID_KEY_DELIMITER; + my_sid_table_key += to_string(func_len); + my_sid_table_key += MY_SID_KEY_DELIMITER; + my_sid_table_key += to_string(arg_len); + my_sid_table_key += MY_SID_KEY_DELIMITER; + my_sid_table_key += mysid->to_string(); + + return my_sid_table_key; +} + +namespace ut_fpmsyncd +{ + /* Test Receiving an SRv6 My SID nexthop bound to the End behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEnd) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the End.X behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEndX) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END_X; + IpAddress _adj = IpAddress("2001:db8:1::1"); + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end.x"); + + std::string adj; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), true); + ASSERT_EQ(adj, "2001:db8:1::1"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the End.T behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEndT) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END_T; + char *_vrf = "Vrf10"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end.t"); + + std::string vrf; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), true); + ASSERT_EQ(vrf, "Vrf10"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the End.DX6 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEndDX6) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END_DX6; + IpAddress _adj = IpAddress("2001:db8:1::1"); + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end.dx6"); + + std::string adj; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), true); + ASSERT_EQ(adj, "2001:db8:1::1"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), false); + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the End.DX4 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEndDX4) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END_DX4; + IpAddress _adj = IpAddress("10.0.0.1"); + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end.dx4"); + + std::string adj; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), true); + ASSERT_EQ(adj, "10.0.0.1"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the End.DT4 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEndDT4) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END_DT4; + char *_vrf = "Vrf10"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end.dt4"); + + std::string vrf; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), true); + ASSERT_EQ(vrf, "Vrf10"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the End.DT6 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEndDT6) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END_DT6; + char *_vrf = "Vrf10"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end.dt6"); + + std::string vrf; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), true); + ASSERT_EQ(vrf, "Vrf10"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the End.DT46 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDEndDT46) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_END_DT46; + char *_vrf = "Vrf10"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end.dt46"); + + std::string vrf; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), true); + ASSERT_EQ(vrf, "Vrf10"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uN behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUN) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UN; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "un"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uA behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUA) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UA; + IpAddress _adj = IpAddress("2001:db8:1::1"); + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "ua"); + + std::string adj; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), true); + ASSERT_EQ(adj, "2001:db8:1::1"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uA behavior with an interface */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUAWithIntf) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UA; + IpAddress _adj = IpAddress("fe80::e822:daff:feab:3ee9"); + char *_intf = "Ethernet0"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj, _intf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "ua"); + + std::string adj; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), true); + ASSERT_EQ(adj, "fe80::e822:daff:feab:3ee9@Ethernet0"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj, _intf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uDX6 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUDX6) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UDX6; + IpAddress _adj = IpAddress("2001:db8:1::1"); + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "udx6"); + + std::string adj; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), true); + ASSERT_EQ(adj, "2001:db8:1::1"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uDX4 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUDX4) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UDX4; + IpAddress _adj = IpAddress("10.0.0.1"); + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "udx4"); + + std::string adj; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), true); + ASSERT_EQ(adj, "10.0.0.1"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, NULL, &_adj); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "adj", adj), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uDT4 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUDT4) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UDT4; + char *_vrf = "Vrf10"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "udt4"); + + std::string vrf; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), true); + ASSERT_EQ(vrf, "Vrf10"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uDT6 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUDT6) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UDT6; + char *_vrf = "Vrf10"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "udt6"); + + std::string vrf; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), true); + ASSERT_EQ(vrf, "Vrf10"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID nexthop bound to the uDT46 behavior */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDUDT46) + { + ASSERT_NE(m_routeSync, nullptr); + + /* Create a Netlink object containing an SRv6 My SID */ + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + uint32_t _action = SRV6_LOCALSID_ACTION_UDT46; + char *_vrf = "Vrf10"; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + struct nlmsg *nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + std::string action; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "udt46"); + + std::string vrf; + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), true); + ASSERT_EQ(vrf, "Vrf10"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action, _vrf); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "vrf", vrf), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 My SID with default SID structure */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidEndDefaultSidStructure) + { + ASSERT_NE(m_routeSync, nullptr); + + shared_ptr m_app_db; + m_app_db = make_shared("APPL_DB", 0); + Table srv6_my_sid_table(m_app_db.get(), APP_SRV6_MY_SID_TABLE_NAME); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:40::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + std::string adj; + std::string vrf; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID */ + + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, -1, -1, -1, -1, SRV6_LOCALSID_ACTION_END); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_END); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd removed the entry from the APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an invalid SRv6 My SID */ + TEST_F(FpmSyncdSRv6MySIDsTest, RecevingRouteWithSRv6MySIDInvalid) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:1::"); + int8_t _block_len; + int8_t _node_len; + int8_t _func_len; + int8_t _arg_len; + uint32_t _action = SRV6_LOCALSID_ACTION_UN; + std::string action; + string my_sid_table_key; + + /* Create a Netlink object containing an SRv6 My SID with missing block length */ + _block_len = -1; + _node_len = 16; + _func_len = 16; + _arg_len = 0; + my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Ensure that fpmsyncd does not create an entry in APP_DB (because my SID is invalid)*/ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + + /* Create a Netlink object containing an SRv6 My SID with missing node length */ + _block_len = 32; + _node_len = -1; + _func_len = 16; + _arg_len = 0; + my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Ensure that fpmsyncd does not create an entry in APP_DB (because my SID is invalid)*/ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + + /* Create a Netlink object containing an SRv6 My SID with missing function length */ + _block_len = 32; + _node_len = 16; + _func_len = -1; + _arg_len = 0; + my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Ensure that fpmsyncd does not create an entry in APP_DB (because my SID is invalid)*/ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + + /* Create a Netlink object containing an SRv6 My SID with missing argument length */ + _block_len = 32; + _node_len = 16; + _func_len = 16; + _arg_len = -1; + my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, 0); + + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, _block_len, _node_len, _func_len, _arg_len, _action); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB (with default argument length)*/ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + } + + /* Test Receiving a route containing an invalid SRv6 My SID with missing SID value */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidMissingSidValue) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + std::string action; + + /* Create a Netlink object containing an SRv6 My SID with missing SID value */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, NULL, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_END); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget("32:16:16:0:", "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an invalid SRv6 My SID with IPv4 address as the SID value */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidIpv4SidValue) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("10.0.0.1"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with IPv4 SID value */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_END, NULL, NULL, NULL, 10, 0, AF_INET); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an invalid SRv6 My SID with invalid SID value prefix length */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidSidPrefixlen) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with invalid SID value prefix length */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_END, NULL, NULL, NULL, 10, 200, AF_INET6); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an SRv6 My SID with invalid action */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidAction) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with invalid action */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, 16, 0, 329); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an SRv6 My SID with unspec action */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidUnspecAction) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with unspec action */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_UNSPEC); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an SRv6 My SID bound to End.DT6 behavior with empty VRF */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidVrf) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID bound to End.DT6 behavior with empty VRF */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_END_DT6, NULL); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an invalid SRv6 My SID bound to End.X behavior with empty adjacency */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidAdjacency) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID bound to End.X behavior with empty adjacency */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_END_X, NULL, NULL); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an SRv6 My SID with missing block length */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidMissingBlockLen) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with missing block length */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, -1, 16, 16, 0, SRV6_LOCALSID_ACTION_END, NULL, NULL, NULL, 10, 200, AF_INET6); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an SRv6 My SID with missing node length */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidInvalidMissingNodeLen) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with missing node length */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, -1, 16, 0, SRV6_LOCALSID_ACTION_END, NULL, NULL, NULL, 10, 200, AF_INET6); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an SRv6 My SID with missing function length */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidMissingFunctionLen) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with missing node length */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, -1, 0, SRV6_LOCALSID_ACTION_END, NULL, NULL, NULL, 10, 200, AF_INET6); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving a route containing an SRv6 My SID with missing argument length */ + TEST_F(FpmSyncdSRv6MySIDsTest, SRv6MySidMissingArgumentLen) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpAddress _mysid = IpAddress("fc00:0:1:e000::"); + int8_t _block_len = 32; + int8_t _node_len = 16; + int8_t _func_len = 16; + int8_t _arg_len = 0; + std::string action; + string my_sid_table_key = get_srv6_my_sid_table_key(&_mysid, _block_len, _node_len, _func_len, _arg_len); + + /* Create a Netlink object containing an SRv6 My SID with missing node length */ + nl_obj = create_srv6_mysid_nlmsg(RTM_NEWSRV6LOCALSID, &_mysid, 32, 16, 16, -1, SRV6_LOCALSID_ACTION_END); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd did not create any entry in APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), true); + ASSERT_EQ(action, "end"); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + /* Delete My SID */ + nl_obj = create_srv6_mysid_nlmsg(RTM_DELSRV6LOCALSID, &_mysid, 32, 16, 16, 0, SRV6_LOCALSID_ACTION_END); + if (!nl_obj) + throw std::runtime_error("SRv6 My SID creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd removed the entry from the APP_DB */ + ASSERT_EQ(m_srv6MySidTable->hget(my_sid_table_key, "action", action), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } +} \ No newline at end of file diff --git a/tests/mock_tests/fpmsyncd/receive_srv6_steer_routes_ut.cpp b/tests/mock_tests/fpmsyncd/receive_srv6_steer_routes_ut.cpp new file mode 100644 index 00000000000..56199b5e1c7 --- /dev/null +++ b/tests/mock_tests/fpmsyncd/receive_srv6_steer_routes_ut.cpp @@ -0,0 +1,528 @@ +#include "ut_helpers_fpmsyncd.h" +#include "gtest/gtest.h" +#include +#include "mock_table.h" +#include +#include +#include +#include "ipaddress.h" +#include "ipprefix.h" + +#define private public // Need to modify internal cache +#include "fpmlink.h" +#include "routesync.h" +#undef private + +using namespace swss; +using namespace testing; + +/* +Test Fixture +*/ +namespace ut_fpmsyncd +{ + struct FpmSyncdSRv6RoutesTest : public ::testing::Test + { + std::shared_ptr m_app_db; + std::shared_ptr pipeline; + std::shared_ptr m_routeSync; + std::shared_ptr m_fpmLink; + std::shared_ptr m_routeTable; + std::shared_ptr m_srv6SidListTable; + + virtual void SetUp() override + { + testing_db::reset(); + + m_app_db = std::make_shared("APPL_DB", 0); + + /* Construct dependencies */ + + /* 1) RouteSync */ + pipeline = std::make_shared(m_app_db.get()); + m_routeSync = std::make_shared(pipeline.get()); + + /* 2) FpmLink */ + m_fpmLink = std::make_shared(m_routeSync.get()); + + /* 3) ROUTE_TABLE in APP_DB */ + m_routeTable = std::make_shared(m_app_db.get(), APP_ROUTE_TABLE_NAME); + + /* 4) SRV6_SID_LIST_TABLE in APP_DB */ + m_srv6SidListTable = std::make_shared(m_app_db.get(), APP_SRV6_SID_LIST_TABLE_NAME); + } + + virtual void TearDown() override + { + } + }; +} + +namespace ut_fpmsyncd +{ + /* Test Receiving an SRv6 VPN Route (with an IPv4 prefix) */ + TEST_F(FpmSyncdSRv6RoutesTest, RecevingSRv6VpnRoutesWithIPv4Prefix) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object to install the SRv6 VPN Route */ + IpPrefix _dst = IpPrefix("192.168.6.0/24"); + IpAddress _vpn_sid = IpAddress("fc00:0:2:1::"); + IpAddress _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), true); + ASSERT_EQ(path, _vpn_sid.to_string()); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/24", "segment", segment), true); + ASSERT_EQ(segment, "fc00:0:2:1::"); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/24", "seg_src", seg_src), true); + ASSERT_EQ(seg_src, _encap_src_addr.to_string()); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + + /* Create a Netlink object to uninstall the SRv6 VPN Route */ + nl_obj = create_srv6_vpn_route_nlmsg(RTM_DELROUTE, &_dst, &_encap_src_addr, &_vpn_sid); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd removed the entry from APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/24", "segment", segment), false); + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/24", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 VPN Route (with an IPv4 prefix with /32 prefix length) */ + TEST_F(FpmSyncdSRv6RoutesTest, RecevingSRv6VpnRoutesWithIPv4PrefixMaxPrefixLength) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + std::string path; + std::string segment; + std::string seg_src; + + + /* Create a Netlink object containing an SRv6 VPN Route */ + IpPrefix _dst = IpPrefix("192.168.6.1/32"); + IpAddress _vpn_sid = IpAddress("fc00:0:2:1::"); + IpAddress _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), true); + ASSERT_EQ(path, _vpn_sid.to_string()); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.1", "segment", segment), true); + ASSERT_EQ(segment, "fc00:0:2:1::"); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.1", "seg_src", seg_src), true); + ASSERT_EQ(seg_src, _encap_src_addr.to_string()); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + + /* Create a Netlink object to uninstall the SRv6 VPN Route */ + nl_obj = create_srv6_vpn_route_nlmsg(RTM_DELROUTE, &_dst, &_encap_src_addr, &_vpn_sid); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd removed the entry from APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.1/32", "segment", segment), false); + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.1/32", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 VPN Route (with an IPv6 prefix) */ + TEST_F(FpmSyncdSRv6RoutesTest, RecevingSRv6VpnRoutesWithIPv6Prefix) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object containing an SRv6 VPN Route */ + IpPrefix _dst = IpPrefix("fd00:0:21::/64"); + IpAddress _vpn_sid = IpAddress("fc00:0:2:1::"); + IpAddress _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), true); + ASSERT_EQ(path, _vpn_sid.to_string()); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "segment", segment), true); + ASSERT_EQ(segment, "fc00:0:2:1::"); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "seg_src", seg_src), true); + ASSERT_EQ(seg_src, _encap_src_addr.to_string()); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + + + /* Create a Netlink object to uninstall the SRv6 VPN Route */ + nl_obj = create_srv6_vpn_route_nlmsg(RTM_DELROUTE, &_dst, &_encap_src_addr, &_vpn_sid); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd removed the entry from APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "segment", segment), false); + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an SRv6 VPN Route with missing destination prefix */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidMissingDst) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + std::string path; + std::string segment; + std::string seg_src; + + /* + * Scenario 1: SRv6 VPN Route with missing destination + */ + + /* Create a Netlink object containing an SRv6 VPN Route with missing destination */ + IpAddress _vpn_sid = IpAddress("fc00:0:2:1::"); + IpAddress _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, NULL, &_encap_src_addr, &_vpn_sid); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/24", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/24", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + + /* Test Receiving an invalid SRv6 VPN Route IPv4 with invalid prefix length */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidPrefixlenIpv4) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpPrefix _dst; + IpAddress _vpn_sid; + IpAddress _encap_src_addr; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object containing an SRv6 VPN Route IPv4 with invalid prefix length */ + _dst = IpPrefix("192.168.6.0"); + _vpn_sid = IpAddress("fc00:0:2:1::"); + _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid, 10, 100); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/100", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:192.168.6.0/100", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an invalid SRv6 VPN Route IPv6 with invalid prefix length */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidPrefixlenIpv6) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpPrefix _dst; + IpAddress _vpn_sid; + IpAddress _encap_src_addr; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object containing an SRv6 VPN Route IPv6 with invalid prefix length */ + _dst = IpPrefix("fd00:0:21::"); + _vpn_sid = IpAddress("fc00:0:2:1::"); + _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid, 10, 200); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/200", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/200", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an invalid SRv6 VPN Route with invalid address family */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidAddressFamily) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpPrefix _dst; + IpAddress _vpn_sid; + IpAddress _encap_src_addr; + std::string path; + std::string segment; + std::string seg_src; + + + /* + * Scenario 4: SRv6 VPN Route with invalid address family + */ + + /* Create a Netlink object containing an SRv6 VPN Route IPv6 with invalid address family */ + _dst = IpPrefix("fd00:0:21::/64"); + _vpn_sid = IpAddress("fc00:0:2:1::"); + _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid, 10, 64, 100); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an invalid SRv6 VPN Route with invalid Vrf */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidVrf) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpPrefix _dst; + IpAddress _vpn_sid; + IpAddress _encap_src_addr; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object containing an SRv6 VPN Route IPv6 with invalid Vrf */ + _dst = IpPrefix("fd00:0:21::/64"); + _vpn_sid = IpAddress("fc00:0:2:1::"); + _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid, 20); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an invalid SRv6 VPN Route with invalid Vrf name (not starting with "Vrf" prefix) */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidVrfName) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpPrefix _dst; + IpAddress _vpn_sid; + IpAddress _encap_src_addr; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object containing an SRv6 VPN Route IPv6 with invalid Vrf name (not starting with "Vrf" prefix) */ + _dst = IpPrefix("fd00:0:21::/64"); + _vpn_sid = IpAddress("fc00:0:2:1::"); + _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid, 30); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("invalidVrf:fd00:0:21::/64", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("invalidVrf:fd00:0:21::/64", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an invalid SRv6 VPN Route with invalid route type (blackhole) */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidRouteTypeBlackhole) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpPrefix _dst; + IpAddress _vpn_sid; + IpAddress _encap_src_addr; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object containing an SRv6 VPN Route IPv6 with invalid route type (blackhole) */ + _dst = IpPrefix("fd00:0:21::/64"); + _vpn_sid = IpAddress("fc00:0:2:1::"); + _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid, 30, 64, AF_INET6, RTN_BLACKHOLE); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } + + /* Test Receiving an invalid SRv6 VPN Route with invalid route type (multicast) */ + TEST_F(FpmSyncdSRv6RoutesTest, SRv6VpnRoutesInvalidRouteTypeMulticast) + { + ASSERT_NE(m_routeSync, nullptr); + + struct nlmsg *nl_obj; + IpPrefix _dst; + IpAddress _vpn_sid; + IpAddress _encap_src_addr; + std::string path; + std::string segment; + std::string seg_src; + + /* Create a Netlink object containing an SRv6 VPN Route IPv6 with invalid route type (multicast) */ + _dst = IpPrefix("fd00:0:21::/64"); + _vpn_sid = IpAddress("fc00:0:2:1::"); + _encap_src_addr = IpAddress("fc00:0:1:1::1"); + + nl_obj = create_srv6_vpn_route_nlmsg(RTM_NEWROUTE, &_dst, &_encap_src_addr, &_vpn_sid, 30, 64, AF_INET6, RTN_MULTICAST); + if (!nl_obj) + throw std::runtime_error("SRv6 VPN Route creation failed"); + + /* Send the Netlink object to the FpmLink */ + ASSERT_EQ(m_fpmLink->isRawProcessing(&nl_obj->n), true); + m_fpmLink->processRawMsg(&nl_obj->n); + + /* Check that fpmsyncd created the correct entries in APP_DB */ + ASSERT_EQ(m_srv6SidListTable->hget("fc00:0:2:1::", "path", path), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "segment", segment), false); + + ASSERT_EQ(m_routeTable->hget("Vrf10:fd00:0:21::/64", "seg_src", seg_src), false); + + /* Destroy the Netlink object and free the memory */ + free_nlobj(nl_obj); + } +} diff --git a/tests/mock_tests/fpmsyncd/test_routesync.cpp b/tests/mock_tests/fpmsyncd/test_routesync.cpp index debfa16d210..00931ab5a5e 100644 --- a/tests/mock_tests/fpmsyncd/test_routesync.cpp +++ b/tests/mock_tests/fpmsyncd/test_routesync.cpp @@ -1,12 +1,53 @@ -#include "fpmsyncd/routesync.h" - +#include "redisutility.h" +#include "ut_helpers_fpmsyncd.h" #include #include +#include "mock_table.h" +#define private public +#include "fpmsyncd/routesync.h" +#include "fpmsyncd/fpmlink.h" +#undef private + +#include +#include +#include +#include +#include +#include +#include + +#include using namespace swss; +using namespace testing; +using namespace ut_fpmsyncd; + +#define MAX_PAYLOAD 1024 using ::testing::_; +extern void resetMockWarmStartHelper(); + +int rt_build_ret = 0; +bool nlmsg_alloc_ret = true; +#pragma GCC diagnostic ignored "-Wcast-align" + +class MockRouteSync : public RouteSync +{ +public: + MockRouteSync(RedisPipeline *m_pipeline) : RouteSync(m_pipeline) + { + } + + ~MockRouteSync() + { + } + MOCK_METHOD(bool, getEvpnNextHop, (nlmsghdr *, int, + rtattr *[], std::string&, + std::string& , std::string&, + std::string&), (override)); + MOCK_METHOD(bool, getIfName, (int, char *, size_t), (override)); +}; class MockFpm : public FpmInterface { public: @@ -34,18 +75,25 @@ class FpmSyncdResponseTest : public ::testing::Test public: void SetUp() override { + testing_db::reset(); EXPECT_EQ(rtnl_route_read_protocol_names(DefaultRtProtoPath), 0); m_routeSync.setSuppressionEnabled(true); } void TearDown() override { + testing_db::reset(); } - DBConnector m_db{"APPL_DB", 0}; - RedisPipeline m_pipeline{&m_db, 1}; - RouteSync m_routeSync{&m_pipeline}; + shared_ptr m_db = make_shared("APPL_DB", 0); + shared_ptr m_pipeline = make_shared(m_db.get()); + RouteSync m_routeSync{m_pipeline.get()}; MockFpm m_mockFpm{&m_routeSync}; + MockRouteSync m_mockRouteSync{m_pipeline.get()}; + + const char* test_gateway = "192.168.1.1"; + const char* test_gateway_ = "192.168.1.2"; + const char* test_gateway__ = "192.168.1.3"; }; TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV4) @@ -170,3 +218,2022 @@ TEST_F(FpmSyncdResponseTest, WarmRestart) m_routeSync.onWarmStartEnd(applStateDb); } + +TEST_F(FpmSyncdResponseTest, testEvpn) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *) malloc(NLMSG_SPACE(MAX_PAYLOAD)); + shared_ptr m_app_db; + m_app_db = make_shared("APPL_DB", 0); + Table app_route_table(m_app_db.get(), APP_ROUTE_TABLE_NAME); + + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + nlh->nlmsg_type = RTM_NEWROUTE; + struct rtmsg rtm; + rtm.rtm_family = AF_INET; + rtm.rtm_protocol = 200; + rtm.rtm_type = RTN_UNICAST; + rtm.rtm_table = 0; + rtm.rtm_dst_len = 32; + nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD); + memcpy(NLMSG_DATA(nlh), &rtm, sizeof(rtm)); + + EXPECT_CALL(m_mockRouteSync, getEvpnNextHop(_, _, _, _, _, _, _)).Times(testing::AtLeast(1)).WillOnce([&]( + struct nlmsghdr *h, int received_bytes, + struct rtattr *tb[], std::string& nexthops, + std::string& vni_list, std::string& mac_list, + std::string& intf_list)-> bool { + vni_list="100"; + mac_list="aa:aa:aa:aa:aa:aa"; + intf_list="Ethernet0"; + nexthops = "1.1.1.1"; + return true; + }); + m_mockRouteSync.onMsgRaw(nlh); + + vector keys; + vector fieldValues; + app_route_table.getKeys(keys); + ASSERT_EQ(keys.size(), 1); + + app_route_table.get(keys[0], fieldValues); + auto value = swss::fvsGetValue(fieldValues, "protocol", true); + ASSERT_EQ(value.get(), "0xc8"); + +} + +TEST_F(FpmSyncdResponseTest, testSendOffloadReply) +{ + rt_build_ret = 1; + rtnl_route* routeObject{}; + + ASSERT_EQ(m_routeSync.sendOffloadReply(routeObject), false); + rt_build_ret = 0; + nlmsg_alloc_ret = false; + ASSERT_EQ(m_routeSync.sendOffloadReply(routeObject), false); + nlmsg_alloc_ret = true; +} + +struct nlmsghdr* createNewNextHopMsgHdr(int32_t ifindex, const char* gateway, uint32_t id, unsigned char nh_family=AF_INET) { + struct nlmsghdr *nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(MAX_PAYLOAD)); + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + + // Set header + nlh->nlmsg_type = RTM_NEWNEXTHOP; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nhmsg)); + + // Set nhmsg + struct nhmsg *nhm = (struct nhmsg *)NLMSG_DATA(nlh); + nhm->nh_family = nh_family; + + // Add NHA_ID + struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_ID; + rta->rta_len = RTA_LENGTH(sizeof(uint32_t)); + *(uint32_t *)RTA_DATA(rta) = id; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + // Add NHA_OIF + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_OIF; + rta->rta_len = RTA_LENGTH(sizeof(int32_t)); + *(int32_t *)RTA_DATA(rta) = ifindex; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + // Add NHA_GATEWAY + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_GATEWAY; + if (nh_family == AF_INET6) + { + struct in6_addr gw_addr6; + inet_pton(AF_INET6, gateway, &gw_addr6); + rta->rta_len = RTA_LENGTH(sizeof(struct in6_addr)); + memcpy(RTA_DATA(rta), &gw_addr6, sizeof(struct in6_addr)); + } + else + { + struct in_addr gw_addr; + inet_pton(AF_INET, gateway, &gw_addr); + rta->rta_len = RTA_LENGTH(sizeof(struct in_addr)); + memcpy(RTA_DATA(rta), &gw_addr, sizeof(struct in_addr)); + } + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + return nlh; +} + +TEST_F(FpmSyncdResponseTest, TestNoNHAId) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(MAX_PAYLOAD)); + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + + nlh->nlmsg_type = RTM_NEWNEXTHOP; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nhmsg)); + struct nhmsg *nhm = (struct nhmsg *)NLMSG_DATA(nlh); + nhm->nh_family = AF_INET; + + EXPECT_CALL(m_mockRouteSync, getIfName(_, _, _)) + .Times(0); + + m_mockRouteSync.onNextHopMsg(nlh, 0); + + free(nlh); +} + +TEST_F(FpmSyncdResponseTest, TestNextHopAdd) +{ + uint32_t test_id = 10; + int32_t test_ifindex = 5; + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + auto it = m_mockRouteSync.m_nh_groups.find(test_id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add new nexthop"; + + free(nlh); +} + +TEST_F(FpmSyncdResponseTest, TestIPv6NextHopAdd) +{ + uint32_t test_id = 20; + const char* test_gateway = "2001:db8::1"; + int32_t test_ifindex = 7; + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id, AF_INET6); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet2", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + vector fieldValues; + string key = to_string(test_id); + nexthop_group_table.get(key, fieldValues); + + // onNextHopMsg only updates m_nh_groups unless the nhg is marked as installed + ASSERT_TRUE(fieldValues.empty()); + + // Update the nexthop group to mark it as installed and write to DB + m_mockRouteSync.installNextHopGroup(test_id); + nexthop_group_table.get(key, fieldValues); + + string nexthop, ifname; + for (const auto& fv : fieldValues) { + if (fvField(fv) == "nexthop") { + nexthop = fvValue(fv); + } else if (fvField(fv) == "ifname") { + ifname = fvValue(fv); + } + } + + EXPECT_EQ(nexthop, test_gateway); + EXPECT_EQ(ifname, "Ethernet2"); + + free(nlh); +} + + +TEST_F(FpmSyncdResponseTest, TestGetIfNameFailure) +{ + uint32_t test_id = 22; + int32_t test_ifindex = 9; + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(Return(false)); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + auto it = m_mockRouteSync.m_nh_groups.find(test_id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()); + EXPECT_EQ(it->second.intf, "unknown"); + + free(nlh); +} +TEST_F(FpmSyncdResponseTest, TestSkipSpecialInterfaces) +{ + uint32_t test_id = 11; + int32_t test_ifindex = 6; + + EXPECT_CALL(m_mockRouteSync, getIfName(test_ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t ifidx, char* ifname, size_t size) { + strncpy(ifname, "eth0", size); + }, + Return(true) + )); + + struct nlmsghdr* nlh = createNewNextHopMsgHdr(test_ifindex, test_gateway, test_id); + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + m_mockRouteSync.onNextHopMsg(nlh, expected_length); + + auto it = m_mockRouteSync.m_nh_groups.find(test_id); + EXPECT_EQ(it, m_mockRouteSync.m_nh_groups.end()) << "Should skip eth0 interface"; + + free(nlh); +} + +TEST_F(FpmSyncdResponseTest, TestNextHopGroupKeyString) +{ + EXPECT_EQ(m_mockRouteSync.getNextHopGroupKeyAsString(1), "1"); + EXPECT_EQ(m_mockRouteSync.getNextHopGroupKeyAsString(1234), "1234"); +} + +TEST_F(FpmSyncdResponseTest, TestGetNextHopGroupFields) +{ + // Test single next hop case + { + NextHopGroup nhg(1, test_gateway, "Ethernet0"); + m_mockRouteSync.m_nh_groups.insert({1, nhg}); + + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights); + + EXPECT_EQ(nexthops, test_gateway); + EXPECT_EQ(ifnames, "Ethernet0"); + EXPECT_TRUE(weights.empty()); + } + + // Test multiple next hops with weights + { + // Create the component next hops first + NextHopGroup nhg1(1, test_gateway, "Ethernet0"); + NextHopGroup nhg2(2, test_gateway_, "Ethernet1"); + m_mockRouteSync.m_nh_groups.insert({1, nhg1}); + m_mockRouteSync.m_nh_groups.insert({2, nhg2}); + + // Create the group with multiple next hops + vector> group_members; + group_members.push_back(make_pair(1, 1)); // id=1, weight=1 + group_members.push_back(make_pair(2, 2)); // id=2, weight=2 + + NextHopGroup nhg(3, group_members); + m_mockRouteSync.m_nh_groups.insert({3, nhg}); + + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights); + + EXPECT_EQ(nexthops, "192.168.1.1,192.168.1.2"); + EXPECT_EQ(ifnames, "Ethernet0,Ethernet1"); + EXPECT_EQ(weights, "1,2"); + } + + // Test IPv6 default case + { + NextHopGroup nhg(4, "", "Ethernet0"); + m_mockRouteSync.m_nh_groups.insert({4, nhg}); + + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights, AF_INET6); + + EXPECT_EQ(nexthops, "::"); + EXPECT_EQ(ifnames, "Ethernet0"); + EXPECT_TRUE(weights.empty()); + } + + // Both empty + { + NextHopGroup nhg(5, "", ""); + string nexthops, ifnames, weights; + m_mockRouteSync.getNextHopGroupFields(nhg, nexthops, ifnames, weights, AF_INET); + + EXPECT_EQ(nexthops, "0.0.0.0"); + EXPECT_TRUE(ifnames.empty()); + EXPECT_TRUE(weights.empty()); + } +} + +TEST_F(FpmSyncdResponseTest, TestUpdateNextHopGroupDb) +{ + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + // Test single next hop group + { + NextHopGroup nhg(1, test_gateway, "Ethernet0"); + m_mockRouteSync.updateNextHopGroupDb(nhg); + + vector fieldValues; + nexthop_group_table.get("1", fieldValues); + + EXPECT_EQ(fieldValues.size(), 3); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), test_gateway); + EXPECT_EQ(fvField(fieldValues[1]), "ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), "Ethernet0"); + EXPECT_EQ(fvField(fieldValues[2]), "weight"); + EXPECT_EQ(fvValue(fieldValues[2]), ""); + } + + // Test group with multiple next hops + { + vector> group_members; + group_members.push_back(make_pair(1, 1)); + group_members.push_back(make_pair(2, 2)); + + NextHopGroup nhg1(1, test_gateway, "Ethernet0"); + NextHopGroup nhg2(2, test_gateway_, "Ethernet1"); + NextHopGroup group(3, group_members); + + m_mockRouteSync.m_nh_groups.insert({1, nhg1}); + m_mockRouteSync.m_nh_groups.insert({2, nhg2}); + m_mockRouteSync.m_nh_groups.insert({3, group}); + + m_mockRouteSync.installNextHopGroup(3); + + auto it = m_mockRouteSync.m_nh_groups.find(3); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()); + EXPECT_TRUE(it->second.installed); + vector fieldValues; + nexthop_group_table.get("3", fieldValues); + EXPECT_EQ(fieldValues.size(), 3); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), "192.168.1.1,192.168.1.2"); + EXPECT_EQ(fvField(fieldValues[1]), "ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), "Ethernet0,Ethernet1"); + EXPECT_EQ(fvField(fieldValues[2]), "weight"); + EXPECT_EQ(fvValue(fieldValues[2]), "1,2"); + } + + // Empty nexthop (default route case) + { + NextHopGroup nhg(4, "", "Ethernet0"); + m_mockRouteSync.updateNextHopGroupDb(nhg); + + vector fieldValues; + nexthop_group_table.get("4", fieldValues); + + EXPECT_EQ(fieldValues.size(), 3); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), "0.0.0.0"); + EXPECT_EQ(fvField(fieldValues[1]), "ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), "Ethernet0"); + EXPECT_EQ(fvField(fieldValues[2]), "weight"); + EXPECT_EQ(fvValue(fieldValues[2]), ""); + } + + // Empty interface name + { + NextHopGroup nhg(5, test_gateway, ""); + m_mockRouteSync.updateNextHopGroupDb(nhg); + + vector fieldValues; + nexthop_group_table.get("5", fieldValues); + + EXPECT_EQ(fieldValues.size(), 3); + EXPECT_EQ(fvField(fieldValues[0]), "nexthop"); + EXPECT_EQ(fvValue(fieldValues[0]), test_gateway); + EXPECT_EQ(fvField(fieldValues[1]), "ifname"); + EXPECT_EQ(fvValue(fieldValues[1]), ""); + EXPECT_EQ(fvField(fieldValues[2]), "weight"); + EXPECT_EQ(fvValue(fieldValues[2]), ""); + } +} + +TEST_F(FpmSyncdResponseTest, TestDeleteNextHopGroup) +{ + // Setup test groups + NextHopGroup nhg1(1, test_gateway, "Ethernet0"); + NextHopGroup nhg2(2, test_gateway_, "Ethernet1"); + nhg1.installed = true; + nhg2.installed = true; + + m_mockRouteSync.m_nh_groups.insert({1, nhg1}); + m_mockRouteSync.m_nh_groups.insert({2, nhg2}); + + // Test deletion + m_mockRouteSync.deleteNextHopGroup(1); + EXPECT_EQ(m_mockRouteSync.m_nh_groups.find(1), m_mockRouteSync.m_nh_groups.end()); + EXPECT_NE(m_mockRouteSync.m_nh_groups.find(2), m_mockRouteSync.m_nh_groups.end()); + + // Test deleting non-existent group + m_mockRouteSync.deleteNextHopGroup(999); + EXPECT_EQ(m_mockRouteSync.m_nh_groups.find(999), m_mockRouteSync.m_nh_groups.end()); +} + +struct nlmsghdr* createNewNextHopMsgHdr(const vector>& group_members, uint32_t id, + uint32_t nlmsg_type = RTM_NEWNEXTHOP) { + struct nlmsghdr *nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(MAX_PAYLOAD)); + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + + // Set header + nlh->nlmsg_type = static_cast(nlmsg_type); + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nhmsg)); + + // Set nhmsg + struct nhmsg *nhm = (struct nhmsg *)NLMSG_DATA(nlh); + nhm->nh_family = AF_INET; + + // Add NHA_ID + struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_ID; + rta->rta_len = RTA_LENGTH(sizeof(uint32_t)); + *(uint32_t *)RTA_DATA(rta) = id; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + // Add NHA_GROUP + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_GROUP; + struct nexthop_grp* grp = (struct nexthop_grp*)malloc(group_members.size() * sizeof(struct nexthop_grp)); + + for (size_t i = 0; i < group_members.size(); i++) { + grp[i].id = group_members[i].first; + grp[i].weight = group_members[i].second - 1; // kernel stores weight-1 + } + + size_t payload_size = group_members.size() * sizeof(struct nexthop_grp); + if (payload_size > USHRT_MAX - RTA_LENGTH(0)) { + free(nlh); + return nullptr; + } + + rta->rta_len = static_cast(RTA_LENGTH(group_members.size() * sizeof(struct nexthop_grp))); + memcpy(RTA_DATA(rta), grp, group_members.size() * sizeof(struct nexthop_grp)); + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + free(grp); + return nlh; +} + +TEST_F(FpmSyncdResponseTest, TestNextHopGroupAdd) +{ + // 1. create nexthops + uint32_t nh1_id = 1; + uint32_t nh2_id = 2; + uint32_t nh3_id = 3; + + struct nlmsghdr* nlh1 = createNewNextHopMsgHdr(1, test_gateway, nh1_id); + struct nlmsghdr* nlh2 = createNewNextHopMsgHdr(2, test_gateway_, nh2_id); + struct nlmsghdr* nlh3 = createNewNextHopMsgHdr(3, test_gateway__, nh3_id); + + EXPECT_CALL(m_mockRouteSync, getIfName(1, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + EXPECT_CALL(m_mockRouteSync, getIfName(2, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet2", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + EXPECT_CALL(m_mockRouteSync, getIfName(3, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet3", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh1, (int)(nlh1->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onNextHopMsg(nlh2, (int)(nlh2->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onNextHopMsg(nlh3, (int)(nlh3->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + // 2. create a nexthop group with these nexthops + uint32_t group_id = 10; + vector> group_members = { + {nh1_id, 1}, // id=1, weight=1 + {nh2_id, 2}, // id=2, weight=2 + {nh3_id, 3} // id=3, weight=3 + }; + + struct nlmsghdr* group_nlh = createNewNextHopMsgHdr(group_members, group_id); + ASSERT_NE(group_nlh, nullptr) << "Failed to create group nexthop message"; + m_mockRouteSync.onNextHopMsg(group_nlh, (int)(group_nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + // Verify the group was added correctly + auto it = m_mockRouteSync.m_nh_groups.find(group_id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add nexthop group"; + + // Verify group members + const auto& group = it->second.group; + ASSERT_EQ(group.size(), 3) << "Wrong number of group members"; + + // Check each member's ID and weight + EXPECT_EQ(group[0].first, nh1_id); + EXPECT_EQ(group[0].second, 1); + EXPECT_EQ(group[1].first, nh2_id); + EXPECT_EQ(group[1].second, 2); + EXPECT_EQ(group[2].first, nh3_id); + EXPECT_EQ(group[2].second, 3); + + // Mark the group as installed and verify DB update + m_mockRouteSync.installNextHopGroup(group_id); + + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + vector fieldValues; + string key = to_string(group_id); + nexthop_group_table.get(key, fieldValues); + + ASSERT_EQ(fieldValues.size(), 3) << "Wrong number of fields in DB"; + + // Verify the DB fields + string nexthops, ifnames, weights; + for (const auto& fv : fieldValues) { + if (fvField(fv) == "nexthop") { + nexthops = fvValue(fv); + } else if (fvField(fv) == "ifname") { + ifnames = fvValue(fv); + } else if (fvField(fv) == "weight") { + weights = fvValue(fv); + } + } + + EXPECT_EQ(nexthops, "192.168.1.1,192.168.1.2,192.168.1.3"); + EXPECT_EQ(ifnames, "Ethernet1,Ethernet2,Ethernet3"); + EXPECT_EQ(weights, "1,2,3"); + + // Cleanup + free(nlh1); + free(nlh2); + free(nlh3); + free(group_nlh); +} + +TEST_F(FpmSyncdResponseTest, TestRouteMsgWithNHG) +{ + Table route_table(m_db.get(), APP_ROUTE_TABLE_NAME); + auto createRoute = [](const char* prefix, uint8_t prefixlen) -> rtnl_route* { + rtnl_route* route = rtnl_route_alloc(); + nl_addr* dst_addr; + nl_addr_parse(prefix, AF_INET, &dst_addr); + rtnl_route_set_dst(route, dst_addr); + rtnl_route_set_type(route, RTN_UNICAST); + rtnl_route_set_protocol(route, RTPROT_STATIC); + rtnl_route_set_family(route, AF_INET); + rtnl_route_set_scope(route, RT_SCOPE_UNIVERSE); + rtnl_route_set_table(route, RT_TABLE_MAIN); + nl_addr_put(dst_addr); + return route; + }; + + uint32_t test_nh_id = 1; + uint32_t test_nhg_id = 2; + uint32_t test_nh_id_ = 3; + uint32_t test_nh_id__ = 4; + + // create a route + const char* test_destipprefix = "10.1.1.0"; + rtnl_route* test_route = createRoute(test_destipprefix, 24); + + // Test 1: use a non-existent nh_id + { + rtnl_route_set_nh_id(test_route, test_nh_id); + + m_mockRouteSync.onRouteMsg(RTM_NEWROUTE, (nl_object*)test_route, nullptr); + + vector keys; + route_table.getKeys(keys); + + // verify the route is discarded + EXPECT_TRUE(std::find(keys.begin(), keys.end(), test_destipprefix) == keys.end()); + } + + // Test 2: using a nexthop + { + // create the nexthop + struct nlmsghdr* nlh = createNewNextHopMsgHdr(1, test_gateway, test_nh_id); + + EXPECT_CALL(m_mockRouteSync, getIfName(1, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh, (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + free(nlh); + + rtnl_route_set_nh_id(test_route, test_nh_id); + + m_mockRouteSync.onRouteMsg(RTM_NEWROUTE, (nl_object*)test_route, nullptr); + + vector fvs; + EXPECT_TRUE(route_table.get(test_destipprefix, fvs)); + EXPECT_EQ(fvs.size(), 11); + for (const auto& fv : fvs) { + if (fvField(fv) == "nexthop") { + EXPECT_EQ(fvValue(fv), test_gateway); + } else if (fvField(fv) == "ifname") { + EXPECT_EQ(fvValue(fv), "Ethernet1"); + } else if (fvField(fv) == "protocol") { + EXPECT_EQ(fvValue(fv), "static"); + } else if (fvField(fv) == "blackhole") { + EXPECT_EQ(fvValue(fv), "false"); + } else if (fvField(fv) == "nexthop_group") { + EXPECT_EQ(fvValue(fv), ""); + } else if (fvField(fv) == "mpls_nh") { + EXPECT_EQ(fvValue(fv), ""); + } else if (fvField(fv) == "weight") { + EXPECT_EQ(fvValue(fv), ""); + } + } + } + + // Test 3: using an nhg + { + struct nlmsghdr* nlh1 = createNewNextHopMsgHdr(2, test_gateway_, test_nh_id_); + struct nlmsghdr* nlh2 = createNewNextHopMsgHdr(3, test_gateway__, test_nh_id__); + + EXPECT_CALL(m_mockRouteSync, getIfName(2, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet2", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + EXPECT_CALL(m_mockRouteSync, getIfName(3, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Ethernet3", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + m_mockRouteSync.onNextHopMsg(nlh1, (int)(nlh1->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onNextHopMsg(nlh2, (int)(nlh2->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + vector> group_members = { + {test_nh_id_, 1}, + {test_nh_id__, 2} + }; + + struct nlmsghdr* group_nlh = createNewNextHopMsgHdr(group_members, test_nhg_id); + m_mockRouteSync.onNextHopMsg(group_nlh, (int)(group_nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + // create the route object referring to this next hop group + rtnl_route_set_nh_id(test_route, test_nhg_id); + m_mockRouteSync.onRouteMsg(RTM_NEWROUTE, (nl_object*)test_route, nullptr); + + vector fvs; + EXPECT_TRUE(route_table.get(test_destipprefix, fvs)); + + for (const auto& fv : fvs) { + if (fvField(fv) == "nexthop_group") { + EXPECT_EQ(fvValue(fv), "2"); + } else if (fvField(fv) == "protocol") { + EXPECT_EQ(fvValue(fv), "static"); + } + } + + vector group_fvs; + Table nexthop_group_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + EXPECT_TRUE(nexthop_group_table.get("2", group_fvs)); + + // clean up + free(nlh1); + free(nlh2); + free(group_nlh); + } + + rtnl_route_put(test_route); +} + + +TEST_F(FpmSyncdResponseTest, RouteResponseOnNoProto) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).Times(0); + + m_routeSync.onRouteResponse("1.0.0.0/24", { + {"err_str", "SWSS_RC_SUCCESS"}, + }); +} + +TEST_F(FpmSyncdResponseTest, TestBlackholeRoute) +{ + Table route_table(m_db.get(), APP_ROUTE_TABLE_NAME); + Table label_route_table(m_db.get(), APP_LABEL_ROUTE_TABLE_NAME); + auto createRoute = [](const char* prefix, uint8_t prefixlen) -> rtnl_route* { + rtnl_route* route = rtnl_route_alloc(); + nl_addr* dst_addr; + nl_addr_parse(prefix, AF_INET, &dst_addr); + rtnl_route_set_dst(route, dst_addr); + rtnl_route_set_type(route, RTN_BLACKHOLE); + rtnl_route_set_protocol(route, RTPROT_STATIC); + rtnl_route_set_family(route, AF_INET); + rtnl_route_set_scope(route, RT_SCOPE_UNIVERSE); + rtnl_route_set_table(route, RT_TABLE_UNSPEC); + nl_addr_put(dst_addr); + return route; + }; + + // create a route + const char* test_destipprefix = "10.1.1.0"; + rtnl_route* test_route = createRoute(test_destipprefix, 24); + + const char* test_destipprefix2 = "20.1.1.0"; + rtnl_route* test_route2 = createRoute(test_destipprefix2, 24); + { + + m_mockRouteSync.onRouteMsg(RTM_NEWROUTE, (nl_object*)test_route, nullptr); + + // verify the blackhole route has protocol programmed + vector fvs; + EXPECT_TRUE(route_table.get(test_destipprefix, fvs)); + + bool proto_found = false; + for (const auto& fv : fvs) { + if (fvField(fv) == "protocol") { + proto_found = true; + EXPECT_EQ(fvValue(fv), "static"); + } + } + EXPECT_TRUE(proto_found); + + m_mockRouteSync.onLabelRouteMsg(RTM_NEWROUTE, (nl_object*)test_route2); + + // verify the blackhole route has protocol programmed + EXPECT_TRUE(label_route_table.get(test_destipprefix2, fvs)); + + proto_found = false; + for (const auto& fv : fvs) { + if (fvField(fv) == "protocol") { + proto_found = true; + EXPECT_EQ(fvValue(fv), "static"); + } + } + EXPECT_TRUE(proto_found); + } +} + +auto create_nl_addr(const char* addr_str) +{ + nl_addr* addr; + nl_addr_parse(addr_str, AF_INET, &addr); + return unique_ptr(addr, nl_addr_put); +} + +auto create_route(const char* dst_addr_str) +{ + rtnl_route* route = rtnl_route_alloc(); + auto dst_addr = create_nl_addr(dst_addr_str); + rtnl_route_set_dst(route, dst_addr.get()); + rtnl_route_set_type(route, RTN_UNICAST); + rtnl_route_set_protocol(route, RTPROT_STATIC); + rtnl_route_set_family(route, AF_INET); + rtnl_route_set_scope(route, RT_SCOPE_UNIVERSE); + rtnl_route_set_table(route, RT_TABLE_MAIN); + return unique_ptr(route, rtnl_route_put); +} + +rtnl_nexthop* create_nexthop(const char* gateway_str) +{ + static int idx = 1; // interface index + ++idx; + // Create a nexthop with 0 weight + rtnl_nexthop* nh = rtnl_route_nh_alloc(); + rtnl_route_nh_set_weight(nh, 0); + rtnl_route_nh_set_ifindex(nh, idx); + auto gateway_addr = create_nl_addr(gateway_str); + rtnl_route_nh_set_gateway(nh, gateway_addr.get()); + return nh; +} + +// Checks that when a nexthop is not assigned a weight, the default weight of 1 is used. +TEST_F(FpmSyncdResponseTest, TestGetNextHopWt) +{ + auto test_route = create_route("10.1.1.0"); + + // Create two nexthops with 0 weight + rtnl_nexthop* nh1 = create_nexthop(test_gateway); + rtnl_nexthop* nh2 = create_nexthop(test_gateway_); + + // Add new nexthops to the route + rtnl_route_add_nexthop(test_route.get(), nh1); + rtnl_route_add_nexthop(test_route.get(), nh2); + + EXPECT_EQ(m_mockRouteSync.getNextHopWt(test_route.get()), "1,1"); +} + +class WarmRestartRouteSyncTest : public ::testing::Test +{ +public: + void SetUp() override + { + resetMockWarmStartHelper(); // Reset warm restart state before each test + testing_db::reset(); + EXPECT_EQ(rtnl_route_read_protocol_names(DefaultRtProtoPath), 0); + } + + void TearDown() override + { + resetMockWarmStartHelper(); // Reset warm restart state after each test + testing_db::reset(); + } + + shared_ptr m_db = make_shared("APPL_DB", 0); + shared_ptr m_pipeline = make_shared(m_db.get()); + RouteSync m_testRouteSync{m_pipeline.get()}; +}; + +TEST_F(WarmRestartRouteSyncTest, TestRouteMessageHandlingWarmRestartNotInProgress) +{ + EXPECT_FALSE(m_testRouteSync.getWarmStartHelper().inProgress()); + + auto route = create_route("192.168.1.0/24"); + + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_BGP); + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), nullptr); + + // Verify: Route was set directly in the table + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_TRUE(routeTable.get("192.168.1.0/24", result)); + + // Should have protocol and blackhole fields + bool foundProtocol = false, foundBlackhole = false; + for (const auto& fv : result) { + if (fvField(fv) == "protocol" && fvValue(fv) == "bgp") { + foundProtocol = true; + } else if (fvField(fv) == "blackhole" && fvValue(fv) == "true") { + foundBlackhole = true; + } + } + EXPECT_TRUE(foundProtocol); + EXPECT_TRUE(foundBlackhole); +} + +TEST_F(WarmRestartRouteSyncTest, TestRouteDeleteHandlingWarmRestartNotInProgress) +{ + auto route = create_route("192.168.2.0/24"); + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_STATIC); + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), nullptr); + + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_TRUE(routeTable.get("192.168.2.0/24", result)); + + EXPECT_FALSE(m_testRouteSync.getWarmStartHelper().inProgress()); + + m_testRouteSync.onRouteMsg(RTM_DELROUTE, (struct nl_object*)route.get(), nullptr); + + // Verify: Route was deleted from the table + EXPECT_FALSE(routeTable.get("192.168.2.0/24", result)); +} + +TEST_F(WarmRestartRouteSyncTest, TestBlackholeRouteHandlingWarmRestartNotInProgress) +{ + EXPECT_FALSE(m_testRouteSync.getWarmStartHelper().inProgress()); + + auto route = create_route("192.168.6.0/24"); + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_STATIC); + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), nullptr); + + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_TRUE(routeTable.get("192.168.6.0/24", result)); + + bool foundBlackhole = false, foundProtocol = false; + for (const auto& fv : result) { + if (fvField(fv) == "blackhole" && fvValue(fv) == "true") { + foundBlackhole = true; + } else if (fvField(fv) == "protocol" && fvValue(fv) == "static") { + foundProtocol = true; + } + } + EXPECT_TRUE(foundBlackhole); + EXPECT_TRUE(foundProtocol); +} + +TEST_F(WarmRestartRouteSyncTest, TestVrfRouteHandlingWarmRestartNotInProgress) +{ + // Test VRF route handling with warm restart integration + + EXPECT_FALSE(m_testRouteSync.getWarmStartHelper().inProgress()); + + auto route = create_route("192.168.8.0/24"); + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_BGP); + rtnl_route_set_table(route.get(), 100); // VRF table ID + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), "Vrf100"); + + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_TRUE(routeTable.get("Vrf100:192.168.8.0/24", result)); + + bool foundProtocol = false, foundBlackhole = false; + for (const auto& fv : result) { + if (fvField(fv) == "protocol" && fvValue(fv) == "bgp") { + foundProtocol = true; + } else if (fvField(fv) == "blackhole" && fvValue(fv) == "true") { + foundBlackhole = true; + } + } + EXPECT_TRUE(foundProtocol); + EXPECT_TRUE(foundBlackhole); +} + +TEST_F(WarmRestartRouteSyncTest, TestStaticRouteHandlingWarmRestartNotInProgress) +{ + // Test static route handling with warm restart integration + EXPECT_FALSE(m_testRouteSync.getWarmStartHelper().inProgress()); + + auto route = create_route("192.168.3.0/24"); + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_STATIC); + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), nullptr); + + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_TRUE(routeTable.get("192.168.3.0/24", result)); + + bool foundProtocol = false; + for (const auto& fv : result) { + if (fvField(fv) == "protocol" && fvValue(fv) == "static") { + foundProtocol = true; + } + } + EXPECT_TRUE(foundProtocol); +} + +// Tests for when warm restart IS in progress +TEST_F(WarmRestartRouteSyncTest, TestRouteHandlingWarmRestartInProgress) +{ + // Simulate warm restart in progress by setting state to INITIALIZED (not RECONCILED) + m_testRouteSync.getWarmStartHelper().setState(WarmStart::INITIALIZED); + + EXPECT_TRUE(m_testRouteSync.getWarmStartHelper().inProgress()); + EXPECT_FALSE(m_testRouteSync.getWarmStartHelper().isReconciled()); + + auto route = create_route("192.168.10.0/24"); + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_BGP); + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), nullptr); + + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_FALSE(routeTable.get("192.168.10.0/24", result)); +} + +TEST_F(WarmRestartRouteSyncTest, TestVrfRouteHandlingWarmRestartInProgress) +{ + // Simulate warm restart in progress by setting state to RESTORED + m_testRouteSync.getWarmStartHelper().setState(WarmStart::RESTORED); + + EXPECT_TRUE(m_testRouteSync.getWarmStartHelper().inProgress()); + + auto route = create_route("192.168.11.0/24"); + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_STATIC); + rtnl_route_set_table(route.get(), 200); // Different VRF table + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), "Vrf200"); + + // Verify: Route should NOT be in the regular table yet (handled by warm restart helper) + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_FALSE(routeTable.get("Vrf200:192.168.11.0/24", result)); +} + +TEST_F(WarmRestartRouteSyncTest, TestRouteDeleteHandlingWarmRestartInProgress) +{ + auto route = create_route("192.168.12.0/24"); + rtnl_route_set_type(route.get(), RTN_BLACKHOLE); + rtnl_route_set_protocol(route.get(), RTPROT_STATIC); + + m_testRouteSync.onRouteMsg(RTM_NEWROUTE, (struct nl_object*)route.get(), nullptr); + + Table routeTable(m_db.get(), APP_ROUTE_TABLE_NAME); + vector result; + EXPECT_TRUE(routeTable.get("192.168.12.0/24", result)); + + // Now simulate warm restart in progress + m_testRouteSync.getWarmStartHelper().setState(WarmStart::INITIALIZED); + EXPECT_TRUE(m_testRouteSync.getWarmStartHelper().inProgress()); + + m_testRouteSync.onRouteMsg(RTM_DELROUTE, (struct nl_object*)route.get(), nullptr); + + // Verify: Route should still be in table (deletion handled by warm restart helper) + EXPECT_TRUE(routeTable.get("192.168.12.0/24", result)); + + +} + + +TEST_F(FpmSyncdResponseTest, TestSrv6VpnRoute_Add_NHG) +{ + std::string dst_prefix = "2001:db8::/64"; + std::string encap_src = "2001:db8::1"; + std::string vpn_sid = "2001:db8::2"; + uint16_t vrf_table_id = 100; + uint32_t pic_id = 67; + uint32_t nhg_id = 12; + + /* Create IpAddress and IpPrefix Object */ + IpAddress _encap_src_obj = IpAddress(encap_src); + IpAddress _vpn_sid_obj = IpAddress(vpn_sid); + IpPrefix _dst_obj = IpPrefix(dst_prefix); + + /* Create Srv6 Vpn route netlink msg */ + struct nlmsg *nl_obj = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 64, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Mock using getIfName to return vrfname */ + EXPECT_CALL(m_mockRouteSync, getIfName(vrf_table_id, _, _)) + .Times(2) + .WillRepeatedly(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Vrf100", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + /* for case: not found pic_it or nhg_it + * nothing to check and would return. + */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj->n, nl_obj->n.nlmsg_len); + + /* Construct PIC Group */ + NextHopGroup pic_group(pic_id, encap_src, "sr0"); + pic_group.vpn_sid = vpn_sid; + pic_group.seg_src = encap_src; + m_mockRouteSync.m_nh_groups.insert({pic_id, pic_group}); + + /* Construct NHG */ + vector> nhg_data; + nhg_data.push_back(make_pair(1, 1)); + NextHopGroup nh_group(nhg_id, nhg_data); + nh_group.nexthop = "fe80::1"; + nh_group.intf = "eth0"; + m_mockRouteSync.m_nh_groups.insert({nhg_id, nh_group}); + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj->n, nl_obj->n.nlmsg_len); + + // Check whether use the m_routeTable.set + Table route_table(m_db.get(), APP_ROUTE_TABLE_NAME); + std::vector fvs; + std::string key = "Vrf100:" + dst_prefix; + + /* Check the results */ + bool found = route_table.get(key, fvs); + EXPECT_TRUE(found); + // Check each attr value + for (const auto& fv : fvs) { + if (fvField(fv) == "pic_context_id") { + EXPECT_EQ(fvValue(fv), "67"); + } else if (fvField(fv) == "nexthop_group") { + EXPECT_EQ(fvValue(fv), "12"); + } + } + + /* Check whether use the m_nexthop_groupTable.set */ + Table nhg_table(m_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + std::vector fvs_nhg; + std::string key_nhg = m_mockRouteSync.getNextHopGroupKeyAsString(nhg_id); + + /* Check the result */ + bool found_nhg = nhg_table.get(key_nhg.c_str(), fvs_nhg); + EXPECT_TRUE(found_nhg); + // Check each attr value + for (const auto& fv_nhg : fvs_nhg) { + if (fvField(fv_nhg) == "seg_src") { + EXPECT_EQ(fvValue(fv_nhg), "2001:db8::1"); + } + } + + free(nl_obj); +} + +TEST_F(FpmSyncdResponseTest, TestSrv6VpnRoute_NH) +{ + std::string dst_prefix = "2001:db8:1::/64"; + std::string encap_src = "2001:db8:1::1"; + std::string vpn_sid = "2001:db8:1::2"; + uint16_t vrf_table_id = 101; + uint32_t pic_id = 89; + uint32_t nhg_id = 34; + + /* Create IpAddress and IpPrefix Object */ + IpAddress _encap_src_obj = IpAddress(encap_src); + IpAddress _vpn_sid_obj = IpAddress(vpn_sid); + IpPrefix _dst_obj = IpPrefix(dst_prefix); + + /* Mock using getIfName to return vrfname */ + EXPECT_CALL(m_mockRouteSync, getIfName(vrf_table_id, _, _)) + .Times(11) + .WillRepeatedly(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Vrf101", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + /*-----------------------------------------------*/ + /* Test 1: Create and process ADD message for NH */ + /*-----------------------------------------------*/ + { + /* Create Srv6 Vpn route netlink msg with ADD cmd */ + struct nlmsg *nl_obj = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 64, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Construct PIC Group */ + NextHopGroup pic_group(pic_id, encap_src, "sr0"); + pic_group.vpn_sid = vpn_sid; + pic_group.seg_src = encap_src; + m_mockRouteSync.m_nh_groups.insert({pic_id, pic_group}); + + /* Construct NHG with no group */ + NextHopGroup nh_group(nhg_id, "fe80::2", "eth1"); + nh_group.nexthop = "fe80::2"; + nh_group.intf = "eth1"; + m_mockRouteSync.m_nh_groups.insert({nhg_id, nh_group}); + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj->n, nl_obj->n.nlmsg_len); + + /* Check whether use the m_routeTable.set */ + Table route_table(m_db.get(), APP_ROUTE_TABLE_NAME); + std::vector fvs; + std::string key = "Vrf101:" + dst_prefix; + + /* Check the result */ + bool found = route_table.get(key, fvs); + EXPECT_TRUE(found); + // Check each attr value + for (const auto& fv : fvs) { + if (fvField(fv) == "nexthop") { + EXPECT_EQ(fvValue(fv), "2001:db8:1::1"); + } else if (fvField(fv) == "vpn_sid") { + EXPECT_EQ(fvValue(fv), "2001:db8:1::2"); + } else if (fvField(fv) == "seg_src") { + EXPECT_EQ(fvValue(fv), "2001:db8:1::1"); + } else if (fvField(fv) == "ifname") { + EXPECT_EQ(fvValue(fv), "eth1"); + } + } + + /* Free the memory */ + free(nl_obj); + } + + /*-----------------------------------------------*/ + /* Test 2: Create and process DEL message for NH */ + /*-----------------------------------------------*/ + { + /* Create Srv6 Vpn route netlink msg with DEL cmd */ + struct nlmsg *del_nl_obj = create_srv6_vpn_route_nlmsg( + RTM_DELSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 64, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!del_nl_obj) { + ADD_FAILURE() << "Failed to create SRv6 DEL Route message"; + return; + } + + /* Call the target function for DEL */ + m_mockRouteSync.onSrv6VpnRouteMsg(&del_nl_obj->n, del_nl_obj->n.nlmsg_len); + + /* Check whether use the m_routeTable.set */ + Table route_table(m_db.get(), APP_ROUTE_TABLE_NAME); + std::vector fvs; + std::string key = "Vrf101:" + dst_prefix; + + /* Check whether the route was deleted */ + bool found = route_table.get(key, fvs); + EXPECT_FALSE(found); + + free(del_nl_obj); + } + + /*-----------------------------*/ + /* Test 3: Test other branches */ + /*-----------------------------*/ + // Case 1: no DST + { + /* Create a route message without RTA_DST */ + struct nlmsg *nl_obj_no_dst = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + nullptr, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 64, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj_no_dst) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj_no_dst->n, nl_obj_no_dst->n.nlmsg_len); + + free(nl_obj_no_dst); + } + + // Case 2: AF_INET6 with too large dst bitlen + { + /* Create a route message with too large dst bitlen */ + struct nlmsg *nl_obj_large_bitlen = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 130, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj_large_bitlen) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj_large_bitlen->n, nl_obj_large_bitlen->n.nlmsg_len); + + free(nl_obj_large_bitlen); + } + + // Case 3: AF_INET6 with max dst bitlen + { + /* Create a route message with max dst bitlen */ + struct nlmsg *nl_obj_max_bitlen = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 128, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj_max_bitlen) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj_max_bitlen->n, nl_obj_max_bitlen->n.nlmsg_len); + + free(nl_obj_max_bitlen); + } + + // Case 4: wrong nlmsg_type, neither RTM_NEWSRV6VPNROUTE nor RTM_DELSRV6VPNROUTE + { + /* Create a route message with wrong nlmsg_type */ + struct nlmsg *nl_obj_wrong_nlmsg_type = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6LOCALSID, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 64, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj_wrong_nlmsg_type) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj_wrong_nlmsg_type->n, nl_obj_wrong_nlmsg_type->n.nlmsg_len); + + free(nl_obj_wrong_nlmsg_type); + } + + // Case 5: wrong rtm_type + { + /* List of rtm_types to test */ + int types_to_test[] = { + RTN_BLACKHOLE, + RTN_UNREACHABLE, + RTN_PROHIBIT, + RTN_MULTICAST, + RTN_BROADCAST, + RTN_LOCAL, + __RTN_MAX // default case + }; + + for (int rtm_type : types_to_test) { + struct nlmsg *nl_obj_wrong_rtm_type = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 64, + AF_INET6, + static_cast(rtm_type), + nhg_id, + pic_id); + if (!nl_obj_wrong_rtm_type) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message with type " << rtm_type; + continue; + } + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj_wrong_rtm_type->n, nl_obj_wrong_rtm_type->n.nlmsg_len); + + free(nl_obj_wrong_rtm_type); + } + } + + // Case 6: invalid rtm_family + { + /* Create a route message with invalid rtm_family */ + struct nlmsg *nl_obj_invalid_rtm_family = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + vrf_table_id, + 64, + AF_LOCAL, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj_invalid_rtm_family) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj_invalid_rtm_family->n, nl_obj_invalid_rtm_family->n.nlmsg_len); + + free(nl_obj_invalid_rtm_family); + } + + // Case 7: create RTA_TABLE + { + /* Create a route message with RTA_TABLE */ + struct nlmsg *nl_obj_RTA_TABLE = create_srv6_vpn_route_nlmsg( + RTM_NEWSRV6VPNROUTE, + &_dst_obj, + &_encap_src_obj, + &_vpn_sid_obj, + 257, // set vrf_table_id > 256 + 64, + AF_INET6, + RTN_UNICAST, + nhg_id, + pic_id); + if (!nl_obj_RTA_TABLE) { + ADD_FAILURE() << "Failed to create SRv6 VPN Route message"; + return; + } + + /* Mock using getIfName to return vrfname, vrf_table_id == 257 */ + EXPECT_CALL(m_mockRouteSync, getIfName(257, _, _)) + .WillOnce(DoAll( + [](int32_t, char* ifname, size_t size) { + strncpy(ifname, "Vrf257", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + /* Call the target function */ + m_mockRouteSync.onSrv6VpnRouteMsg(&nl_obj_RTA_TABLE->n, nl_obj_RTA_TABLE->n.nlmsg_len); + + free(nl_obj_RTA_TABLE); + } +} + + +/* Add UT for onPicContextMsg */ +struct nlmsghdr* createPicContextMsgHdr(uint16_t msg_type, uint32_t id = 0, const char *gateway = nullptr, + int32_t ifindex = 0, unsigned char nh_family = AF_INET, + const char *seg6_sid = nullptr, + const char *seg6_src = nullptr, + uint32_t encap_type = LWTUNNEL_ENCAP_SEG6) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)malloc(NLMSG_SPACE(MAX_PAYLOAD)); + memset(nlh, 0, NLMSG_SPACE(MAX_PAYLOAD)); + + // Set header + nlh->nlmsg_type = msg_type; + nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_REPLACE; + nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nhmsg)); + + // Set nhmsg + struct nhmsg *nhm = (struct nhmsg *)NLMSG_DATA(nlh); + nhm->nh_family = nh_family; + + // Prepare the rta + struct rtattr *rta; + // Add NHA_ID + if (id) { + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_ID; + rta->rta_len = RTA_LENGTH(sizeof(uint32_t)); + *(uint32_t *)RTA_DATA(rta) = id; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + } + + // Add NHA_OIF + if (ifindex) { + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_OIF; + rta->rta_len = RTA_LENGTH(sizeof(int32_t)); + *(int32_t *)RTA_DATA(rta) = ifindex; + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + } + + // Add NHA_GATEWAY + if (gateway) { + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_GATEWAY; + if (nh_family == AF_INET6) { + struct in6_addr gw_addr6; + inet_pton(AF_INET6, gateway, &gw_addr6); + rta->rta_len = RTA_LENGTH(sizeof(struct in6_addr)); + memcpy(RTA_DATA(rta), &gw_addr6, sizeof(struct in6_addr)); + } + else { + struct in_addr gw_addr; + inet_pton(AF_INET, gateway, &gw_addr); + rta->rta_len = RTA_LENGTH(sizeof(struct in_addr)); + memcpy(RTA_DATA(rta), &gw_addr, sizeof(struct in_addr)); + } + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + } + + // Add Srv6 Encap info if provided + if (seg6_sid && seg6_src) { + // Add NHA_ENCAP_TYPE tlv, type of value is int. Similar with rta_type, change it to uint16_t + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_ENCAP_TYPE; + rta->rta_len = RTA_LENGTH(sizeof(uint16_t)); + *(uint16_t *)RTA_DATA(rta) = static_cast(encap_type); + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + /* + * Prepare work: Calculate the nested tlv length, + * we need it to fill the value of outside len. + */ + size_t num_segments = 1; + size_t value_size = sizeof(struct seg6_iptunnel_encap_pri) + + num_segments * sizeof(struct ipv6_sr_hdr) + + num_segments * sizeof(struct in6_addr); + size_t nested_size = sizeof(struct rtattr) + value_size; + + // Add type and len of NHA_ENCAP + rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len)); + rta->rta_type = NHA_ENCAP; + rta->rta_len = static_cast(RTA_LENGTH(nested_size)); + + // Add nested SEG6_IPTUNNEL_SRH as ENCAP's payload + struct rtattr *sub_rta = (struct rtattr *)(RTA_DATA(rta)); + // Add type and len of SEG6_IPTUNNEL_SRH + sub_rta->rta_type = SEG6_IPTUNNEL_SRH; + sub_rta->rta_len = static_cast(RTA_LENGTH(value_size)); + + // Prepare the value we truly need + struct seg6_iptunnel_encap_pri *encap_data = (struct seg6_iptunnel_encap_pri *)malloc(value_size); + if (!encap_data) { + free(nlh); + return NULL; + } + memset(encap_data, 0, value_size); + + // Set the src + struct in6_addr src; + inet_pton(AF_INET6, seg6_src, &src); + encap_data->src = src; + + // Aquire srh pointer + struct ipv6_sr_hdr *srh = encap_data->srh; + // Set segments Address + struct in6_addr sid; + inet_pton(AF_INET6, seg6_sid, &sid); + memcpy(srh->segments, &sid, sizeof(sid)); + + // Copy the entire data into the Netlink message + memcpy(RTA_DATA(sub_rta), encap_data, value_size); + nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + + free(encap_data); + } + + return nlh; +} + +TEST_F(FpmSyncdResponseTest, TestPicContext_NH) +{ + uint16_t msg_type = RTM_NEWPICCONTEXT; + uint32_t id = 100; + const char *gateway = "2001:db8::1"; + int32_t ifindex = 101; + unsigned char nh_family = AF_INET6; + const char *seg6_sid = "2001:db8::2"; + const char *seg6_src = "2001:db8::3"; + + + /*-------------------------------------------*/ + /* Test 1: Create and process ADD msg for NH */ + /*-------------------------------------------*/ + { + /* Create netlink msg header with ADD cmd */ + struct nlmsghdr *nlh = createPicContextMsgHdr( + msg_type, + id, + gateway, + ifindex, + nh_family, + seg6_sid, + seg6_src + ); + if (!nlh) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + /* Construct the IfName */ + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char *ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh, expected_length); + + /* Check the results */ + auto it = m_mockRouteSync.m_nh_groups.find(id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add new Pic Context"; + + /* Check other attrs */ + const NextHopGroup &nhg = it->second; + // Check each value of attr + EXPECT_EQ(nhg.nexthop, gateway); // Check gateway + EXPECT_EQ(nhg.intf, "Ethernet1"); // Check interface name + EXPECT_EQ(nhg.vpn_sid, seg6_sid); // Check sid + EXPECT_EQ(nhg.seg_src, seg6_src); // Check seg_src + + free(nlh); + } + + /*-------------------------------------------*/ + /* Test 2: Create and process DEL msg for NH */ + /*-------------------------------------------*/ + { + /* Create netlink msg header with DEL cmd */ + struct nlmsghdr *nlh_del = createPicContextMsgHdr(RTM_DELPICCONTEXT, id); + if (!nlh_del) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_del->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh_del, expected_length); + + /* Check the result */ + auto it = m_mockRouteSync.m_nh_groups.find(id); + ASSERT_EQ(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to remove Pic Context"; + + free(nlh_del); + } + + /*-------------------------------*/ + /* Test 3: Other branches for NH */ + /*-------------------------------*/ + // Case 1: nlmsg_type is nothing to do with PIC + { + /* Create netlink msg header with wrong nlmsg_type */ + struct nlmsghdr *nlh_no_pic = createPicContextMsgHdr(RTM_NEWSRV6VPNROUTE); + if (!nlh_no_pic) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_no_pic->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh_no_pic, expected_length); + + /* Check the result */ + auto it = m_mockRouteSync.m_nh_groups.find(id); + ASSERT_EQ(it, m_mockRouteSync.m_nh_groups.end()) << "We should've find nothing for no pic case"; + + free(nlh_no_pic); + } + + // Case 2: missing NHA_ID + { + /* Create netlink msg header without NHA_ID */ + struct nlmsghdr *nlh_no_id = createPicContextMsgHdr(msg_type, 0); + if (!nlh_no_id) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_no_id->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh_no_id, expected_length); + + /* Check the result */ + auto it = m_mockRouteSync.m_nh_groups.find(id); + ASSERT_EQ(it, m_mockRouteSync.m_nh_groups.end()) << "We should've find nothing for no id case"; + + free(nlh_no_id); + } + + // Case 3: has NHA_ENCAP & NHA_ENCAP_TYPE but not LWTUNNEL_ENCAP_SEG6 + { + /* Create netlink msg header with other encap_type */ + struct nlmsghdr *nlh_wrong_encap = createPicContextMsgHdr( + msg_type, + 200, + gateway, + ifindex, + nh_family, + seg6_sid, + seg6_src, + __LWTUNNEL_ENCAP_MAX + ); + if (!nlh_wrong_encap) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_wrong_encap->nlmsg_len - NLMSG_ALIGN(sizeof(struct nhmsg))); + + /* Construct the IfName */ + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char *ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + /* Call the target fuction */ + m_mockRouteSync.onPicContextMsg(nlh_wrong_encap, expected_length); + + /* Check the results */ + auto it = m_mockRouteSync.m_nh_groups.find(200); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add new Pic Context"; + + /* Check other attrs */ + const NextHopGroup &nhg = it->second; + // Check the values + EXPECT_EQ(nhg.vpn_sid, ""); + EXPECT_EQ(nhg.seg_src, ""); + + free(nlh_wrong_encap); + } + + // Case 4: addr_family == AF_INET + { + /* Create netlink msg header for AF_INET case */ + struct nlmsghdr *nlh_ipv4 = createPicContextMsgHdr( + msg_type, + 300, + "192.168.0.1", + ifindex + ); + if (!nlh_ipv4) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_ipv4->nlmsg_len - NLMSG_ALIGN(sizeof(struct nhmsg))); + + /* Construct the IfName */ + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char *ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh_ipv4, expected_length); + + /* Check the results */ + auto it = m_mockRouteSync.m_nh_groups.find(300); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add new Pic Context for ipv4"; + + /* Check other attrs */ + const NextHopGroup &nhg = it->second; + // Check each value of attr + EXPECT_EQ(nhg.nexthop, "192.168.0.1"); + EXPECT_EQ(nhg.intf, "Ethernet1"); + + free(nlh_ipv4); + } + + // case 5: unknown addr_family type + { + /* Create netlink msg header with unknown addr_family type */ + struct nlmsghdr *nlh_unknown_af = createPicContextMsgHdr( + msg_type, + id, + gateway, + ifindex, + AF_UNSPEC + ); + if (!nlh_unknown_af) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_unknown_af->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh_unknown_af, expected_length); + + /* Check the result */ + auto it = m_mockRouteSync.m_nh_groups.find(id); + ASSERT_EQ(it, m_mockRouteSync.m_nh_groups.end()) << "We should not process the unknown af"; + + free(nlh_unknown_af); + } + + // case 6: ifName does not exist + { + /* Create netlink msg header with ADD cmd */ + struct nlmsghdr *nlh_unknown_intf = createPicContextMsgHdr( + msg_type, + 400, + gateway, + ifindex, + nh_family + ); + if (!nlh_unknown_intf) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_unknown_intf->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + /* Construct the IfName, mock unknown case */ + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex, _, _)) + .WillOnce(Return(false)); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh_unknown_intf, expected_length); + + /* Check the results */ + auto it = m_mockRouteSync.m_nh_groups.find(400); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add new Pic Context"; + + /* Check other attrs */ + const NextHopGroup &nhg = it->second; + // Check each value of attr + EXPECT_EQ(nhg.nexthop, gateway); + EXPECT_EQ(nhg.intf, "unknown"); + + free(nlh_unknown_intf); + } + + // case 7: ifName is docker0 + { + /* Create netlink msg header with ifName "docker0" */ + struct nlmsghdr *nlh_docker0 = createPicContextMsgHdr( + msg_type, + id, + gateway, + ifindex, + nh_family + ); + if (!nlh_docker0) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + int expected_length = (int)(nlh_docker0->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg))); + + /* Construct the IfName, docker0 */ + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex, _, _)) + .WillOnce(DoAll( + [](int32_t, char *ifname, size_t size) { + strncpy(ifname, "docker0", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + + /* Call the target function */ + m_mockRouteSync.onPicContextMsg(nlh_docker0, expected_length); + + /* Check the results */ + auto it = m_mockRouteSync.m_nh_groups.find(id); + ASSERT_EQ(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to skip docker0 case"; + + free(nlh_docker0); + } +} + +TEST_F(FpmSyncdResponseTest, TestPicContext_NHG) +{ + // Prepare the information of nexthops + uint16_t msg_type = RTM_NEWPICCONTEXT; + uint32_t nh1_id = 1; + uint32_t nh2_id = 2; + uint32_t nh3_id = 3; + const char *gateway_1 = "2001:db8::1"; + const char *gateway_2 = "2002:db8::1"; + const char *gateway_3 = "2003:db8::1"; + uint32_t ifindex_1 = 1; + uint32_t ifindex_2 = 2; + uint32_t ifindex_3 = 3; + unsigned char nh_family = AF_INET6; + const char *seg6_sid_1 = "2001:db8::2"; + const char *seg6_sid_2 = "2002:db8::2"; + const char *seg6_sid_3 = "2003:db8::2"; + const char *seg6_src_1 = "2001:db8::3"; + const char *seg6_src_2 = "2002:db8::3"; + const char *seg6_src_3 = "2003:db8::3"; + + /* First, we need to add the nexthops to m_nh_groups */ + struct nlmsghdr *nlh_1 = createPicContextMsgHdr(msg_type, nh1_id, gateway_1, ifindex_1, + nh_family, seg6_sid_1, seg6_src_1); + struct nlmsghdr *nlh_2 = createPicContextMsgHdr(msg_type, nh2_id, gateway_2, ifindex_2, + nh_family, seg6_sid_2, seg6_src_2); + struct nlmsghdr *nlh_3 = createPicContextMsgHdr(msg_type, nh3_id, gateway_3, ifindex_3, + nh_family, seg6_sid_3, seg6_src_3); + if (!nlh_1 || !nlh_2 || !nlh_3) { + ADD_FAILURE() << "Failed to create Pic Context nlmsghdr"; + return; + } + + // Construct the IfName + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex_1, _, _)) + .WillOnce(DoAll( + [](int32_t, char *ifname, size_t size) { + strncpy(ifname, "Ethernet1", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex_2, _, _)) + .WillOnce(DoAll( + [](int32_t, char *ifname, size_t size) { + strncpy(ifname, "Ethernet2", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + EXPECT_CALL(m_mockRouteSync, getIfName(ifindex_3, _, _)) + .WillOnce(DoAll( + [](int32_t, char *ifname, size_t size) { + strncpy(ifname, "Ethernet3", size); + ifname[size-1] = '\0'; + }, + Return(true) + )); + // Call onPicContextMsg to insert these nexthops + m_mockRouteSync.onPicContextMsg(nlh_1, (int)(nlh_1->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onPicContextMsg(nlh_2, (int)(nlh_2->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + m_mockRouteSync.onPicContextMsg(nlh_3, (int)(nlh_3->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + /* Create a nexthop group with these nexthops */ + uint32_t group_id = 100; + vector> group_members = { + {nh1_id, 1}, // id=1, weight=1 + {nh2_id, 2}, // id=2, weight=2 + {nh3_id, 3}, // id=3, weight=3 + }; + + // Create group_nlh + struct nlmsghdr* group_nlh = createNewNextHopMsgHdr(group_members, group_id, RTM_NEWPICCONTEXT); + ASSERT_NE(group_nlh, nullptr) << "Failed to create group nexthop message"; + + // Call the target function + m_mockRouteSync.onPicContextMsg(group_nlh, (int)(group_nlh->nlmsg_len - NLMSG_LENGTH(sizeof(struct nhmsg)))); + + // Verify the group was added correctly + auto it = m_mockRouteSync.m_nh_groups.find(group_id); + ASSERT_NE(it, m_mockRouteSync.m_nh_groups.end()) << "Failed to add nexthop group"; + + // Verify group members + const auto& group = it->second.group; + ASSERT_EQ(group.size(), 3) << "Wrong number of group members"; + + // Check each member's ID and weight + EXPECT_EQ(group[0].first, nh1_id); + EXPECT_EQ(group[0].second, 1); + EXPECT_EQ(group[1].first, nh2_id); + EXPECT_EQ(group[1].second, 2); + EXPECT_EQ(group[2].first, nh3_id); + EXPECT_EQ(group[2].second, 3); + + // Check values in PIC table + Table pic_context_group_table(m_db.get(), APP_PIC_CONTEXT_TABLE_NAME); + vector fieldValues; + string key = to_string(group_id); + pic_context_group_table.get(key, fieldValues); + + ASSERT_EQ(fieldValues.size(), 5) << "Wrong number of fields in DB"; + + // Verify the DB fields + string nexthops, ifnames, sids, srcs, weights; + for (const auto& fv : fieldValues) { + if (fvField(fv) == "nexthop") { + nexthops = fvValue(fv); + } else if (fvField(fv) == "ifname") { + ifnames = fvValue(fv); + } else if (fvField(fv) == "vpn_sid") { + sids = fvValue(fv); + } else if (fvField(fv) == "seg_src") { + srcs = fvValue(fv); + } else if (fvField(fv) == "weight") { + weights = fvValue(fv); + } + } + EXPECT_EQ(nexthops, "2001:db8::1,2002:db8::1,2003:db8::1"); + EXPECT_EQ(ifnames, "Ethernet1,Ethernet2,Ethernet3"); + EXPECT_EQ(sids, "2001:db8::2,2002:db8::2,2003:db8::2"); + EXPECT_EQ(srcs, "2001:db8::3,2002:db8::3,2003:db8::3"); + EXPECT_EQ(weights, "1,2,3"); + + free(nlh_1); + free(nlh_2); + free(nlh_3); + free(group_nlh); +} \ No newline at end of file diff --git a/tests/mock_tests/fpmsyncd/ut_helpers_fpmsyncd.cpp b/tests/mock_tests/fpmsyncd/ut_helpers_fpmsyncd.cpp new file mode 100644 index 00000000000..b0a9a81dbb9 --- /dev/null +++ b/tests/mock_tests/fpmsyncd/ut_helpers_fpmsyncd.cpp @@ -0,0 +1,545 @@ +#include "ut_helpers_fpmsyncd.h" +#include "ipaddress.h" +#include "ipprefix.h" +#include +#include + +#define IPV6_MAX_BYTE 16 +#define IPV6_MAX_BITLEN 128 + +/* + * Mock rtnl_link_i2name() call + * We simulate the existence of a VRF called Vrf10 with ifindex 10. + * Calling rtnl_link_i2name(_, 10, _, _) will return the name of the VRF (i.e., "Vrf10" string) + */ +extern "C" { +char *__wrap_rtnl_link_i2name(struct nl_cache *cache, int ifindex, char *dst, size_t len) +{ + switch (ifindex) + { + case 10: + strncpy(dst, "Vrf10", 6); + return dst; + case 30: + strncpy(dst, "invalidVrf", 11); + return dst; + default: + return NULL; + } +} +} + +namespace ut_fpmsyncd +{ + /* Add a unspecific attribute to netlink message */ + bool nl_attr_put(struct nlmsghdr *n, unsigned int maxlen, int type, + const void *data, unsigned int alen) + { + int len; + struct rtattr *rta; + + len = (int)RTA_LENGTH(alen); + + if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen) + return false; + + rta = reinterpret_cast(static_cast(((char *)n) + NLMSG_ALIGN(n->nlmsg_len))); + rta->rta_type = (uint16_t)type; + rta->rta_len = (uint16_t)len; + + if (data) + memcpy(RTA_DATA(rta), data, alen); + else + assert(alen == 0); + + n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len); + + return true; + } + + /* Add 8 bit integer attribute to netlink message */ + bool nl_attr_put8(struct nlmsghdr *n, unsigned int maxlen, int type, + uint16_t data) + { + return nl_attr_put(n, maxlen, type, &data, sizeof(uint8_t)); + } + + /* Add 16 bit integer attribute to netlink message */ + bool nl_attr_put16(struct nlmsghdr *n, unsigned int maxlen, int type, + uint16_t data) + { + return nl_attr_put(n, maxlen, type, &data, sizeof(uint16_t)); + } + + /* Add 32 bit integer attribute to netlink message */ + bool nl_attr_put32(struct nlmsghdr *n, unsigned int maxlen, int type, + uint32_t data) + { + return nl_attr_put(n, maxlen, type, &data, sizeof(uint32_t)); + } + + /* Start a new level of nested attributes */ + struct rtattr *nl_attr_nest(struct nlmsghdr *n, unsigned int maxlen, int type) + { + struct rtattr *nest = NLMSG_TAIL(n); + + if (!nl_attr_put(n, maxlen, type, NULL, 0)) + return NULL; + + nest->rta_type |= NLA_F_NESTED; + return nest; + } + + /* Finalize nesting of attributes */ + int nl_attr_nest_end(struct nlmsghdr *n, struct rtattr *nest) + { + nest->rta_len = (uint16_t)((uint8_t *)NLMSG_TAIL(n) - (uint8_t *)nest); + return n->nlmsg_len; + } + + /* Build a Netlink object containing an SRv6 VPN Route */ + struct nlmsg *create_srv6_vpn_route_nlmsg( + uint16_t cmd, + IpPrefix *dst, + IpAddress *encap_src_addr, + IpAddress *vpn_sid, + uint16_t table_id, + uint8_t prefixlen, + uint8_t address_family, + uint8_t rtm_type, + uint32_t nhg_id, + uint32_t pic_id) + { + struct rtattr *nest; + + /* Allocate memory for the Netlink objct */ + struct nlmsg *nl_obj = (struct nlmsg *)calloc(1, sizeof(struct nlmsg)); + if (!nl_obj) + throw std::runtime_error("netlink: nlmsg object allocation failed"); + + nl_obj->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); + nl_obj->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST; + + if (cmd == RTM_NEWROUTE && + dst && dst->isV4()) + nl_obj->n.nlmsg_flags |= NLM_F_REPLACE; + + nl_obj->n.nlmsg_type = cmd; + + nl_obj->n.nlmsg_pid = 100; + + if (address_family > 0) + nl_obj->r.rtm_family = address_family; + else + nl_obj->r.rtm_family = dst ? dst->getIp().getIp().family : AF_INET6; + if (prefixlen > 0) + nl_obj->r.rtm_dst_len = prefixlen; + else + nl_obj->r.rtm_dst_len = dst ? (unsigned char)(dst->getMaskLength()): IPV6_MAX_BITLEN; + nl_obj->r.rtm_scope = RT_SCOPE_UNIVERSE; + + nl_obj->r.rtm_protocol = 11; // ZEBRA protocol + + if (rtm_type > 0) + nl_obj->r.rtm_type = rtm_type; + else + nl_obj->r.rtm_type = RTN_UNICAST; + + /* Add the destination address */ + if (dst) + { + if (dst->isV4()) + { + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + RTA_DST, dst->getIp().getV4Addr())) + return NULL; + } + else + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + RTA_DST, dst->getIp().getV6Addr(), IPV6_MAX_BYTE)) + return NULL; + } + } + + /* Add the table ID */ + if (table_id < 256) + nl_obj->r.rtm_table = (unsigned char)table_id; + else + { + nl_obj->r.rtm_table = RT_TABLE_UNSPEC; + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), RTA_TABLE, table_id)) + return NULL; + } + + /* Add encapsulation type NH_ENCAP_SRV6_ROUTE (SRv6 Route) */ + if (!nl_attr_put16(&nl_obj->n, sizeof(*nl_obj), RTA_ENCAP_TYPE, + NH_ENCAP_SRV6_ROUTE)) + return NULL; + + /* Add encapsulation information */ + nest = nl_attr_nest(&nl_obj->n, sizeof(*nl_obj), RTA_ENCAP); + if (!nest) + return NULL; + + /* Add source address for SRv6 encapsulation */ + if (!nl_attr_put( + &nl_obj->n, sizeof(*nl_obj), ROUTE_ENCAP_SRV6_ENCAP_SRC_ADDR, + encap_src_addr->getV6Addr(), 16)) + return NULL; + + /* Add the VPN SID */ + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), ROUTE_ENCAP_SRV6_VPN_SID, + vpn_sid->getV6Addr(), 16)) + return NULL; + + /* Add PIC_ID */ + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), ROUTE_ENCAP_SRV6_PIC_ID, + pic_id)) + return NULL; + + /* Add NHG_ID */ + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), ROUTE_ENCAP_SRV6_NH_ID, + nhg_id)) + return NULL; + + nl_attr_nest_end(&nl_obj->n, nest); + + return nl_obj; + } + + /* Build a Netlink object containing an SRv6 My SID */ + struct nlmsg *create_srv6_mysid_nlmsg( + uint16_t cmd, + IpAddress *mysid, + int8_t block_len, + int8_t node_len, + int8_t func_len, + int8_t arg_len, + uint32_t action, + char *vrf, + IpAddress *adj, + char *intf, + uint16_t table_id, + uint8_t prefixlen, + uint8_t address_family + ) + { + struct rtattr *nest; + + /* Allocate memory for the Netlink object */ + struct nlmsg *nl_obj = (struct nlmsg *)malloc(sizeof(struct nlmsg)); + if (!nl_obj) + throw std::runtime_error("netlink: nlmsg object allocation failed"); + + memset(nl_obj, 0, sizeof(*nl_obj)); + + nl_obj->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); + nl_obj->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST; + + nl_obj->n.nlmsg_type = cmd; + + nl_obj->n.nlmsg_pid = 100; + + if (address_family > 0) + nl_obj->r.rtm_family = address_family; + else + nl_obj->r.rtm_family = mysid ? mysid->getIp().family : AF_INET6; + if (prefixlen > 0) + nl_obj->r.rtm_dst_len = prefixlen; + else + nl_obj->r.rtm_dst_len = IPV6_MAX_BITLEN; + nl_obj->r.rtm_scope = RT_SCOPE_UNIVERSE; + + nl_obj->r.rtm_protocol = 11; // Protocol ZEBRA + + if (cmd != RTM_DELROUTE) + nl_obj->r.rtm_type = RTN_UNICAST; + + /* Add my SID address */ + if (mysid) + { + if (mysid->isV4()) + { + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + RTA_DST, mysid->getV4Addr())) + return NULL; + } + else + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + RTA_DST, mysid->getV6Addr(), 16)) + return NULL; + } + } + + /* Add table ID */ + if (table_id < 256) + nl_obj->r.rtm_table = (unsigned char)table_id; + else + { + nl_obj->r.rtm_table = RT_TABLE_UNSPEC; + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), RTA_TABLE, table_id)) + return NULL; + } + + /* Add SID format information */ + if (block_len > 0 || + node_len > 0 || + func_len > 0 || + arg_len > 0) + { + nest = + nl_attr_nest(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_FORMAT); + + /* Add block bits length */ + if (block_len >= 0) + if (!nl_attr_put8( + &nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_FORMAT_BLOCK_LEN, + block_len)) + return NULL; + + /* Add node bits length */ + if (node_len >= 0) + if (!nl_attr_put8( + &nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_FORMAT_NODE_LEN, + node_len)) + return NULL; + + /* Add function bits length */ + if (func_len >= 0) + if (!nl_attr_put8( + &nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_FORMAT_FUNC_LEN, + func_len)) + return NULL; + + /* Add argument bits length */ + if (arg_len >= 0) + if (!nl_attr_put8( + &nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_FORMAT_ARG_LEN, + arg_len)) + return NULL; + + nl_attr_nest_end(&nl_obj->n, nest); + } + + /* If the Netlink message is a Delete Route message, we have done */ + if (cmd == RTM_DELROUTE) + { + NLMSG_ALIGN(nl_obj->n.nlmsg_len); + return nl_obj; + } + + /* Add my SID behavior (action and parameters) */ + switch (action) + { + case SRV6_LOCALSID_ACTION_END: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END)) + return NULL; + break; + case SRV6_LOCALSID_ACTION_END_X: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END_X)) + return NULL; + if (adj) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_NH6, + adj->getV6Addr(), 16)) + return NULL; + } + if (intf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_IFNAME, + intf, (uint32_t)strlen(intf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_END_T: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END_T)) + return NULL; + if (vrf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_VRFNAME, + vrf, (uint32_t)strlen(vrf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_END_DX4: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END_DX4)) + return NULL; + if (adj) + { + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_NH4, + adj->getV4Addr())) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_END_DX6: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END_DX6)) + return NULL; + if (adj) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_NH6, + adj->getV6Addr(), 16)) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_END_DT4: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END_DT4)) + return NULL; + if (vrf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_VRFNAME, + vrf, (uint32_t)strlen(vrf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_END_DT6: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END_DT6)) + return NULL; + if (vrf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_VRFNAME, + vrf, (uint32_t)strlen(vrf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_END_DT46: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_END_DT46)) + return NULL; + if (vrf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_VRFNAME, + vrf, (uint32_t)strlen(vrf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_UN: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_UN)) + return NULL; + break; + case SRV6_LOCALSID_ACTION_UA: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_UA)) + return NULL; + if (adj) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_NH6, + adj->getV6Addr(), 16)) + return NULL; + } + if (intf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_IFNAME, + intf, (uint32_t)strlen(intf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_UDX4: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_UDX4)) + return NULL; + if (adj) + { + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_NH4, + adj->getV4Addr())) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_UDX6: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_UDX6)) + return NULL; + if (adj) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_NH6, + adj->getV6Addr(), 16)) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_UDT4: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_UDT4)) + return NULL; + if (vrf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_VRFNAME, + vrf, (uint32_t)strlen(vrf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_UDT6: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_UDT6)) + return NULL; + if (vrf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_VRFNAME, + vrf, (uint32_t)strlen(vrf))) + return NULL; + } + break; + case SRV6_LOCALSID_ACTION_UDT46: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + SRV6_LOCALSID_ACTION_UDT46)) + return NULL; + if (vrf) + { + if (!nl_attr_put(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_VRFNAME, + vrf, (uint32_t)strlen(vrf))) + return NULL; + } + break; + default: + if (!nl_attr_put32(&nl_obj->n, sizeof(*nl_obj), + SRV6_LOCALSID_ACTION, + action)) + return NULL; + } + + return nl_obj; + } +} \ No newline at end of file diff --git a/tests/mock_tests/fpmsyncd/ut_helpers_fpmsyncd.h b/tests/mock_tests/fpmsyncd/ut_helpers_fpmsyncd.h new file mode 100644 index 00000000000..614202c462e --- /dev/null +++ b/tests/mock_tests/fpmsyncd/ut_helpers_fpmsyncd.h @@ -0,0 +1,116 @@ +#include "ipaddress.h" +#include "ipprefix.h" +#include +#include + +using namespace swss; + +#define NLMSG_TAIL(nmsg) \ + (reinterpret_cast(static_cast((((uint8_t *)nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))) + +/* Values copied from fpmsyncd/fpmlink.h */ +#define RTM_NEWSRV6LOCALSID 1000 +#define RTM_DELSRV6LOCALSID 1001 + +/* Values copied from fpmsyncd/routesync.cpp */ +#define NH_ENCAP_SRV6_ROUTE 101 + +enum { /* Values copied from fpmsyncd/routesync.cpp */ + ROUTE_ENCAP_SRV6_UNSPEC = 0, + ROUTE_ENCAP_SRV6_VPN_SID = 1, + ROUTE_ENCAP_SRV6_ENCAP_SRC_ADDR = 2, + ROUTE_ENCAP_SRV6_PIC_ID = 3, + ROUTE_ENCAP_SRV6_NH_ID = 4, +}; + +enum srv6_localsid_action { /* Values copied from fpmsyncd/routesync.cpp */ + SRV6_LOCALSID_ACTION_UNSPEC = 0, + SRV6_LOCALSID_ACTION_END = 1, + SRV6_LOCALSID_ACTION_END_X = 2, + SRV6_LOCALSID_ACTION_END_T = 3, + SRV6_LOCALSID_ACTION_END_DX2 = 4, + SRV6_LOCALSID_ACTION_END_DX6 = 5, + SRV6_LOCALSID_ACTION_END_DX4 = 6, + SRV6_LOCALSID_ACTION_END_DT6 = 7, + SRV6_LOCALSID_ACTION_END_DT4 = 8, + SRV6_LOCALSID_ACTION_END_DT46 = 9, + SRV6_LOCALSID_ACTION_B6_ENCAPS = 10, + SRV6_LOCALSID_ACTION_B6_ENCAPS_RED = 11, + SRV6_LOCALSID_ACTION_B6_INSERT = 12, + SRV6_LOCALSID_ACTION_B6_INSERT_RED = 13, + SRV6_LOCALSID_ACTION_UN = 14, + SRV6_LOCALSID_ACTION_UA = 15, + SRV6_LOCALSID_ACTION_UDX2 = 16, + SRV6_LOCALSID_ACTION_UDX6 = 17, + SRV6_LOCALSID_ACTION_UDX4 = 18, + SRV6_LOCALSID_ACTION_UDT6 = 19, + SRV6_LOCALSID_ACTION_UDT4 = 20, + SRV6_LOCALSID_ACTION_UDT46 = 21, +}; + +enum { /* Values copied from fpmsyncd/routesync.cpp */ + SRV6_LOCALSID_UNSPEC = 0, + SRV6_LOCALSID_SID_VALUE = 1, + SRV6_LOCALSID_FORMAT = 2, + SRV6_LOCALSID_ACTION = 3, + SRV6_LOCALSID_VRFNAME = 4, + SRV6_LOCALSID_NH6 = 5, + SRV6_LOCALSID_NH4 = 6, + SRV6_LOCALSID_IIF = 7, + SRV6_LOCALSID_OIF = 8, + SRV6_LOCALSID_BPF = 9, + SRV6_LOCALSID_SIDLIST = 10, + SRV6_LOCALSID_ENCAP_SRC_ADDR = 11, + SRV6_LOCALSID_IFNAME = 12, +}; + +enum { /* Values copied from fpmsyncd/routesync.cpp */ + SRV6_LOCALSID_FORMAT_UNSPEC = 0, + SRV6_LOCALSID_FORMAT_BLOCK_LEN = 1, + SRV6_LOCALSID_FORMAT_NODE_LEN = 2, + SRV6_LOCALSID_FORMAT_FUNC_LEN = 3, + SRV6_LOCALSID_FORMAT_ARG_LEN = 4, +}; + +namespace ut_fpmsyncd +{ + struct nlmsg + { + struct nlmsghdr n; + struct rtmsg r; + char buf[512]; + }; + + /* Add a unspecific attribute to netlink message */ + bool nl_attr_put(struct nlmsghdr *n, unsigned int maxlen, int type, + const void *data, unsigned int alen); + /* Add 8 bit integer attribute to netlink message */ + bool nl_attr_put8(struct nlmsghdr *n, unsigned int maxlen, int type, + uint16_t data); + /* Add 16 bit integer attribute to netlink message */ + bool nl_attr_put16(struct nlmsghdr *n, unsigned int maxlen, int type, + uint16_t data); + /* Add 32 bit integer attribute to netlink message */ + bool nl_attr_put32(struct nlmsghdr *n, unsigned int maxlen, int type, + uint32_t data); + /* Start a new level of nested attributes */ + struct rtattr *nl_attr_nest(struct nlmsghdr *n, unsigned int maxlen, int type); + /* Finalize nesting of attributes */ + int nl_attr_nest_end(struct nlmsghdr *n, struct rtattr *nest); + /* Build a Netlink object containing an SRv6 VPN Route */ + struct nlmsg *create_srv6_vpn_route_nlmsg(uint16_t cmd, IpPrefix *dst, IpAddress *encap_src_addr, + IpAddress *vpn_sid, uint16_t table_id = 10, uint8_t prefixlen = 0, + uint8_t address_family = 0, uint8_t rtm_type = 0, + uint32_t nhg_id = 0, uint32_t pic_id = 0); + /* Build a Netlink object containing an SRv6 My SID */ + struct nlmsg *create_srv6_mysid_nlmsg(uint16_t cmd, IpAddress *mysid, int8_t block_len, + int8_t node_len, int8_t func_len, int8_t arg_len, + uint32_t action, char *vrf = NULL, IpAddress *nh = NULL, + char *intf = NULL, uint16_t table_id = 10, uint8_t prefixlen = 0, + uint8_t address_family = 0); + /* Free the memory allocated for a Netlink object */ + inline void free_nlobj(struct nlmsg *msg) + { + free(msg); + } +} \ No newline at end of file diff --git a/tests/mock_tests/intfsorch_ut.cpp b/tests/mock_tests/intfsorch_ut.cpp index ffbf348ed45..b77d0a6c922 100644 --- a/tests/mock_tests/intfsorch_ut.cpp +++ b/tests/mock_tests/intfsorch_ut.cpp @@ -167,7 +167,11 @@ namespace intfsorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); - auto* tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector tunnel_tables = { + APP_TUNNEL_DECAP_TABLE_NAME, + APP_TUNNEL_DECAP_TERM_TABLE_NAME + }; + auto* tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); vector mux_tables = { CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME @@ -186,11 +190,16 @@ namespace intfsorch_test gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); ASSERT_EQ(gSrv6Orch, nullptr); - vector srv6_tables = { - APP_SRV6_SID_LIST_TABLE_NAME, - APP_SRV6_MY_SID_TABLE_NAME + TableConnector srv6_sid_list_table(m_app_db.get(), APP_SRV6_SID_LIST_TABLE_NAME); + TableConnector srv6_my_sid_table(m_app_db.get(), APP_SRV6_MY_SID_TABLE_NAME); + TableConnector srv6_my_sid_cfg_table(m_config_db.get(), CFG_SRV6_MY_SID_TABLE_NAME); + + vector srv6_tables = { + srv6_sid_list_table, + srv6_my_sid_table, + srv6_my_sid_cfg_table }; - gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + gSrv6Orch = new Srv6Orch(m_config_db.get(), m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); // Start FlowCounterRouteOrch static const vector route_pattern_tables = { @@ -326,5 +335,63 @@ namespace intfsorch_test static_cast(gIntfsOrch)->doTask(); ASSERT_EQ(current_create_count + 1, create_rif_count); ASSERT_EQ(current_remove_count + 1, remove_rif_count); + }; + + TEST_F(IntfsOrchTest, IntfsOrchVrfUpdate) + { + //create a new vrf + std::deque entries; + entries.push_back({"Vrf-Blue", "SET", { {"NULL", "NULL"}}}); + auto consumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + ASSERT_TRUE(gVrfOrch->isVRFexists("Vrf-Blue")); + auto new_vrf_reference_count = gVrfOrch->getVrfRefCount("Vrf-Blue"); + ASSERT_EQ(new_vrf_reference_count, 0); + + // create an interface + entries.clear(); + entries.push_back({"Loopback2", "SET", {}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + IntfsTable m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback2"].vrf_id, gVirtualRouterId); + + // change vrf and check if it worked + entries.clear(); + entries.push_back({"Loopback2", "SET", { {"vrf_name", "Vrf-Blue"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + auto new_vrf_updated_reference_count = gVrfOrch->getVrfRefCount("Vrf-Blue"); + ASSERT_EQ(new_vrf_reference_count + 1, new_vrf_updated_reference_count); + m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback2"].vrf_id, gVrfOrch->getVRFid("Vrf-Blue")); + + // create an interface + entries.clear(); + entries.push_back({"Loopback3", "SET", {}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback3"].vrf_id, gVirtualRouterId); + + // Add IP address to the interface + entries.clear(); + entries.push_back({"Loopback3:3.3.3.3/32", "SET", {{"scope", "global"},{"family", "IPv4"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + + // change vrf and check it doesn't affect the interface due to existing IP + entries.clear(); + entries.push_back({"Loopback3", "SET", { {"vrf_name", "Vrf-Blue"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + m_syncdIntfses = gIntfsOrch->getSyncdIntfses(); + ASSERT_EQ(m_syncdIntfses["Loopback3"].vrf_id, gVirtualRouterId); } } \ No newline at end of file diff --git a/tests/mock_tests/mirrororch_ut.cpp b/tests/mock_tests/mirrororch_ut.cpp new file mode 100644 index 00000000000..7e3f9c94a40 --- /dev/null +++ b/tests/mock_tests/mirrororch_ut.cpp @@ -0,0 +1,57 @@ +// Make selected privates visible for unit testing +#define private public +#include "directory.h" +#undef private + +#define protected public +#include "orch.h" +#undef protected + +#define private public +#include "switchorch.h" +#undef private + +#include "portsorch.h" +#define private public +#include "mirrororch.h" +#undef private +#include "mock_orch_test.h" + +namespace mirrororch_test +{ + using namespace mock_orch_test; + + class MirrorOrchTest : public MockOrchTest + { + }; + + TEST_F(MirrorOrchTest, RejectsIngressWhenUnsupported) + { + // Ensure environment initialized by MockOrchTest + ASSERT_NE(gSwitchOrch, nullptr); + ASSERT_NE(gMirrorOrch, nullptr); + + // Force ingress unsupported and egress supported + gSwitchOrch->m_portIngressMirrorSupported = false; + gSwitchOrch->m_portEgressMirrorSupported = true; + + Port dummyPort; // Unused due to early return + auto ret = gMirrorOrch->setUnsetPortMirror(dummyPort, /*ingress*/ true, /*set*/ true, /*sessionId*/ SAI_NULL_OBJECT_ID); + ASSERT_FALSE(ret); + } + + TEST_F(MirrorOrchTest, RejectsEgressWhenUnsupported) + { + ASSERT_NE(gSwitchOrch, nullptr); + ASSERT_NE(gMirrorOrch, nullptr); + + // Force egress unsupported and ingress supported + gSwitchOrch->m_portIngressMirrorSupported = true; + gSwitchOrch->m_portEgressMirrorSupported = false; + + Port dummyPort; // Unused due to early return + auto ret = gMirrorOrch->setUnsetPortMirror(dummyPort, /*ingress*/ false, /*set*/ true, /*sessionId*/ SAI_NULL_OBJECT_ID); + ASSERT_FALSE(ret); + } +} + diff --git a/tests/mock_tests/mock_consumerstatetable.cpp b/tests/mock_tests/mock_consumerstatetable.cpp index 822727929a8..2764bb10f21 100644 --- a/tests/mock_tests/mock_consumerstatetable.cpp +++ b/tests/mock_tests/mock_consumerstatetable.cpp @@ -7,4 +7,34 @@ namespace swss TableName_KeySet(tableName) { } + + void ConsumerStateTable::pops(std::deque &vkco, const std::string& /*prefix*/) + { + int count = 0; + swss::Table table(getDbConnector(), getTableName()); + std::vector keys; + table.getKeys(keys); + for (const auto &key: keys) + { + // pop with batch size + if (count < POP_BATCH_SIZE) + { + count++; + } + else + { + break; + } + + KeyOpFieldsValuesTuple kco; + kfvKey(kco) = key; + kfvOp(kco) = SET_COMMAND; + if (!table.get(key, kfvFieldsValues(kco))) + { + continue; + } + table.del(key); + vkco.push_back(kco); + } + } } diff --git a/tests/mock_tests/mock_dash_orch_test.cpp b/tests/mock_tests/mock_dash_orch_test.cpp new file mode 100644 index 00000000000..077ad8c5c46 --- /dev/null +++ b/tests/mock_tests/mock_dash_orch_test.cpp @@ -0,0 +1,163 @@ +#include "mock_dash_orch_test.h" +#include "dash_api/outbound_port_map.pb.h" + +namespace mock_orch_test +{ + + void MockDashOrchTest::SetDashTable(std::string table_name, std::string key, const google::protobuf::Message &message, bool set, bool expect_empty) + { + auto it = dash_table_orch_map.find(table_name); + if (it == dash_table_orch_map.end()) + { + FAIL() << "Table " << table_name << " not found in dash_table_orch_map."; + } + Orch *target_orch = *(it->second); + + auto consumer = make_unique( + new swss::ConsumerStateTable(m_app_db.get(), table_name), + target_orch, table_name); + auto op = set ? SET_COMMAND : DEL_COMMAND; + consumer->addToSync( + swss::KeyOpFieldsValuesTuple(key, op, { { "pb", message.SerializeAsString() } })); + target_orch->doTask(*consumer.get()); + + auto it2 = consumer->m_toSync.begin(); + if (expect_empty) + { + EXPECT_EQ(it2, consumer->m_toSync.end()) + << "Expected consumer to be empty after operation on table " << table_name + << " with key " << key; + } + else + { + EXPECT_NE(it2, consumer->m_toSync.end()) + << "Expected consumer to not be empty after operation on table " << table_name + << " with key " << key; + } + } + + dash::appliance::Appliance MockDashOrchTest::BuildApplianceEntry() + { + swss::IpAddress sip("1.1.1.1"); + dash::appliance::Appliance appliance = dash::appliance::Appliance(); + appliance.mutable_sip()->set_ipv4(sip.getV4Addr()); + appliance.set_local_region_id(100); + appliance.set_vm_vni(9999); + return appliance; + } + + void MockDashOrchTest::CreateApplianceEntry() + { + SetDashTable(APP_DASH_APPLIANCE_TABLE_NAME, appliance1, BuildApplianceEntry()); + } + + void MockDashOrchTest::CreateVnet() + { + dash::vnet::Vnet vnet = dash::vnet::Vnet(); + vnet.set_vni(5555); + SetDashTable(APP_DASH_VNET_TABLE_NAME, vnet1, vnet); + } + + void MockDashOrchTest::RemoveVnet(bool expect_empty) + { + SetDashTable(APP_DASH_VNET_TABLE_NAME, vnet1, dash::vnet::Vnet(), false, expect_empty); + } + + void MockDashOrchTest::AddVnetEncapRoutingType(dash::route_type::EncapType encap_type) + { + dash::route_type::RouteType route_type = dash::route_type::RouteType(); + dash::route_type::RouteTypeItem *rt_item = route_type.add_items(); + rt_item->set_action_type(dash::route_type::ACTION_TYPE_STATICENCAP); + rt_item->set_encap_type(encap_type); + SetDashTable(APP_DASH_ROUTING_TYPE_TABLE_NAME, "VNET_ENCAP", route_type); + } + + void MockDashOrchTest::AddPLRoutingType() + { + dash::route_type::RouteType route_type = dash::route_type::RouteType(); + dash::route_type::RouteTypeItem *rt_item = route_type.add_items(); + rt_item->set_action_type(dash::route_type::ACTION_TYPE_4_to_6); + rt_item = route_type.add_items(); + rt_item->set_action_type(dash::route_type::ACTION_TYPE_STATICENCAP); + rt_item->set_encap_type(dash::route_type::ENCAP_TYPE_VXLAN); + rt_item->set_vni(100); + SetDashTable(APP_DASH_ROUTING_TYPE_TABLE_NAME, "PRIVATELINK", route_type); + } + + void MockDashOrchTest::AddOutboundRoutingGroup() + { + dash::route_group::RouteGroup route_group = dash::route_group::RouteGroup(); + route_group.set_version("1"); + route_group.set_guid("group_guid"); + SetDashTable(APP_DASH_ROUTE_GROUP_TABLE_NAME, route_group1, route_group); + } + + void MockDashOrchTest::AddOutboundRoutingEntry(bool expect_empty) + { + dash::route::Route route = dash::route::Route(); + route.set_routing_type(dash::route_type::ROUTING_TYPE_VNET); + route.set_vnet(vnet1); + route.set_tunnel(tunnel1); + SetDashTable(APP_DASH_ROUTE_TABLE_NAME, route_group1 + ":1.2.3.4/32", route, true, expect_empty); + } + + void MockDashOrchTest::AddTunnel() + { + dash::tunnel::Tunnel tunnel = dash::tunnel::Tunnel(); + tunnel.set_encap_type(dash::route_type::ENCAP_TYPE_VXLAN); + tunnel.set_vni(5555); + SetDashTable(APP_DASH_TUNNEL_TABLE_NAME, tunnel1, tunnel); + } + + void MockDashOrchTest::AddVnetMap(bool expect_empty) + { + dash::vnet_mapping::VnetMapping vnet_map = dash::vnet_mapping::VnetMapping(); + vnet_map.set_routing_type(dash::route_type::ROUTING_TYPE_VNET_ENCAP); + vnet_map.mutable_underlay_ip()->set_ipv4(swss::IpAddress("7.7.7.7").getV4Addr()); + SetDashTable(APP_DASH_VNET_MAPPING_TABLE_NAME, vnet1 + ":" + vnet_map_ip1, vnet_map, true, expect_empty); + } + + void MockDashOrchTest::AddVnetMapPL(bool expect_empty) + { + dash::vnet_mapping::VnetMapping vnet_map = dash::vnet_mapping::VnetMapping(); + vnet_map.set_routing_type(dash::route_type::ROUTING_TYPE_PRIVATELINK); + vnet_map.mutable_underlay_ip()->set_ipv4(swss::IpAddress("7.7.7.7").getV4Addr()); + + vnet_map.mutable_overlay_sip_prefix()->mutable_ip()->set_ipv6(reinterpret_cast(swss::IpAddress("fd40:108:0:d204:0:200::0").getV6Addr())); + vnet_map.mutable_overlay_sip_prefix()->mutable_mask()->set_ipv6(reinterpret_cast(swss::IpAddress("ffff:ffff:ffff:ffff:ffff:ffff::").getV6Addr())); + vnet_map.mutable_overlay_dip_prefix()->mutable_ip()->set_ipv6(reinterpret_cast(swss::IpAddress("2603:10e1:100:2::3401:203").getV6Addr())); + vnet_map.mutable_overlay_dip_prefix()->mutable_mask()->set_ipv6(reinterpret_cast(swss::IpAddress("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff").getV6Addr())); + + vnet_map.set_port_map(portmap1); + SetDashTable(APP_DASH_VNET_MAPPING_TABLE_NAME, vnet1 + ":" + vnet_map_ip2, vnet_map, true, expect_empty); + } + + void MockDashOrchTest::RemoveVnetMapPL(bool expect_empty) + { + SetDashTable(APP_DASH_VNET_MAPPING_TABLE_NAME, vnet1 + ":" + vnet_map_ip2, dash::vnet_mapping::VnetMapping(), false, expect_empty); + } + + void MockDashOrchTest::RemoveVnetMap() + { + SetDashTable(APP_DASH_VNET_MAPPING_TABLE_NAME, vnet1 + ":" + vnet_map_ip1, dash::vnet_mapping::VnetMapping(), false); + } + + void MockDashOrchTest::AddPortMap() + { + dash::outbound_port_map::OutboundPortMap portmap = dash::outbound_port_map::OutboundPortMap(); + SetDashTable(APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, portmap1, portmap); + } + + dash::eni::Eni MockDashOrchTest::BuildEniEntry() + { + dash::eni::Eni eni; + std::string mac = "f4:93:9f:ef:c4:7e"; + eni.set_admin_state(dash::eni::State::STATE_ENABLED); + eni.set_eni_id(eni1); + eni.set_mac_address(mac); + eni.set_vnet(vnet1); + eni.mutable_underlay_ip()->set_ipv4(swss::IpAddress("1.2.3.4").getV4Addr()); + eni.set_eni_mode(dash::eni::MODE_VM); + return eni; + } +} diff --git a/tests/mock_tests/mock_dash_orch_test.h b/tests/mock_tests/mock_dash_orch_test.h new file mode 100644 index 00000000000..ab4ffd59517 --- /dev/null +++ b/tests/mock_tests/mock_dash_orch_test.h @@ -0,0 +1,48 @@ +#include "mock_orch_test.h" +#include + +namespace mock_orch_test +{ + class MockDashOrchTest : public MockOrchTest + { + protected: + // Orchs may not be initialized yet so we need double pointers to access them once they are initialized + std::unordered_map dash_table_orch_map = { + {APP_DASH_VNET_TABLE_NAME, (Orch**) &m_dashVnetOrch}, + {APP_DASH_VNET_MAPPING_TABLE_NAME, (Orch**) &m_dashVnetOrch}, + {APP_DASH_APPLIANCE_TABLE_NAME, (Orch**) &m_DashOrch}, + {APP_DASH_ROUTING_TYPE_TABLE_NAME, (Orch**) &m_DashOrch}, + {APP_DASH_ROUTE_GROUP_TABLE_NAME, (Orch**) &m_DashRouteOrch}, + {APP_DASH_ROUTE_TABLE_NAME, (Orch**) &m_DashRouteOrch}, + {APP_DASH_TUNNEL_TABLE_NAME, (Orch**) &m_DashTunnelOrch}, + {APP_DASH_ENI_TABLE_NAME, (Orch**) &m_DashOrch}, + { APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, (Orch **)&m_dashPortMapOrch }, + { APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME, (Orch **)&m_dashPortMapOrch } + }; + void SetDashTable(std::string table_name, std::string key, const google::protobuf::Message &message, bool set = true, bool expect_empty = true); + dash::appliance::Appliance BuildApplianceEntry(); + void CreateApplianceEntry(); + void AddVnetEncapRoutingType(dash::route_type::EncapType encap_type); + void AddPLRoutingType(); + void CreateVnet(); + void RemoveVnet(bool expect_empty = true); + void AddVnetMap(bool expect_empty = true); + void RemoveVnetMap(); + void AddOutboundRoutingGroup(); + void AddOutboundRoutingEntry(bool expect_empty = true); + void AddTunnel(); + void AddVnetMapPL(bool expect_empty = true); + void RemoveVnetMapPL(bool expect_empty = true); + void AddPortMap(); + dash::eni::Eni BuildEniEntry(); + + std::string vnet1 = "VNET_1"; + std::string vnet_map_ip1 = "2.2.2.2"; + std::string vnet_map_ip2 = "2.3.3.3"; + std::string appliance1 = "APPLIANCE_1"; + std::string route_group1 = "ROUTE_GROUP_1"; + std::string tunnel1 = "TUNNEL_1"; + std::string eni1 = "ENI_1"; + std::string portmap1 = "PORTMAP_1"; + }; +} diff --git a/tests/mock_tests/mock_dbconnector.cpp b/tests/mock_tests/mock_dbconnector.cpp index 7cabdc2224f..74a289907c5 100644 --- a/tests/mock_tests/mock_dbconnector.cpp +++ b/tests/mock_tests/mock_dbconnector.cpp @@ -11,6 +11,10 @@ namespace swss { + + DBConnector::DBConnector(const DBConnector& other) : DBConnector(other.m_dbName, 0, false) + {} + DBConnector::DBConnector(int dbId, const std::string &hostname, int port, unsigned int timeout) : m_dbId(dbId) { diff --git a/tests/mock_tests/mock_orch_test.cpp b/tests/mock_tests/mock_orch_test.cpp new file mode 100644 index 00000000000..89d47744d02 --- /dev/null +++ b/tests/mock_tests/mock_orch_test.cpp @@ -0,0 +1,359 @@ +#include "mock_orch_test.h" + +using namespace std; + +namespace mock_orch_test +{ + +void MockOrchTest::ApplyInitialConfigs() {} +void MockOrchTest::PostSetUp() {} +void MockOrchTest::PreTearDown() {} +void MockOrchTest::ApplySaiMock() {} + +void MockOrchTest::PrepareSai() +{ + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + sai_status_t status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + /* Create a loopback underlay router interface */ + vector underlay_intf_attrs; + + sai_attribute_t underlay_intf_attr; + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; + underlay_intf_attr.value.oid = gVirtualRouterId; + underlay_intf_attrs.push_back(underlay_intf_attr); + + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; + underlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; + underlay_intf_attrs.push_back(underlay_intf_attr); + + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_MTU; + underlay_intf_attr.value.u32 = 9100; + underlay_intf_attrs.push_back(underlay_intf_attr); + + status = sai_router_intfs_api->create_router_interface(&gUnderlayIfId, gSwitchId, (uint32_t)underlay_intf_attrs.size(), underlay_intf_attrs.data()); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Bulkers will use the SAI implementation that exists when they are created in Orch constructors + // so we need to apply the mock SAI API before any Orchs are created + ApplySaiMock(); +} + +void MockOrchTest::SetUp() +{ + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + m_dpu_app_db = make_shared("DPU_APPL_DB", 0); + m_dpu_app_state_db = make_shared("DPU_APPL_STATE_DB", 0); + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + PrepareSai(); + + const int portsorch_base_pri = 40; + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + TableConnector stateDbSwitchTable(m_state_db.get(), STATE_SWITCH_CAPABILITY_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + gDirectory.set(gSwitchOrch); + ut_orch_list.push_back((Orch **)&gSwitchOrch); + global_orch_list.insert((Orch **)&gSwitchOrch); + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + m_FlexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(m_FlexCounterOrch); + ut_orch_list.push_back((Orch **)&m_FlexCounterOrch); + + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + gDirectory.set(gFlowCounterRouteOrch); + ut_orch_list.push_back((Orch **)&gFlowCounterRouteOrch); + global_orch_list.insert((Orch **)&gFlowCounterRouteOrch); + + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + ut_orch_list.push_back((Orch **)&gVrfOrch); + global_orch_list.insert((Orch **)&gVrfOrch); + + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + gDirectory.set(gIntfsOrch); + ut_orch_list.push_back((Orch **)&gIntfsOrch); + global_orch_list.insert((Orch **)&gIntfsOrch); + + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + gDirectory.set(gPortsOrch); + ut_orch_list.push_back((Orch **)&gPortsOrch); + global_orch_list.insert((Orch **)&gPortsOrch); + + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + + gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); + gDirectory.set(gFgNhgOrch); + ut_orch_list.push_back((Orch **)&gFgNhgOrch); + global_orch_list.insert((Orch **)&gFgNhgOrch); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri } + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + gDirectory.set(gFdbOrch); + ut_orch_list.push_back((Orch **)&gFdbOrch); + global_orch_list.insert((Orch **)&gFdbOrch); + + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + gDirectory.set(gNeighOrch); + ut_orch_list.push_back((Orch **)&gNeighOrch); + global_orch_list.insert((Orch **)&gNeighOrch); + + vector tunnel_tables = { + APP_TUNNEL_DECAP_TABLE_NAME, + APP_TUNNEL_DECAP_TERM_TABLE_NAME + }; + m_TunnelDecapOrch = new TunnelDecapOrch(m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + gDirectory.set(m_TunnelDecapOrch); + ut_orch_list.push_back((Orch **)&m_TunnelDecapOrch); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + + vector buffer_tables = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + ut_orch_list.push_back((Orch **)&gBufferOrch); + global_orch_list.insert((Orch **)&gBufferOrch); + + vector policer_tables = { + TableConnector(m_config_db.get(), CFG_POLICER_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + + TableConnector stateDbStorm(m_state_db.get(), STATE_BUM_STORM_CAPABILITY_TABLE_NAME); + gPolicerOrch = new PolicerOrch(policer_tables, gPortsOrch); + gDirectory.set(gPolicerOrch); + ut_orch_list.push_back((Orch **)&gPolicerOrch); + global_orch_list.insert((Orch **)&gPolicerOrch); + + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + gDirectory.set(gNhgOrch); + ut_orch_list.push_back((Orch **)&gNhgOrch); + global_orch_list.insert((Orch **)&gNhgOrch); + + TableConnector srv6_sid_list_table(m_app_db.get(), APP_SRV6_SID_LIST_TABLE_NAME); + TableConnector srv6_my_sid_table(m_app_db.get(), APP_SRV6_MY_SID_TABLE_NAME); + TableConnector srv6_my_sid_cfg_table(m_config_db.get(), CFG_SRV6_MY_SID_TABLE_NAME); + + vector srv6_tables = { + srv6_sid_list_table, + srv6_my_sid_table, + srv6_my_sid_cfg_table + }; + gSrv6Orch = new Srv6Orch(m_config_db.get(), m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + gDirectory.set(gSrv6Orch); + ut_orch_list.push_back((Orch **)&gSrv6Orch); + global_orch_list.insert((Orch **)&gSrv6Orch); + + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + gDirectory.set(gCrmOrch); + ut_orch_list.push_back((Orch **)&gCrmOrch); + global_orch_list.insert((Orch **)&gCrmOrch); + + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gDirectory.set(gRouteOrch); + ut_orch_list.push_back((Orch **)&gRouteOrch); + global_orch_list.insert((Orch **)&gRouteOrch); + + TableConnector stateDbMirrorSession(m_state_db.get(), STATE_MIRROR_SESSION_TABLE_NAME); + TableConnector confDbMirrorSession(m_config_db.get(), CFG_MIRROR_SESSION_TABLE_NAME); + gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch, gSwitchOrch); + gDirectory.set(gMirrorOrch); + ut_orch_list.push_back((Orch **)&gMirrorOrch); + global_orch_list.insert((Orch **)&gMirrorOrch); + + vector dash_tables = { + APP_DASH_APPLIANCE_TABLE_NAME, + APP_DASH_ROUTING_TYPE_TABLE_NAME, + APP_DASH_ENI_TABLE_NAME, + APP_DASH_ENI_ROUTE_TABLE_NAME, + APP_DASH_QOS_TABLE_NAME + }; + + m_DashOrch = new DashOrch(m_app_db.get(), dash_tables, m_dpu_app_state_db.get(), nullptr); + gDirectory.set(m_DashOrch); + ut_orch_list.push_back((Orch **)&m_DashOrch); + + vector dash_meter_tables = { + APP_DASH_METER_POLICY_TABLE_NAME, + APP_DASH_METER_RULE_TABLE_NAME + }; + + m_DashMeterOrch = new DashMeterOrch(m_app_db.get(), dash_meter_tables, m_DashOrch, m_dpu_app_state_db.get(), nullptr); + gDirectory.set(m_DashMeterOrch); + ut_orch_list.push_back((Orch **)&m_DashMeterOrch); + + TableConnector confDbAclTable(m_config_db.get(), CFG_ACL_TABLE_TABLE_NAME); + TableConnector confDbAclTableType(m_config_db.get(), CFG_ACL_TABLE_TYPE_TABLE_NAME); + TableConnector confDbAclRuleTable(m_config_db.get(), CFG_ACL_RULE_TABLE_NAME); + TableConnector appDbAclTable(m_app_db.get(), APP_ACL_TABLE_TABLE_NAME); + TableConnector appDbAclTableType(m_app_db.get(), APP_ACL_TABLE_TYPE_TABLE_NAME); + TableConnector appDbAclRuleTable(m_app_db.get(), APP_ACL_RULE_TABLE_NAME); + + vector acl_table_connectors = { + confDbAclTableType, + confDbAclTable, + confDbAclRuleTable, + appDbAclTable, + appDbAclRuleTable, + appDbAclTableType, + }; + gAclOrch = new AclOrch(acl_table_connectors, m_state_db.get(), + gSwitchOrch, gPortsOrch, gMirrorOrch, gNeighOrch, gRouteOrch, NULL); + gDirectory.set(gAclOrch); + ut_orch_list.push_back((Orch **)&gAclOrch); + global_orch_list.insert((Orch **)&gAclOrch); + + m_MuxOrch = new MuxOrch(m_config_db.get(), mux_tables, m_TunnelDecapOrch, gNeighOrch, gFdbOrch); + gDirectory.set(m_MuxOrch); + ut_orch_list.push_back((Orch **)&m_MuxOrch); + + m_MuxCableOrch = new MuxCableOrch(m_app_db.get(), m_state_db.get(), APP_MUX_CABLE_TABLE_NAME); + gDirectory.set(m_MuxCableOrch); + ut_orch_list.push_back((Orch **)&m_MuxCableOrch); + + m_MuxStateOrch = new MuxStateOrch(m_state_db.get(), STATE_HW_MUX_CABLE_TABLE_NAME); + gDirectory.set(m_MuxStateOrch); + ut_orch_list.push_back((Orch **)&m_MuxStateOrch); + + m_VxlanTunnelOrch = new VxlanTunnelOrch(m_state_db.get(), m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME); + gDirectory.set(m_VxlanTunnelOrch); + ut_orch_list.push_back((Orch **)&m_VxlanTunnelOrch); + + m_vnetOrch = new VNetOrch(m_app_db.get(), APP_VNET_TABLE_NAME); + gDirectory.set(m_vnetOrch); + ut_orch_list.push_back((Orch **)&m_vnetOrch); + + vector dash_vnet_tables = { + APP_DASH_VNET_TABLE_NAME, + APP_DASH_VNET_MAPPING_TABLE_NAME + }; + + m_dashVnetOrch = new DashVnetOrch(m_app_db.get(), dash_vnet_tables, m_dpu_app_state_db.get(), nullptr); + gDirectory.set(m_dashVnetOrch); + ut_orch_list.push_back((Orch **)&m_dashVnetOrch); + + vector dash_route_tables = { + APP_DASH_ROUTE_TABLE_NAME, + APP_DASH_ROUTE_RULE_TABLE_NAME, + APP_DASH_ROUTE_GROUP_TABLE_NAME + }; + + m_DashRouteOrch = new DashRouteOrch(m_app_db.get(), dash_route_tables, m_DashOrch, m_dpu_app_state_db.get(), nullptr); + gDirectory.set(m_DashRouteOrch); + ut_orch_list.push_back((Orch **)&m_DashRouteOrch); + + vector dash_tunnel_tables = { + APP_DASH_TUNNEL_TABLE_NAME + }; + m_DashTunnelOrch= new DashTunnelOrch(m_app_db.get(), dash_tunnel_tables, m_dpu_app_state_db.get(), nullptr); + gDirectory.set(m_DashTunnelOrch); + ut_orch_list.push_back((Orch **)&m_DashTunnelOrch); + + vector dash_port_map_tables = { + APP_DASH_OUTBOUND_PORT_MAP_TABLE_NAME, + APP_DASH_OUTBOUND_PORT_MAP_RANGE_TABLE_NAME + }; + m_dashPortMapOrch = new DashPortMapOrch(m_app_db.get(), dash_port_map_tables, m_dpu_app_state_db.get(), nullptr); + gDirectory.set(m_dashPortMapOrch); + ut_orch_list.push_back((Orch **)&m_dashPortMapOrch); + + ApplyInitialConfigs(); + PostSetUp(); +} + +void MockOrchTest::TearDown() +{ + PreTearDown(); + for (std::vector::reverse_iterator rit = ut_orch_list.rbegin(); rit != ut_orch_list.rend(); ++rit) + { + Orch **orch = *rit; + delete *orch; + if (global_orch_list.find(orch) != global_orch_list.end()) + { + *orch = nullptr; + } + } + + gDirectory.m_values.clear(); + + ut_helper::uninitSaiApi(); +} +} diff --git a/tests/mock_tests/mock_orch_test.h b/tests/mock_tests/mock_orch_test.h new file mode 100644 index 00000000000..a3922200e82 --- /dev/null +++ b/tests/mock_tests/mock_orch_test.h @@ -0,0 +1,73 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "gtest/gtest.h" +#include + + +namespace mock_orch_test +{ + static const string PEER_SWITCH_HOSTNAME = "peer_hostname"; + static const string PEER_IPV4_ADDRESS = "1.1.1.1"; + static const string ACTIVE_INTERFACE = "Ethernet4"; + static const string STANDBY_INTERFACE = "Ethernet8"; + static const string ETHERNET0 = "Ethernet0"; + static const string ETHERNET4 = "Ethernet4"; + static const string ETHERNET8 = "Ethernet8"; + static const string ETHERNET12 = "Ethernet12"; + static const string ACTIVE_STATE = "active"; + static const string STANDBY_STATE = "standby"; + static const string STATE = "state"; + static const string VLAN_1000 = "Vlan1000"; + static const string VLAN_2000 = "Vlan2000"; + static const string VLAN_3000 = "Vlan3000"; + static const string VLAN_4000 = "Vlan4000"; + static const string SERVER_IP1 = "192.168.0.2"; + static const string SERVER_IP2 = "192.168.0.3"; + static const string MAC1 = "62:f9:65:10:2f:01"; + static const string MAC2 = "62:f9:65:10:2f:02"; + static const string MAC3 = "62:f9:65:10:2f:03"; + static const string MAC4 = "62:f9:65:10:2f:04"; + static const string MAC5 = "62:f9:65:10:2f:05"; + + class MockOrchTest: public ::testing::Test + { + protected: + std::vector ut_orch_list; + std::set global_orch_list; + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_dpu_app_db; + shared_ptr m_dpu_app_state_db; + shared_ptr m_chassis_app_db; + MuxOrch *m_MuxOrch; + MuxCableOrch *m_MuxCableOrch; + MuxCable *m_MuxCable; + TunnelDecapOrch *m_TunnelDecapOrch; + MuxStateOrch *m_MuxStateOrch; + FlexCounterOrch *m_FlexCounterOrch; + VxlanTunnelOrch *m_VxlanTunnelOrch; + VNetOrch *m_vnetOrch; + DashOrch *m_DashOrch; + DashVnetOrch *m_dashVnetOrch; + DashHaOrch *m_dashHaOrch; + DashRouteOrch *m_DashRouteOrch; + DashTunnelOrch *m_DashTunnelOrch; + DashPortMapOrch *m_dashPortMapOrch; + DashMeterOrch *m_DashMeterOrch; + + void PrepareSai(); + void SetUp(); + void TearDown(); + virtual void ApplyInitialConfigs(); + virtual void PostSetUp(); + virtual void PreTearDown(); + virtual void ApplySaiMock(); + }; +} diff --git a/tests/mock_tests/mock_orchagent_main.cpp b/tests/mock_tests/mock_orchagent_main.cpp index e709824707f..6fa8b19a8b7 100644 --- a/tests/mock_tests/mock_orchagent_main.cpp +++ b/tests/mock_tests/mock_orchagent_main.cpp @@ -13,10 +13,20 @@ MacAddress gMacAddress; MacAddress gVxlanMacAddress; string gMySwitchType = "switch"; +string gMySwitchSubType = "SmartSwitch"; int32_t gVoqMySwitchId = 0; string gMyHostName = "Linecard1"; string gMyAsicName = "Asic0"; +bool gTraditionalFlexCounter = false; +bool gSyncMode = false; +sai_redis_communication_mode_t gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_REDIS_ASYNC; VRFOrch *gVrfOrch; void syncd_apply_view() {} + +bool gMultiAsicVoq = false; +bool isChassisDbInUse() +{ + return gMultiAsicVoq; +} diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index 93c1588b9b3..b087066fdd5 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -4,6 +4,7 @@ #include "switchorch.h" #include "crmorch.h" #include "portsorch.h" +#include "debugcounterorch.h" #include "routeorch.h" #include "flowcounterrouteorch.h" #include "intfsorch.h" @@ -11,6 +12,9 @@ #include "fdborch.h" #include "mirrororch.h" #define private public +#include "dashorch.h" +#include "dashrouteorch.h" +#include "dashmeterorch.h" #include "bufferorch.h" #include "qosorch.h" #define protected public @@ -27,7 +31,17 @@ #include "muxorch.h" #include "nhgorch.h" #include "copporch.h" +#include "twamporch.h" +#include "mlagorch.h" +#include "high_frequency_telemetry/hftelorch.h" +#define private public +#include "stporch.h" +#undef private #include "directory.h" +#include "dashvnetorch.h" +#include "dashhaorch.h" +#include "dashtunnelorch.h" +#include "dashportmaporch.h" extern int gBatchSize; @@ -41,6 +55,7 @@ extern sai_object_id_t gUnderlayIfId; extern SwitchOrch *gSwitchOrch; extern CrmOrch *gCrmOrch; extern PortsOrch *gPortsOrch; +extern DebugCounterOrch *gDebugCounterOrch; extern FgNhgOrch *gFgNhgOrch; extern RouteOrch *gRouteOrch; extern FlowCounterRouteOrch *gFlowCounterRouteOrch; @@ -57,6 +72,10 @@ extern Srv6Orch *gSrv6Orch; extern BfdOrch *gBfdOrch; extern AclOrch *gAclOrch; extern PolicerOrch *gPolicerOrch; +extern TunnelDecapOrch *gTunneldecapOrch; +extern StpOrch *gStpOrch; +extern MlagOrch *gMlagOrch; +extern HFTelOrch *gHFTOrch; extern Directory gDirectory; extern sai_acl_api_t *sai_acl_api; @@ -86,3 +105,20 @@ extern sai_mpls_api_t* sai_mpls_api; extern sai_counter_api_t* sai_counter_api; extern sai_samplepacket_api_t *sai_samplepacket_api; extern sai_fdb_api_t* sai_fdb_api; +extern sai_twamp_api_t* sai_twamp_api; +extern sai_tam_api_t* sai_tam_api; +extern sai_dash_vip_api_t* sai_dash_vip_api; +extern sai_dash_direction_lookup_api_t* sai_dash_direction_lookup_api; +extern sai_dash_eni_api_t* sai_dash_eni_api; +extern sai_dash_ha_api_t* sai_dash_ha_api; +extern sai_stp_api_t* sai_stp_api; +extern sai_dash_outbound_ca_to_pa_api_t* sai_dash_outbound_ca_to_pa_api; +extern sai_dash_pa_validation_api_t* sai_dash_pa_validation_api; +extern sai_dash_vnet_api_t* sai_dash_vnet_api; +extern sai_dash_appliance_api_t* sai_dash_appliance_api; +extern sai_dash_outbound_routing_api_t* sai_dash_outbound_routing_api; +extern sai_dash_inbound_routing_api_t* sai_dash_inbound_routing_api; +extern sai_dash_meter_api_t* sai_dash_meter_api; +extern sai_dash_tunnel_api_t* sai_dash_tunnel_api; +extern sai_dash_outbound_port_map_api_t* sai_dash_outbound_port_map_api; +extern sai_dash_trusted_vni_api_t* sai_dash_trusted_vni_api; diff --git a/tests/mock_tests/mock_sai_api.cpp b/tests/mock_tests/mock_sai_api.cpp new file mode 100644 index 00000000000..1f7e7e63efe --- /dev/null +++ b/tests/mock_tests/mock_sai_api.cpp @@ -0,0 +1,25 @@ +#include "mock_sai_api.h" + +std::set apply_mock_fns; +std::set remove_mock_fns; + +void MockSaiApis() +{ + if (apply_mock_fns.empty()) + { + EXPECT_TRUE(false) << "No mock application functions found. Did you call DEFINE_SAI_API_MOCK and INIT_SAI_API_MOCK for the necessary SAI object type?"; + } + + for (auto apply_fn : apply_mock_fns) + { + (*apply_fn)(); + } +} + +void RestoreSaiApis() +{ + for (auto remove_fn : remove_mock_fns) + { + (*remove_fn)(); + } +} \ No newline at end of file diff --git a/tests/mock_tests/mock_sai_api.h b/tests/mock_tests/mock_sai_api.h index 63d8921bf1a..629c996907b 100644 --- a/tests/mock_tests/mock_sai_api.h +++ b/tests/mock_tests/mock_sai_api.h @@ -1,82 +1,223 @@ +#ifndef MOCK_SAI_API_H +#define MOCK_SAI_API_H #include "mock_orchagent_main.h" #include +/* +To mock a particular SAI API: +1. At the top of the test CPP file using the mock, call DEFINE_SAI_API_MOCK or DEFINE_SAI_GENERIC_API_MOCK + for each SAI API you want to mock. +2. At the top of the test CPP file using the mock, call EXTERN_MOCK_FNS. +3. In the SetUp method of the test class, call INIT_SAI_API_MOCK for each SAI API you want to mock. +4. In the SetUp method of the test class, call MockSaiApis. +5. In the TearDown method of the test class, call RestoreSaiApis. +6. After RestoreSaiApis, call DEINIT_SAI_API_MOCK +*/ + using ::testing::Return; using ::testing::NiceMock; -std::set apply_mock_fns; -std::set remove_mock_fns; +#define EXTERN_MOCK_FNS \ + extern std::set apply_mock_fns; \ + extern std::set remove_mock_fns; + +EXTERN_MOCK_FNS -#define CREATE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list -#define REMOVE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry -#define CREATE_ARGS(sai_object_type) sai_object_type##_entry, attr_count, attr_list -#define REMOVE_ARGS(sai_object_type) sai_object_type##_entry +#define CREATE_PARAMS(sai_entry_type) _In_ const sai_##sai_entry_type##_entry_t *sai_entry_type##_entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list +#define REMOVE_PARAMS(sai_entry_type) _In_ const sai_##sai_entry_type##_entry_t *sai_entry_type##_entry +#define CREATE_BULK_PARAMS(sai_entry_type) _In_ uint32_t object_count, _In_ const sai_##sai_entry_type##_entry_t *sai_entry_type##_entry, _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses +#define REMOVE_BULK_PARAMS(sai_entry_type) _In_ uint32_t object_count, _In_ const sai_##sai_entry_type##_entry_t *sai_entry_type##_entry, _In_ sai_bulk_op_error_mode_t mode, _In_ sai_status_t *object_statuses +#define SET_BULK_ATTR_PARAMS(sai_entry_type) _In_ uint32_t object_count, _In_ const sai_##sai_entry_type##_entry_t *sai_entry_type##__entry, _In_ const sai_attribute_t *attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses +#define CREATE_ARGS(sai_entry_type) sai_entry_type##_entry, attr_count, attr_list +#define REMOVE_ARGS(sai_entry_type) sai_entry_type##_entry +#define CREATE_BULK_ARGS(sai_entry_type) object_count, sai_entry_type##_entry, attr_count, attr_list, mode, object_statuses +#define REMOVE_BULK_ARGS(sai_entry_type) object_count, sai_entry_type##_entry, mode, object_statuses +#define SET_BULK_ATTR_ARGS(sai_entry_type) object_count, sai_entry_type##__entry, attr_list, mode, object_statuses #define GENERIC_CREATE_PARAMS(sai_object_type) _Out_ sai_object_id_t *sai_object_type##_id, _In_ sai_object_id_t switch_id, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list #define GENERIC_REMOVE_PARAMS(sai_object_type) _In_ sai_object_id_t sai_object_type##_id +#define GENERIC_BULK_CREATE_PARAMS(sai_object_type) _In_ sai_object_id_t switch_id, _In_ uint32_t object_count, _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_object_id_t *object_id, _Out_ sai_status_t *object_statuses +#define GENERIC_BULK_REMOVE_PARAMS(sai_object_type) _In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses #define GENERIC_CREATE_ARGS(sai_object_type) sai_object_type##_id, switch_id, attr_count, attr_list #define GENERIC_REMOVE_ARGS(sai_object_type) sai_object_type##_id +#define GENERIC_BULK_CREATE_ARGS(sai_object_type) switch_id, object_count, attr_count, attr_list, mode, object_id, object_statuses +#define GENERIC_BULK_REMOVE_ARGS(sai_object_type) object_count, object_id, mode, object_statuses + +#define DEFINE_SAI_API_MOCK_SPECIFY_ENTRY(sai_api_type, sai_entry_type) \ + static sai_##sai_api_type##_api_t *old_sai_##sai_api_type##_api; \ + static sai_##sai_api_type##_api_t ut_sai_##sai_api_type##_api; \ + class mock_sai_##sai_api_type##_api_t \ + { \ + public: \ + mock_sai_##sai_api_type##_api_t() \ + { \ + ON_CALL(*this, create_##sai_entry_type##_entry) \ + .WillByDefault( \ + [this](CREATE_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->create_##sai_entry_type##_entry(CREATE_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, remove_##sai_entry_type##_entry) \ + .WillByDefault( \ + [this](REMOVE_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->remove_##sai_entry_type##_entry(REMOVE_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, create_##sai_entry_type##_entries) \ + .WillByDefault( \ + [this](CREATE_BULK_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->create_##sai_entry_type##_entries(CREATE_BULK_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, remove_##sai_entry_type##_entries) \ + .WillByDefault( \ + [this](REMOVE_BULK_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->remove_##sai_entry_type##_entries(REMOVE_BULK_ARGS(sai_entry_type)); \ + }); \ + } \ + MOCK_METHOD3(create_##sai_entry_type##_entry, sai_status_t(CREATE_PARAMS(sai_entry_type))); \ + MOCK_METHOD1(remove_##sai_entry_type##_entry, sai_status_t(REMOVE_PARAMS(sai_entry_type))); \ + MOCK_METHOD6(create_##sai_entry_type##_entries, sai_status_t(CREATE_BULK_PARAMS(sai_entry_type))); \ + MOCK_METHOD4(remove_##sai_entry_type##_entries, sai_status_t(REMOVE_BULK_PARAMS(sai_entry_type))); \ + }; \ + static mock_sai_##sai_api_type##_api_t *mock_sai_##sai_api_type##_api; \ + inline sai_status_t mock_create_##sai_entry_type##_entry(CREATE_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->create_##sai_entry_type##_entry(CREATE_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_remove_##sai_entry_type##_entry(REMOVE_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->remove_##sai_entry_type##_entry(REMOVE_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_create_##sai_entry_type##_entries(CREATE_BULK_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->create_##sai_entry_type##_entries(CREATE_BULK_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_remove_##sai_entry_type##_entries(REMOVE_BULK_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->remove_##sai_entry_type##_entries(REMOVE_BULK_ARGS(sai_entry_type)); \ + } \ + inline void apply_sai_##sai_api_type##_api_mock() \ + { \ + mock_sai_##sai_api_type##_api = new NiceMock(); \ + \ + old_sai_##sai_api_type##_api = sai_##sai_api_type##_api; \ + ut_sai_##sai_api_type##_api = *sai_##sai_api_type##_api; \ + sai_##sai_api_type##_api = &ut_sai_##sai_api_type##_api; \ + \ + sai_##sai_api_type##_api->create_##sai_entry_type##_entry = mock_create_##sai_entry_type##_entry; \ + sai_##sai_api_type##_api->remove_##sai_entry_type##_entry = mock_remove_##sai_entry_type##_entry; \ + sai_##sai_api_type##_api->create_##sai_entry_type##_entries = mock_create_##sai_entry_type##_entries; \ + sai_##sai_api_type##_api->remove_##sai_entry_type##_entries = mock_remove_##sai_entry_type##_entries; \ + } \ + inline void remove_sai_##sai_api_type##_api_mock() \ + { \ + sai_##sai_api_type##_api = old_sai_##sai_api_type##_api; \ + delete mock_sai_##sai_api_type##_api; \ + } /* -The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the sai_object_type it is called on: -1. Create a pointer to store the original API -2. Create a new SAI_API where we can safely mock without affecting the original API -3. Define a class with mocked methods to create and remove the object type (to be used with gMock) -4. Create a pointer of the above class -5. Define two wrapper functions to create and remove the object type that has the same signature as the original SAI API function -6. Define a method to apply the mock -7. Define a method to remove the mock +The same as DEFINE_SAI_API_MOCK but with addition definitions for set__entries_attribute. +This is required since some sai_api do not support this function call yet. */ -#define DEFINE_SAI_API_MOCK(sai_object_type) \ - sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ - sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ - class mock_sai_##sai_object_type##_api_t \ - { \ - public: \ - mock_sai_##sai_object_type##_api_t() \ - { \ - ON_CALL(*this, create_##sai_object_type##_entry) \ - .WillByDefault( \ - [this](CREATE_PARAMS(sai_object_type)) { \ - return old_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ - }); \ - ON_CALL(*this, remove_##sai_object_type##_entry) \ - .WillByDefault( \ - [this](REMOVE_PARAMS(sai_object_type)) { \ - return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ - }); \ - } \ - MOCK_METHOD3(create_##sai_object_type##_entry, sai_status_t(CREATE_PARAMS(sai_object_type))); \ - MOCK_METHOD1(remove_##sai_object_type##_entry, sai_status_t(REMOVE_PARAMS(sai_object_type))); \ - }; \ - mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ - sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ - { \ - return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ - } \ - sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ - { \ - return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ - } \ - void apply_sai_##sai_object_type##_api_mock() \ - { \ - mock_sai_##sai_object_type##_api = new NiceMock(); \ - \ - old_sai_##sai_object_type##_api = sai_##sai_object_type##_api; \ - ut_sai_##sai_object_type##_api = *sai_##sai_object_type##_api; \ - sai_##sai_object_type##_api = &ut_sai_##sai_object_type##_api; \ - \ - sai_##sai_object_type##_api->create_##sai_object_type##_entry = mock_create_##sai_object_type##_entry; \ - sai_##sai_object_type##_api->remove_##sai_object_type##_entry = mock_remove_##sai_object_type##_entry; \ - } \ - void remove_sai_##sai_object_type##_api_mock() \ - { \ - sai_##sai_object_type##_api = old_sai_##sai_object_type##_api; \ - delete mock_sai_##sai_object_type##_api; \ +#define DEFINE_SAI_API_MOCK_SPECIFY_ENTRY_WITH_SET(sai_api_type, sai_entry_type) \ + static sai_##sai_api_type##_api_t *old_sai_##sai_api_type##_api; \ + static sai_##sai_api_type##_api_t ut_sai_##sai_api_type##_api; \ + class mock_sai_##sai_api_type##_api_t \ + { \ + public: \ + mock_sai_##sai_api_type##_api_t() \ + { \ + ON_CALL(*this, create_##sai_entry_type##_entry) \ + .WillByDefault( \ + [this](CREATE_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->create_##sai_entry_type##_entry(CREATE_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, remove_##sai_entry_type##_entry) \ + .WillByDefault( \ + [this](REMOVE_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->remove_##sai_entry_type##_entry(REMOVE_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, create_##sai_entry_type##_entries) \ + .WillByDefault( \ + [this](CREATE_BULK_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->create_##sai_entry_type##_entries(CREATE_BULK_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, remove_##sai_entry_type##_entries) \ + .WillByDefault( \ + [this](REMOVE_BULK_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->remove_##sai_entry_type##_entries(REMOVE_BULK_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, set_##sai_entry_type##_entries_attribute) \ + .WillByDefault( \ + [this](SET_BULK_ATTR_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->set_##sai_entry_type##_entries_attribute( \ + SET_BULK_ATTR_ARGS(sai_entry_type)); \ + }); \ + } \ + MOCK_METHOD3(create_##sai_entry_type##_entry, sai_status_t(CREATE_PARAMS(sai_entry_type))); \ + MOCK_METHOD1(remove_##sai_entry_type##_entry, sai_status_t(REMOVE_PARAMS(sai_entry_type))); \ + MOCK_METHOD6(create_##sai_entry_type##_entries, sai_status_t(CREATE_BULK_PARAMS(sai_entry_type))); \ + MOCK_METHOD4(remove_##sai_entry_type##_entries, sai_status_t(REMOVE_BULK_PARAMS(sai_entry_type))); \ + MOCK_METHOD5(set_##sai_entry_type##_entries_attribute, sai_status_t(SET_BULK_ATTR_PARAMS(sai_entry_type))); \ + }; \ + static mock_sai_##sai_api_type##_api_t *mock_sai_##sai_api_type##_api; \ + inline sai_status_t mock_create_##sai_entry_type##_entry(CREATE_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->create_##sai_entry_type##_entry(CREATE_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_remove_##sai_entry_type##_entry(REMOVE_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->remove_##sai_entry_type##_entry(REMOVE_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_create_##sai_entry_type##_entries(CREATE_BULK_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->create_##sai_entry_type##_entries(CREATE_BULK_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_remove_##sai_entry_type##_entries(REMOVE_BULK_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->remove_##sai_entry_type##_entries(REMOVE_BULK_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_set_##sai_entry_type##_entries_attribute(SET_BULK_ATTR_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_type##_api->set_##sai_entry_type##_entries_attribute(SET_BULK_ATTR_ARGS(sai_entry_type)); \ + } \ + inline void apply_sai_##sai_api_type##_api_mock() \ + { \ + mock_sai_##sai_api_type##_api = new NiceMock(); \ + \ + old_sai_##sai_api_type##_api = sai_##sai_api_type##_api; \ + ut_sai_##sai_api_type##_api = *sai_##sai_api_type##_api; \ + sai_##sai_api_type##_api = &ut_sai_##sai_api_type##_api; \ + \ + sai_##sai_api_type##_api->create_##sai_entry_type##_entry = mock_create_##sai_entry_type##_entry; \ + sai_##sai_api_type##_api->remove_##sai_entry_type##_entry = mock_remove_##sai_entry_type##_entry; \ + sai_##sai_api_type##_api->create_##sai_entry_type##_entries = mock_create_##sai_entry_type##_entries; \ + sai_##sai_api_type##_api->remove_##sai_entry_type##_entries = mock_remove_##sai_entry_type##_entries; \ + sai_##sai_api_type##_api->set_##sai_entry_type##_entries_attribute = mock_set_##sai_entry_type##_entries_attribute; \ + } \ + inline void remove_sai_##sai_api_type##_api_mock() \ + { \ + sai_##sai_api_type##_api = old_sai_##sai_api_type##_api; \ + delete mock_sai_##sai_api_type##_api; \ } +#define DEFINE_SAI_API_MOCK_MATCH_ENTRY(sai_api_type) DEFINE_SAI_API_MOCK_SPECIFY_ENTRY(sai_api_type, sai_api_type) + +/* Overload DEFINE_SAI_API_MOCK by number of arguments to account for entry types that do not match the API name + * 1. If one argument is provided, assume the entry type matches API name (e.g. sai_neighbor_api_t and sai_neighbor_entry_t) + * 2. If two arguments are provided, use the second argument as the entry type (e.g. sai_dash_outbound_ca_to_pa_api_t and sai_outbound_ca_to_pa_entry_t) + */ +#define GET_MOCK_MACRO(_1, _2, NAME, ...) NAME +/* + * DEFINE_SAI_API_MOCK is deprecated. + * Use DEFINE_SAI_ENTRY_APIS_MOCK which supports mocking multiple entry types for a single sai api class. + */ +#define DEFINE_SAI_API_MOCK(...) \ + GET_MOCK_MACRO(__VA_ARGS__, DEFINE_SAI_API_MOCK_SPECIFY_ENTRY, DEFINE_SAI_API_MOCK_MATCH_ENTRY)(__VA_ARGS__) + +/* DEFINE_SAI_GENERIC_API_MOCK will be deprecated. + * Please use DEFINE_SAI_GENERIC_APIS_MOCK which supports one or multiple sai_object_type to be mocked in one sai api class. + */ #define DEFINE_SAI_GENERIC_API_MOCK(sai_api_name, sai_object_type) \ - sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ - sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ class mock_sai_##sai_api_name##_api_t \ { \ public: \ @@ -96,16 +237,16 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ }; \ - mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ - sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ } \ - sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ { \ return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ } \ - void apply_sai_##sai_api_name##_api_mock() \ + inline void apply_sai_##sai_api_name##_api_mock() \ { \ mock_sai_##sai_api_name##_api = new NiceMock(); \ \ @@ -116,34 +257,277 @@ The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ } \ - void remove_sai_##sai_api_name##_api_mock() \ + inline void remove_sai_##sai_api_name##_api_mock() \ { \ sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ delete mock_sai_##sai_api_name##_api; \ } -// Stores pointers to mock apply/remove functions to avoid needing to manually call each function -#define INIT_SAI_API_MOCK(sai_object_type) \ - apply_mock_fns.insert(&apply_sai_##sai_object_type##_api_mock); \ - remove_mock_fns.insert(&remove_sai_##sai_object_type##_api_mock); - -void MockSaiApis() -{ - if (apply_mock_fns.empty()) - { - EXPECT_TRUE(false) << "No mock application functions found. Did you call DEFINE_SAI_API_MOCK and INIT_SAI_API_MOCK for the necessary SAI object type?"; +/* Helper macros to iterate over multiple sai_object_type inputs */ +#define FOR_EACH_1(action, api_name, x) action(api_name, x) +#define FOR_EACH_2(action, api_name, x, ...) action(api_name, x) FOR_EACH_1(action, api_name, __VA_ARGS__) + +#define GET_FOR_EACH_MACRO(_1, _2, NAME, ...) NAME +#define FOR_EACH(action, api_name, ...) \ + GET_FOR_EACH_MACRO(__VA_ARGS__, FOR_EACH_2, FOR_EACH_1)(action, api_name, __VA_ARGS__) + +#define DEFINE_ON_CALL_DEFAULTS(sai_api_name, sai_object_type) \ + ON_CALL(*this, create_##sai_object_type).WillByDefault([this](GENERIC_CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type).WillByDefault([this](GENERIC_REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, set_##sai_object_type##_attribute).WillByDefault([this](sai_object_id_t oid, const sai_attribute_t *attr) { \ + return old_sai_##sai_api_name##_api->set_##sai_object_type##_attribute(oid, attr); \ + }); \ + ON_CALL(*this, create_##sai_object_type##s).WillByDefault([this](GENERIC_BULK_CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->create_##sai_object_type##s(GENERIC_BULK_CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type##s).WillByDefault([this](GENERIC_BULK_REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->remove_##sai_object_type##s(GENERIC_BULK_REMOVE_ARGS(sai_object_type)); \ + }); +#define DEFINE_ENTRY_ON_CALL_DEFAULTS(sai_api_type, sai_entry_type) \ + ON_CALL(*this, create_##sai_entry_type##_entry).WillByDefault([this](CREATE_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->create_##sai_entry_type##_entry(CREATE_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, remove_##sai_entry_type##_entry).WillByDefault([this](REMOVE_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->remove_##sai_entry_type##_entry(REMOVE_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, create_##sai_entry_type##_entries).WillByDefault([this](CREATE_BULK_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->create_##sai_entry_type##_entries(CREATE_BULK_ARGS(sai_entry_type)); \ + }); \ + ON_CALL(*this, remove_##sai_entry_type##_entries).WillByDefault([this](REMOVE_BULK_PARAMS(sai_entry_type)) { \ + return old_sai_##sai_api_type##_api->remove_##sai_entry_type##_entries(REMOVE_BULK_ARGS(sai_entry_type)); \ + }); + +#define DEFINE_MOCK_METHODS(sai_api_name, sai_object_type) \ + MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ + MOCK_METHOD2(set_##sai_object_type##_attribute, sai_status_t(sai_object_id_t, const sai_attribute_t *)); \ + MOCK_METHOD7(create_##sai_object_type##s, sai_status_t(GENERIC_BULK_CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD4(remove_##sai_object_type##s, sai_status_t(GENERIC_BULK_REMOVE_PARAMS(sai_object_type))); +#define DEFINE_ENTRY_MOCK_METHODS(sai_api_name, sai_entry_type) \ + MOCK_METHOD3(create_##sai_entry_type##_entry, sai_status_t(CREATE_PARAMS(sai_entry_type))); \ + MOCK_METHOD1(remove_##sai_entry_type##_entry, sai_status_t(REMOVE_PARAMS(sai_entry_type))); \ + MOCK_METHOD6(create_##sai_entry_type##_entries, sai_status_t(CREATE_BULK_PARAMS(sai_entry_type))); \ + MOCK_METHOD4(remove_##sai_entry_type##_entries, sai_status_t(REMOVE_BULK_PARAMS(sai_entry_type))); + +#define DEFINE_WRAPPER_FUNCTIONS(sai_api_name, sai_object_type) \ + inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_set_##sai_object_type##_attribute(sai_object_id_t oid, const sai_attribute_t *attr) \ + { \ + return mock_sai_##sai_api_name##_api->set_##sai_object_type##_attribute(oid, attr); \ + } \ + inline sai_status_t mock_create_##sai_object_type##s(GENERIC_BULK_CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_object_type##s(GENERIC_BULK_CREATE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type##s(GENERIC_BULK_REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_object_type##s(GENERIC_BULK_REMOVE_ARGS(sai_object_type)); \ + } +#define DEFINE_WRAPPER_ENTRY_FUNCTIONS(sai_api_name, sai_entry_type) \ + inline sai_status_t mock_create_##sai_entry_type##_entry(CREATE_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_entry_type##_entry(CREATE_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_remove_##sai_entry_type##_entry(REMOVE_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_entry_type##_entry(REMOVE_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_create_##sai_entry_type##_entries(CREATE_BULK_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_entry_type##_entries(CREATE_BULK_ARGS(sai_entry_type)); \ + } \ + inline sai_status_t mock_remove_##sai_entry_type##_entries(REMOVE_BULK_PARAMS(sai_entry_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_entry_type##_entries(REMOVE_BULK_ARGS(sai_entry_type)); \ + } + +#define APPLY_MOCK_FUNCTIONS(sai_api_name, sai_object_type) \ + sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ + sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ + sai_##sai_api_name##_api->set_##sai_object_type##_attribute = mock_set_##sai_object_type##_attribute; \ + sai_##sai_api_name##_api->create_##sai_object_type##s = mock_create_##sai_object_type##s; \ + sai_##sai_api_name##_api->remove_##sai_object_type##s = mock_remove_##sai_object_type##s; +#define APPLY_ENTRY_MOCK_FUNCTIONS(sai_api_name, sai_entry_type) \ + sai_##sai_api_name##_api->create_##sai_entry_type##_entry = mock_create_##sai_entry_type##_entry; \ + sai_##sai_api_name##_api->remove_##sai_entry_type##_entry = mock_remove_##sai_entry_type##_entry; \ + sai_##sai_api_name##_api->create_##sai_entry_type##_entries = mock_create_##sai_entry_type##_entries; \ + sai_##sai_api_name##_api->remove_##sai_entry_type##_entries = mock_remove_##sai_entry_type##_entries; + +#define DEFINE_SAI_GENERIC_APIS_MOCK(sai_api_name, ...) \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + class mock_sai_##sai_api_name##_api_t \ + { \ + public: \ + mock_sai_##sai_api_name##_api_t(){ \ + FOR_EACH(DEFINE_ON_CALL_DEFAULTS, sai_api_name, __VA_ARGS__) \ + } FOR_EACH(DEFINE_MOCK_METHODS, sai_api_name, __VA_ARGS__) \ + }; \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + FOR_EACH(DEFINE_WRAPPER_FUNCTIONS, sai_api_name, __VA_ARGS__) \ + inline void apply_sai_##sai_api_name##_api_mock() \ + { \ + mock_sai_##sai_api_name##_api = new NiceMock(); \ + old_sai_##sai_api_name##_api = sai_##sai_api_name##_api; \ + ut_sai_##sai_api_name##_api = *sai_##sai_api_name##_api; \ + sai_##sai_api_name##_api = &ut_sai_##sai_api_name##_api; \ + FOR_EACH(APPLY_MOCK_FUNCTIONS, sai_api_name, __VA_ARGS__) \ + } \ + inline void remove_sai_##sai_api_name##_api_mock() \ + { \ + sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ + delete mock_sai_##sai_api_name##_api; \ } - for (auto apply_fn : apply_mock_fns) - { - (*apply_fn)(); +// Use this macro when you need to mock both an object type and an entry type in the same SAI API. +#define DEFINE_SAI_API_COMBINED_MOCK(sai_api_name, sai_object_type, sai_entry_type) \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + class mock_sai_##sai_api_name##_api_t \ + { \ + public: \ + mock_sai_##sai_api_name##_api_t(){ \ + DEFINE_ENTRY_ON_CALL_DEFAULTS(sai_api_name, sai_entry_type) \ + DEFINE_ON_CALL_DEFAULTS(sai_api_name, sai_object_type) \ + } DEFINE_ENTRY_MOCK_METHODS(sai_api_name, sai_entry_type) \ + DEFINE_MOCK_METHODS(sai_api_name, sai_object_type) \ + }; \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + DEFINE_WRAPPER_ENTRY_FUNCTIONS(sai_api_name, sai_entry_type) \ + DEFINE_WRAPPER_FUNCTIONS(sai_api_name, sai_object_type) \ + inline void apply_sai_##sai_api_name##_api_mock() \ + { \ + mock_sai_##sai_api_name##_api = new NiceMock(); \ + \ + old_sai_##sai_api_name##_api = sai_##sai_api_name##_api; \ + ut_sai_##sai_api_name##_api = *sai_##sai_api_name##_api; \ + sai_##sai_api_name##_api = &ut_sai_##sai_api_name##_api; \ + \ + APPLY_ENTRY_MOCK_FUNCTIONS(sai_api_name, sai_entry_type) \ + APPLY_MOCK_FUNCTIONS(sai_api_name, sai_object_type) \ + } \ + inline void remove_sai_##sai_api_name##_api_mock() \ + { \ + sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ + delete mock_sai_##sai_api_name##_api; \ + } +#define DEFINE_SAI_ENTRY_APIS_MOCK(sai_api_name, ...) \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + class mock_sai_##sai_api_name##_api_t { \ + public: \ + mock_sai_##sai_api_name##_api_t() { \ + FOR_EACH(DEFINE_ENTRY_ON_CALL_DEFAULTS, sai_api_name, __VA_ARGS__) \ + } FOR_EACH(DEFINE_ENTRY_MOCK_METHODS, sai_api_name, __VA_ARGS__) \ + }; \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + FOR_EACH(DEFINE_WRAPPER_ENTRY_FUNCTIONS, sai_api_name, __VA_ARGS__) \ + inline void apply_sai_##sai_api_name##_api_mock() { \ + mock_sai_##sai_api_name##_api = new NiceMock(); \ + old_sai_##sai_api_name##_api = sai_##sai_api_name##_api; \ + ut_sai_##sai_api_name##_api = *sai_##sai_api_name##_api; \ + sai_##sai_api_name##_api = &ut_sai_##sai_api_name##_api; \ + FOR_EACH(APPLY_ENTRY_MOCK_FUNCTIONS, sai_api_name, __VA_ARGS__) \ + } \ + inline void remove_sai_##sai_api_name##_api_mock() { \ + sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ + delete mock_sai_##sai_api_name##_api; \ } -} -void RestoreSaiApis() -{ - for (auto remove_fn : remove_mock_fns) - { - (*remove_fn)(); +#define DEFINE_SAI_GENERIC_API_OBJECT_BULK_MOCK(sai_api_name, sai_object_type) \ + static sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + static sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + class mock_sai_##sai_api_name##_api_t \ + { \ + public: \ + mock_sai_##sai_api_name##_api_t() \ + { \ + ON_CALL(*this, create_##sai_object_type) \ + .WillByDefault( \ + [this](GENERIC_CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type) \ + .WillByDefault( \ + [this](GENERIC_REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, create_##sai_object_type##s) \ + .WillByDefault( \ + [this](GENERIC_BULK_CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->create_##sai_object_type##s(GENERIC_BULK_CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type##s) \ + .WillByDefault( \ + [this](GENERIC_BULK_REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->remove_##sai_object_type##s(GENERIC_BULK_REMOVE_ARGS(sai_object_type)); \ + }); \ + } \ + MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ + MOCK_METHOD7(create_##sai_object_type##s, sai_status_t(GENERIC_BULK_CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD4(remove_##sai_object_type##s, sai_status_t(GENERIC_BULK_REMOVE_PARAMS(sai_object_type))); \ + }; \ + static mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + inline sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_create_##sai_object_type##s(GENERIC_BULK_CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_object_type##s(GENERIC_BULK_CREATE_ARGS(sai_object_type)); \ + } \ + inline sai_status_t mock_remove_##sai_object_type##s(GENERIC_BULK_REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_object_type##s(GENERIC_BULK_REMOVE_ARGS(sai_object_type)); \ + } \ + inline void apply_sai_##sai_api_name##_api_mock() \ + { \ + mock_sai_##sai_api_name##_api = new NiceMock(); \ + \ + old_sai_##sai_api_name##_api = sai_##sai_api_name##_api; \ + ut_sai_##sai_api_name##_api = *sai_##sai_api_name##_api; \ + sai_##sai_api_name##_api = &ut_sai_##sai_api_name##_api; \ + \ + sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ + sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ + sai_##sai_api_name##_api->create_##sai_object_type##s = mock_create_##sai_object_type##s; \ + sai_##sai_api_name##_api->remove_##sai_object_type##s = mock_remove_##sai_object_type##s; \ + } \ + inline void remove_sai_##sai_api_name##_api_mock() \ + { \ + sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ + delete mock_sai_##sai_api_name##_api; \ } -} + +// Stores pointers to mock apply/remove functions to avoid needing to manually call each function +#define INIT_SAI_API_MOCK(sai_api_type) \ + apply_mock_fns.insert(&apply_sai_##sai_api_type##_api_mock); \ + remove_mock_fns.insert(&remove_sai_##sai_api_type##_api_mock); + +/* + Call this after RestoreSaiApis to clear the mock_fns + Required when same SAI_API is being mocked in multiple files eg: acl API in multiple tests +*/ +#define DEINIT_SAI_API_MOCK(sai_api_type) \ + apply_mock_fns.erase(&apply_sai_##sai_api_type##_api_mock); \ + remove_mock_fns.erase(&remove_sai_##sai_api_type##_api_mock); + +void MockSaiApis(); +void RestoreSaiApis(); +#endif \ No newline at end of file diff --git a/tests/mock_tests/mock_saihelper.cpp b/tests/mock_tests/mock_saihelper.cpp new file mode 100644 index 00000000000..17973ea31b5 --- /dev/null +++ b/tests/mock_tests/mock_saihelper.cpp @@ -0,0 +1,586 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "mock_response_publisher.h" +#include "saihelper.h" +#include + +namespace saihelper_test +{ + using namespace std; + using ::testing::_; + using ::testing::Throw; + using namespace testing_db; + + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *old_sai_switch_api; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + + bool set_comm_mode_not_supported; + bool use_pipeline_not_supported; + bool record_output_dir_failure; + bool record_filename_failure; + bool record_failure; + bool response_timeout_failure; + uint32_t *_sai_syncd_notifications_count; + int32_t *_sai_syncd_notification_event; + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + switch (attr[0].id) + { + case SAI_REDIS_SWITCH_ATTR_REDIS_COMMUNICATION_MODE: + if (set_comm_mode_not_supported) + { + return SAI_STATUS_NOT_SUPPORTED; + } + break; + case SAI_REDIS_SWITCH_ATTR_USE_PIPELINE: + if (use_pipeline_not_supported) + { + return SAI_STATUS_NOT_SUPPORTED; + } + break; + case SAI_REDIS_SWITCH_ATTR_RECORDING_OUTPUT_DIR: + if (record_output_dir_failure) + { + return SAI_STATUS_FAILURE; + } + break; + case SAI_REDIS_SWITCH_ATTR_RECORDING_FILENAME: + if (record_filename_failure) + { + return SAI_STATUS_FAILURE; + } + break; + case SAI_REDIS_SWITCH_ATTR_RECORD: + if (record_failure) + { + return SAI_STATUS_FAILURE; + } + break; + case SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT: + if (response_timeout_failure) + { + return SAI_STATUS_FAILURE; + } + break; + case SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD: + *_sai_syncd_notifications_count = *_sai_syncd_notifications_count + 1; + *_sai_syncd_notification_event = attr[0].value.s32; + break; + default: + break; + } + return SAI_STATUS_SUCCESS; + } + + void _hook_sai_apis() + { + ut_sai_switch_api = *sai_switch_api; + old_sai_switch_api = sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_apis() + { + sai_switch_api = old_sai_switch_api; + } + + class MockDBTable : public Table { + public: + MockDBTable(swss::DBConnector* db, const std::string& tableName) : Table(db, tableName) {} + + MOCK_METHOD(void, set, (const std::string &key, const std::vector &values, const std::string &op, const std::string &prefix), (override)); + MOCK_METHOD(void, del, (const std::string &key, const std::string &op, const std::string &prefix), (override)); + }; + + class SaihelperTest : public ::testing::Test + { + public: + + SaihelperTest() + { + }; + + ~SaihelperTest() + { + }; + + void SetUp() override + { + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + + set_comm_mode_not_supported = false; + use_pipeline_not_supported = false; + record_output_dir_failure = false; + record_filename_failure = false; + record_failure = false; + response_timeout_failure = false; + + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void initSwitchOrch() + { + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_suppress_asic_sdk_health_categories(m_config_db.get(), CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME); + + vector switch_tables = { + conf_asic_sensors, + conf_suppress_asic_sdk_health_categories, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + } + + void TearDown() override + { + ::testing_db::reset(); + + gDirectory.m_values.clear(); + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(SaihelperTest, TestSetCommunicationModeFailure) { + set_comm_mode_not_supported = true; + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + initSaiRedis(); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + set_comm_mode_not_supported = false; + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestSetRedisPipelineFailure) { + use_pipeline_not_supported = true; + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + initSaiRedis(); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + use_pipeline_not_supported = false; + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestSetRecordingOutputDirFailure) { + record_output_dir_failure = true; + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + initSaiRedis(); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + record_output_dir_failure = false; + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestSetRecordingFilenameFailure) { + record_filename_failure = true; + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + initSaiRedis(); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + record_filename_failure = false; + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestSetRecordFailure) { + record_failure = true; + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + initSaiRedis(); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + record_failure = false; + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestSetResponseTimeoutFailure) { + response_timeout_failure = true; + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + (void) setenv("platform", "mellanox", 1); + + initSaiRedis(); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + response_timeout_failure = false; + (void) unsetenv("platform"); + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestCreateFailure) { + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + task_process_status status; + + status = handleSaiCreateStatus(SAI_API_ROUTE, SAI_STATUS_FAILURE); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_PORT, SAI_STATUS_FAILURE); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_TUNNEL, SAI_STATUS_NOT_SUPPORTED); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_TUNNEL, SAI_STATUS_NOT_IMPLEMENTED); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, SAI_STATUS_INVALID_PARAMETER); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_SWITCH, SAI_STATUS_UNINITIALIZED); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus((sai_api_t) SAI_API_DASH_OUTBOUND_ROUTING, SAI_STATUS_INVALID_OBJECT_ID); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_ACL, SAI_STATUS_MANDATORY_ATTRIBUTE_MISSING); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_QOS_MAP, SAI_STATUS_INVALID_ATTR_VALUE_MAX); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_TUNNEL, SAI_STATUS_ATTR_NOT_IMPLEMENTED_6); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_ROUTER_INTERFACE, SAI_STATUS_UNKNOWN_ATTRIBUTE_0); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_ROUTER_INTERFACE, SAI_STATUS_ATTR_NOT_SUPPORTED_0); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiCreateStatus(SAI_API_LAG, SAI_STATUS_INVALID_PORT_NUMBER); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestSetFailure) { + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + task_process_status status; + + status = handleSaiSetStatus(SAI_API_ROUTE, SAI_STATUS_FAILURE); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiSetStatus(SAI_API_ROUTE, SAI_STATUS_NOT_EXECUTED); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_FAILURE); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiSetStatus(SAI_API_TUNNEL, SAI_STATUS_NOT_IMPLEMENTED); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiSetStatus(SAI_API_HOSTIF, SAI_STATUS_INVALID_PARAMETER); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_ATTR_NOT_SUPPORTED_0); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + status = handleSaiSetStatus(SAI_API_LAG, SAI_STATUS_INVALID_PORT_NUMBER); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + ASSERT_EQ(status, task_failed); + + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestGetFailure) { + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + task_process_status status; + + status = handleSaiGetStatus(SAI_API_FDB, SAI_STATUS_INVALID_PARAMETER); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_failed); + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestAllSuccess) { + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + task_process_status status; + + status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, SAI_STATUS_SUCCESS); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiSetStatus(SAI_API_ROUTE, SAI_STATUS_SUCCESS); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiRemoveStatus(SAI_API_NEXT_HOP, SAI_STATUS_SUCCESS); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiCreateStatus(SAI_API_VLAN, SAI_STATUS_ITEM_ALREADY_EXISTS); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiSetStatus(SAI_API_ROUTE, SAI_STATUS_ITEM_ALREADY_EXISTS); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiCreateStatus(SAI_API_MIRROR, SAI_STATUS_ITEM_NOT_FOUND); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiSetStatus(SAI_API_NEIGHBOR, SAI_STATUS_ITEM_NOT_FOUND); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiSetStatus(SAI_API_NEXT_HOP, SAI_STATUS_OBJECT_IN_USE); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiRemoveStatus(SAI_API_LAG, SAI_STATUS_ITEM_NOT_FOUND); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + status = handleSaiRemoveStatus(SAI_API_PORT, SAI_STATUS_ITEM_ALREADY_EXISTS); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_success); + + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestCreateSetResourceFailure) { + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + task_process_status status; + + status = handleSaiCreateStatus(SAI_API_ACL, SAI_STATUS_INSUFFICIENT_RESOURCES); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_need_retry); + + status = handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_INSUFFICIENT_RESOURCES); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_need_retry); + + status = handleSaiCreateStatus(SAI_API_TUNNEL, SAI_STATUS_TABLE_FULL); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_need_retry); + + status = handleSaiSetStatus(SAI_API_ROUTER_INTERFACE, SAI_STATUS_TABLE_FULL); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_need_retry); + + status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, SAI_STATUS_NO_MEMORY); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_need_retry); + + status = handleSaiSetStatus(SAI_API_NEXT_HOP_GROUP, SAI_STATUS_NO_MEMORY); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_need_retry); + + _unhook_sai_apis(); + } + + TEST_F(SaihelperTest, TestRemoveObjectInUse) { + _hook_sai_apis(); + initSwitchOrch(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + task_process_status status; + + status = handleSaiRemoveStatus(SAI_API_NEXT_HOP_GROUP, SAI_STATUS_OBJECT_IN_USE); + ASSERT_EQ(*_sai_syncd_notifications_count, 0); + ASSERT_EQ(status, task_need_retry); + + _unhook_sai_apis(); + } + + TEST(WriteToDBTest, SetThrowsException) { + auto db = std::make_shared("DPU_APPL_STATE_DB", 0); + std::unique_ptr mockTable = std::make_unique(db.get(), "APP_DASH_APPLIANCE_TABLE_NAME"); + std::string key = "testKey"; + uint32_t res = 1; + std::string version = "v1"; + + auto* mock = dynamic_cast(mockTable.get()); + ASSERT_NE(mock, nullptr); + + // Simulate set() throwing an exception + EXPECT_CALL(*mock, set(_, _, _, _)) + .WillOnce(Throw(std::runtime_error("Set failed"))); + + writeResultToDB(mockTable, key, res, version); + } + + TEST(RemoveFromDBTest, DelThrowsException) { + auto db = std::make_shared("DPU_APPL_STATE_DB", 0); + std::unique_ptr mockTable = std::make_unique(db.get(), "APP_DASH_APPLIANCE_TABLE_NAME"); + std::string key = "testKey"; + + auto* mock = dynamic_cast(mockTable.get()); + ASSERT_NE(mock, nullptr); + + // Simulate del() throwing an exception + EXPECT_CALL(*mock, del(_, _, _)) + .WillOnce(Throw(std::runtime_error("Del failed"))); + + removeResultFromDB(mockTable, key); + } + +} + diff --git a/tests/mock_tests/mock_subscriberstatetable.cpp b/tests/mock_tests/mock_subscriberstatetable.cpp new file mode 100644 index 00000000000..55481919407 --- /dev/null +++ b/tests/mock_tests/mock_subscriberstatetable.cpp @@ -0,0 +1,30 @@ +#include "subscriberstatetable.h" + +namespace swss +{ + SubscriberStateTable::SubscriberStateTable(DBConnector *db, const std::string &tableName, int popBatchSize, int pri) : + ConsumerTableBase(db, tableName, popBatchSize, pri), + m_table(db, tableName) + { + } + + void SubscriberStateTable::pops(std::deque &vkco, const std::string& /*prefix*/) + { + std::vector keys; + m_table.getKeys(keys); + for (const auto &key: keys) + { + KeyOpFieldsValuesTuple kco; + + kfvKey(kco) = key; + kfvOp(kco) = SET_COMMAND; + + if (!m_table.get(key, kfvFieldsValues(kco))) + { + continue; + } + m_table.del(key); + vkco.push_back(kco); + } + } +} diff --git a/tests/mock_tests/mock_table.cpp b/tests/mock_tests/mock_table.cpp index 4d512a98354..94251f16c3b 100644 --- a/tests/mock_tests/mock_table.cpp +++ b/tests/mock_tests/mock_table.cpp @@ -1,6 +1,9 @@ #include "table.h" #include "producerstatetable.h" +#include "producertable.h" +#include "mock_table.h" #include +#include using TableDataT = std::map>; using TablesT = std::map; @@ -23,21 +26,29 @@ namespace swss using namespace testing_db; - bool Table::get(const std::string &key, std::vector &ovalues) + void merge_values(std::vector &existing_values, const std::vector &values) { - auto table = gDB[m_pipe->getDbId()][getTableName()]; - if (table.find(key) == table.end()) + std::vector new_values(values); + std::set field_set; + for (auto &value : values) { - return false; + field_set.insert(fvField(value)); } - - ovalues = table[key]; - return true; + for (auto &value : existing_values) + { + auto &field = fvField(value); + if (field_set.find(field) != field_set.end()) + { + continue; + } + new_values.push_back(value); + } + existing_values.swap(new_values); } - bool Table::hget(const std::string &key, const std::string &field, std::string &value) + bool _hget(int dbId, const std::string &tableName, const std::string &key, const std::string &field, std::string &value) { - auto table = gDB[m_pipe->getDbId()][getTableName()]; + auto table = gDB[dbId][tableName]; if (table.find(key) == table.end()) { return false; @@ -55,13 +66,47 @@ namespace swss return false; } + bool Table::get(const std::string &key, std::vector &ovalues) + { + auto table = gDB[m_pipe->getDbId()][getTableName()]; + if (table.find(key) == table.end()) + { + return false; + } + + ovalues = table[key]; + return true; + } + + bool Table::hget(const std::string &key, const std::string &field, std::string &value) + { + return _hget(m_pipe->getDbId(), getTableName(), key, field, value); + } + void Table::set(const std::string &key, const std::vector &values, const std::string &op, const std::string &prefix) { auto &table = gDB[m_pipe->getDbId()][getTableName()]; - table[key] = values; + auto iter = table.find(key); + if (iter == table.end()) + { + table[key] = values; + } + else + { + merge_values(iter->second, values); + } + } + + void Table::hset(const std::string &key, const std::string &field, const std::string &value, + const std::string& op, const std::string& prefix) + { + FieldValueTuple fvp(field, value); + std::vector attrs = { fvp }; + + Table::set(key, attrs, op, prefix); } void Table::getKeys(std::vector &keys) @@ -81,6 +126,35 @@ namespace swss table->second.erase(key); } } + + void Table::hdel(const std::string &key, const std::string &field, const std::string &op, const std::string &prefix) + { + auto &table = gDB[m_pipe->getDbId()][getTableName()]; + auto key_iter = table.find(key); + if (key_iter == table.end()) + { + return; + } + + auto &attrs = key_iter->second; + std::vector new_attrs; + for (const auto &attr : attrs) + { + if (attr.first != field) + { + new_attrs.push_back(attr); + } + } + + if (new_attrs.empty()) + { + table.erase(key); + } + else + { + table[key] = new_attrs; + } + } void ProducerStateTable::set(const std::string &key, const std::vector &values, @@ -95,28 +169,130 @@ namespace swss } else { - std::vector new_values(values); - std::set field_set; - for (auto &value : values) + merge_values(iter->second, values); + } + } + + void ProducerStateTable::del(const std::string &key, + const std::string &op, + const std::string &prefix) + { + auto &table = gDB[m_pipe->getDbId()][getTableName()]; + table.erase(key); + } + + void ProducerStateTable::set(const std::vector& values) + { + for (const auto& kfv : values) + { + const std::string& key = kfvKey(kfv); + const std::string& op = kfvOp(kfv); + const std::vector& fvs = kfvFieldsValues(kfv); + + if (op == SET_COMMAND) { - field_set.insert(fvField(value)); + set(key, fvs); } - for (auto &value : iter->second) + else if (op == DEL_COMMAND) { - auto &field = fvField(value); - if (field_set.find(field) != field_set.end()) - { - continue; - } - new_values.push_back(value); + del(key); } - iter->second.swap(new_values); } } - void ProducerStateTable::del(const std::string &key, - const std::string &op, - const std::string &prefix) + void ProducerStateTable::del(const std::vector& keys) + { + for (const auto& key : keys) + { + del(key); + } + } + + std::shared_ptr DBConnector::hget(const std::string &key, const std::string &field) + { + std::string value; + + if (field == HGET_THROW_EXCEPTION_FIELD_NAME) + { + throw std::runtime_error("HGET failed, unexpected reply type, memory exception"); + } + + if (_hget(getDbId(), key, "", field, value)) + { + std::shared_ptr ptr(new std::string(value)); + return ptr; + } + else + { + return std::shared_ptr(NULL); + } + } + + int64_t DBConnector::hdel(const std::string &key, const std::string &field) + { + auto &table = gDB[getDbId()][key]; + auto key_iter = table.find(""); + if (key_iter == table.end()) + { + return 0; + } + + int removed = 0; + auto attrs = key_iter->second; + std::vector new_attrs; + for (auto attr_iter : attrs) + { + if (attr_iter.first == field) + { + removed += 1; + continue; + } + + new_attrs.push_back(attr_iter); + } + + table[""] = new_attrs; + + return removed; + } + + void DBConnector::hset(const std::string &key, const std::string &field, const std::string &value) + { + FieldValueTuple fvp(field, value); + std::vector attrs = { fvp }; + + auto &table = gDB[getDbId()][key]; + auto iter = table.find(""); + if (iter == table.end()) + { + table[""] = attrs; + } + else + { + merge_values(iter->second, attrs); + } + } + + void ProducerTable::set(const std::string &key, + const std::vector &values, + const std::string &op, + const std::string &prefix) + { + auto &table = gDB[m_pipe->getDbId()][getTableName()]; + auto iter = table.find(key); + if (iter == table.end()) + { + table[key] = values; + } + else + { + merge_values(iter->second, values); + } + } + + void ProducerTable::del(const std::string &key, + const std::string &op, + const std::string &prefix) { auto &table = gDB[m_pipe->getDbId()][getTableName()]; table.erase(key); diff --git a/tests/mock_tests/mock_table.h b/tests/mock_tests/mock_table.h index 88aed225ea9..24eaa39692e 100644 --- a/tests/mock_tests/mock_table.h +++ b/tests/mock_tests/mock_table.h @@ -2,6 +2,9 @@ #include "table.h" +// Use this field in the mock test to simulate an exception during hget. +#define HGET_THROW_EXCEPTION_FIELD_NAME "hget_throw_exception" + namespace testing_db { void reset(); diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp index 578b6c817b6..06349fed51d 100644 --- a/tests/mock_tests/mux_rollback_ut.cpp +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -5,47 +5,44 @@ #include "orch.h" #undef protected #include "ut_helper.h" +#define private public +#include "neighorch.h" +#include "muxorch.h" +#undef private #include "mock_orchagent_main.h" #include "mock_sai_api.h" +#include "mock_orch_test.h" #include "gtest/gtest.h" #include -DEFINE_SAI_API_MOCK(neighbor); -DEFINE_SAI_API_MOCK(route); -DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); -DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); +EXTERN_MOCK_FNS namespace mux_rollback_test { + DEFINE_SAI_API_MOCK(neighbor); + DEFINE_SAI_API_MOCK(route); + DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); + DEFINE_SAI_GENERIC_API_OBJECT_BULK_MOCK(next_hop, next_hop); + using ::testing::_; using namespace std; + using namespace mock_orch_test; using ::testing::Return; using ::testing::Throw; + using ::testing::DoAll; + using ::testing::SetArrayArgument; - static const string PEER_SWITCH_HOSTNAME = "peer_hostname"; - static const string PEER_IPV4_ADDRESS = "1.1.1.1"; static const string TEST_INTERFACE = "Ethernet4"; - static const string ACTIVE = "active"; - static const string STANDBY = "standby"; - static const string STATE = "state"; - static const string VLAN_NAME = "Vlan1000"; - static const string SERVER_IP = "192.168.0.2"; - class MuxRollbackTest : public ::testing::Test + sai_bulk_create_neighbor_entry_fn old_create_neighbor_entries; + sai_bulk_remove_neighbor_entry_fn old_remove_neighbor_entries; + sai_bulk_create_route_entry_fn old_create_route_entries; + sai_bulk_remove_route_entry_fn old_remove_route_entries; + sai_bulk_object_create_fn old_object_create; + sai_bulk_object_remove_fn old_object_remove; + + class MuxRollbackTest : public MockOrchTest { protected: - std::vector ut_orch_list; - shared_ptr m_app_db; - shared_ptr m_config_db; - shared_ptr m_state_db; - shared_ptr m_chassis_app_db; - MuxOrch *m_MuxOrch; - MuxCableOrch *m_MuxCableOrch; - MuxCable *m_MuxCable; - TunnelDecapOrch *m_TunnelDecapOrch; - MuxStateOrch *m_MuxStateOrch; - FlexCounterOrch *m_FlexCounterOrch; - mock_sai_neighbor_api_t mock_sai_neighbor_api_; - void SetMuxStateFromAppDb(std::string state) { Table mux_cable_table = Table(m_app_db.get(), APP_MUX_CABLE_TABLE_NAME); @@ -60,10 +57,11 @@ namespace mux_rollback_test EXPECT_EQ(state, m_MuxCable->getState()); } - void ApplyDualTorConfigs() + void ApplyInitialConfigs() { Table peer_switch_table = Table(m_config_db.get(), CFG_PEER_SWITCH_TABLE_NAME); - Table tunnel_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + Table decap_tunnel_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + Table decap_term_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TERM_TABLE_NAME); Table mux_cable_table = Table(m_config_db.get(), CFG_MUX_CABLE_TABLE_NAME); Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); @@ -77,35 +75,39 @@ namespace mux_rollback_test port_table.set("PortInitDone", { {} }); neigh_table.set( - VLAN_NAME + neigh_table.getTableNameSeparator() + SERVER_IP, { { "neigh", "62:f9:65:10:2f:04" }, + VLAN_1000 + neigh_table.getTableNameSeparator() + SERVER_IP1, { { "neigh", "62:f9:65:10:2f:04" }, { "family", "IPv4" } }); - vlan_table.set(VLAN_NAME, { { "admin_status", "up" }, + vlan_table.set(VLAN_1000, { { "admin_status", "up" }, { "mtu", "9100" }, { "mac", "00:aa:bb:cc:dd:ee" } }); vlan_member_table.set( - VLAN_NAME + vlan_member_table.getTableNameSeparator() + TEST_INTERFACE, + VLAN_1000 + vlan_member_table.getTableNameSeparator() + TEST_INTERFACE, { { "tagging_mode", "untagged" } }); - intf_table.set(VLAN_NAME, { { "grat_arp", "enabled" }, + intf_table.set(VLAN_1000, { { "grat_arp", "enabled" }, { "proxy_arp", "enabled" }, { "mac_addr", "00:00:00:00:00:00" } }); intf_table.set( - VLAN_NAME + neigh_table.getTableNameSeparator() + "192.168.0.1/21", { + VLAN_1000 + neigh_table.getTableNameSeparator() + "192.168.0.1/21", { { "scope", "global" }, { "family", "IPv4" }, }); - tunnel_table.set(MUX_TUNNEL, { { "dscp_mode", "uniform" }, - { "dst_ip", "2.2.2.2" }, - { "ecn_mode", "copy_from_outer" }, - { "encap_ecn_mode", "standard" }, - { "ttl_mode", "pipe" }, - { "tunnel_type", "IPINIP" } }); + decap_term_table.set( + MUX_TUNNEL + neigh_table.getTableNameSeparator() + "2.2.2.2", { { "src_ip", "1.1.1.1" }, + { "term_type", "P2P" } }); + + decap_tunnel_table.set(MUX_TUNNEL, { { "dscp_mode", "uniform" }, + { "src_ip", "1.1.1.1" }, + { "ecn_mode", "copy_from_outer" }, + { "encap_ecn_mode", "standard" }, + { "ttl_mode", "pipe" }, + { "tunnel_type", "IPINIP" } }); peer_switch_table.set(PEER_SWITCH_HOSTNAME, { { "address_ipv4", PEER_IPV4_ADDRESS } }); - mux_cable_table.set(TEST_INTERFACE, { { "server_ipv4", SERVER_IP + "/32" }, + mux_cable_table.set(TEST_INTERFACE, { { "server_ipv4", SERVER_IP1 + "/32" }, { "server_ipv6", "a::a/128" }, { "state", "auto" } }); @@ -117,7 +119,8 @@ namespace mux_rollback_test gIntfsOrch->addExistingData(&intf_table); static_cast(gIntfsOrch)->doTask(); - m_TunnelDecapOrch->addExistingData(&tunnel_table); + m_TunnelDecapOrch->addExistingData(&decap_tunnel_table); + m_TunnelDecapOrch->addExistingData(&decap_term_table); static_cast(m_TunnelDecapOrch)->doTask(); m_MuxOrch->addExistingData(&peer_switch_table); @@ -132,368 +135,169 @@ namespace mux_rollback_test m_MuxCable = m_MuxOrch->getMuxCable(TEST_INTERFACE); // We always expect the mux to be initialized to standby - EXPECT_EQ(STANDBY, m_MuxCable->getState()); - } - - void PrepareSai() - { - sai_attribute_t attr; - - attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; - attr.value.booldata = true; - - sai_status_t status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); - ASSERT_EQ(status, SAI_STATUS_SUCCESS); - - // Get switch source MAC address - attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; - status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - - ASSERT_EQ(status, SAI_STATUS_SUCCESS); - - gMacAddress = attr.value.mac; - - attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; - status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - - ASSERT_EQ(status, SAI_STATUS_SUCCESS); - - gVirtualRouterId = attr.value.oid; - - /* Create a loopback underlay router interface */ - vector underlay_intf_attrs; - - sai_attribute_t underlay_intf_attr; - underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; - underlay_intf_attr.value.oid = gVirtualRouterId; - underlay_intf_attrs.push_back(underlay_intf_attr); - - underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; - underlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; - underlay_intf_attrs.push_back(underlay_intf_attr); - - underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_MTU; - underlay_intf_attr.value.u32 = 9100; - underlay_intf_attrs.push_back(underlay_intf_attr); - - status = sai_router_intfs_api->create_router_interface(&gUnderlayIfId, gSwitchId, (uint32_t)underlay_intf_attrs.size(), underlay_intf_attrs.data()); - ASSERT_EQ(status, SAI_STATUS_SUCCESS); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); } - void SetUp() override + void PostSetUp() override { - map profile = { - { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, - { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } - }; - - ut_helper::initSaiApi(profile); - m_app_db = make_shared("APPL_DB", 0); - m_config_db = make_shared("CONFIG_DB", 0); - m_state_db = make_shared("STATE_DB", 0); - m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); - - PrepareSai(); - - const int portsorch_base_pri = 40; - vector ports_tables = { - { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, - { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, - { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, - { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, - { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } - }; - - vector flex_counter_tables = { - CFG_FLEX_COUNTER_TABLE_NAME - }; - - m_FlexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); - gDirectory.set(m_FlexCounterOrch); - ut_orch_list.push_back((Orch **)&m_FlexCounterOrch); - - static const vector route_pattern_tables = { - CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, - }; - gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); - gDirectory.set(gFlowCounterRouteOrch); - ut_orch_list.push_back((Orch **)&gFlowCounterRouteOrch); - - gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); - gDirectory.set(gVrfOrch); - ut_orch_list.push_back((Orch **)&gVrfOrch); - - gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); - gDirectory.set(gIntfsOrch); - ut_orch_list.push_back((Orch **)&gIntfsOrch); - - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - gDirectory.set(gPortsOrch); - ut_orch_list.push_back((Orch **)&gPortsOrch); - - const int fgnhgorch_pri = 15; - - vector fgnhg_tables = { - { CFG_FG_NHG, fgnhgorch_pri }, - { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, - { CFG_FG_NHG_MEMBER, fgnhgorch_pri } - }; - - gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); - gDirectory.set(gFgNhgOrch); - ut_orch_list.push_back((Orch **)&gFgNhgOrch); - - const int fdborch_pri = 20; - - vector app_fdb_tables = { - { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, - { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, - { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri } - }; - - TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); - TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); - gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); - gDirectory.set(gFdbOrch); - ut_orch_list.push_back((Orch **)&gFdbOrch); - - gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); - gDirectory.set(gNeighOrch); - ut_orch_list.push_back((Orch **)&gNeighOrch); - - m_TunnelDecapOrch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); - gDirectory.set(m_TunnelDecapOrch); - ut_orch_list.push_back((Orch **)&m_TunnelDecapOrch); - vector mux_tables = { - CFG_MUX_CABLE_TABLE_NAME, - CFG_PEER_SWITCH_TABLE_NAME - }; - - vector buffer_tables = { - APP_BUFFER_POOL_TABLE_NAME, - APP_BUFFER_PROFILE_TABLE_NAME, - APP_BUFFER_QUEUE_TABLE_NAME, - APP_BUFFER_PG_TABLE_NAME, - APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, - APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME - }; - gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); - - TableConnector stateDbSwitchTable(m_state_db.get(), STATE_SWITCH_CAPABILITY_TABLE_NAME); - TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); - TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); - - vector switch_tables = { - conf_asic_sensors, - app_switch_table - }; - vector policer_tables = { - TableConnector(m_config_db.get(), CFG_POLICER_TABLE_NAME), - TableConnector(m_config_db.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) - }; - - TableConnector stateDbStorm(m_state_db.get(), STATE_BUM_STORM_CAPABILITY_TABLE_NAME); - gPolicerOrch = new PolicerOrch(policer_tables, gPortsOrch); - gDirectory.set(gPolicerOrch); - ut_orch_list.push_back((Orch **)&gPolicerOrch); - - gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); - gDirectory.set(gSwitchOrch); - ut_orch_list.push_back((Orch **)&gSwitchOrch); - - gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); - gDirectory.set(gNhgOrch); - ut_orch_list.push_back((Orch **)&gNhgOrch); - - vector srv6_tables = { - APP_SRV6_SID_LIST_TABLE_NAME, - APP_SRV6_MY_SID_TABLE_NAME - }; - gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); - gDirectory.set(gSrv6Orch); - ut_orch_list.push_back((Orch **)&gSrv6Orch); - gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); - gDirectory.set(gCrmOrch); - ut_orch_list.push_back((Orch **)&gCrmOrch); - - const int routeorch_pri = 5; - vector route_tables = { - { APP_ROUTE_TABLE_NAME, routeorch_pri }, - { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } - }; - gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); - gDirectory.set(gRouteOrch); - ut_orch_list.push_back((Orch **)&gRouteOrch); - TableConnector stateDbMirrorSession(m_state_db.get(), STATE_MIRROR_SESSION_TABLE_NAME); - TableConnector confDbMirrorSession(m_config_db.get(), CFG_MIRROR_SESSION_TABLE_NAME); - gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch); - gDirectory.set(gMirrorOrch); - ut_orch_list.push_back((Orch **)&gMirrorOrch); - - TableConnector confDbAclTable(m_config_db.get(), CFG_ACL_TABLE_TABLE_NAME); - TableConnector confDbAclTableType(m_config_db.get(), CFG_ACL_TABLE_TYPE_TABLE_NAME); - TableConnector confDbAclRuleTable(m_config_db.get(), CFG_ACL_RULE_TABLE_NAME); - TableConnector appDbAclTable(m_app_db.get(), APP_ACL_TABLE_TABLE_NAME); - TableConnector appDbAclTableType(m_app_db.get(), APP_ACL_TABLE_TYPE_TABLE_NAME); - TableConnector appDbAclRuleTable(m_app_db.get(), APP_ACL_RULE_TABLE_NAME); - - vector acl_table_connectors = { - confDbAclTableType, - confDbAclTable, - confDbAclRuleTable, - appDbAclTable, - appDbAclRuleTable, - appDbAclTableType, - }; - gAclOrch = new AclOrch(acl_table_connectors, m_state_db.get(), - gSwitchOrch, gPortsOrch, gMirrorOrch, gNeighOrch, gRouteOrch, NULL); - gDirectory.set(gAclOrch); - ut_orch_list.push_back((Orch **)&gAclOrch); - - m_MuxOrch = new MuxOrch(m_config_db.get(), mux_tables, m_TunnelDecapOrch, gNeighOrch, gFdbOrch); - gDirectory.set(m_MuxOrch); - ut_orch_list.push_back((Orch **)&m_MuxOrch); - - m_MuxCableOrch = new MuxCableOrch(m_app_db.get(), m_state_db.get(), APP_MUX_CABLE_TABLE_NAME); - gDirectory.set(m_MuxCableOrch); - ut_orch_list.push_back((Orch **)&m_MuxCableOrch); - - m_MuxStateOrch = new MuxStateOrch(m_state_db.get(), STATE_HW_MUX_CABLE_TABLE_NAME); - gDirectory.set(m_MuxStateOrch); - ut_orch_list.push_back((Orch **)&m_MuxStateOrch); - - ApplyDualTorConfigs(); INIT_SAI_API_MOCK(neighbor); INIT_SAI_API_MOCK(route); INIT_SAI_API_MOCK(acl); INIT_SAI_API_MOCK(next_hop); MockSaiApis(); + old_create_neighbor_entries = gNeighOrch->gNeighBulker.create_entries; + old_remove_neighbor_entries = gNeighOrch->gNeighBulker.remove_entries; + old_object_create = gNeighOrch->gNextHopBulker.create_entries; + old_object_remove = gNeighOrch->gNextHopBulker.remove_entries; + old_create_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.create_entries; + old_remove_route_entries = m_MuxCable->nbr_handler_->gRouteBulker.remove_entries; + gNeighOrch->gNeighBulker.create_entries = mock_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = mock_remove_neighbor_entries; + gNeighOrch->gNextHopBulker.create_entries = mock_create_next_hops; + gNeighOrch->gNextHopBulker.remove_entries = mock_remove_next_hops; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = mock_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = mock_remove_route_entries; } - void TearDown() override + void PreTearDown() override { - for (std::vector::reverse_iterator rit = ut_orch_list.rbegin(); rit != ut_orch_list.rend(); ++rit) - { - Orch **orch = *rit; - delete *orch; - *orch = nullptr; - } - - gDirectory.m_values.clear(); - RestoreSaiApis(); - ut_helper::uninitSaiApi(); + DEINIT_SAI_API_MOCK(next_hop); + DEINIT_SAI_API_MOCK(acl); + DEINIT_SAI_API_MOCK(route); + DEINIT_SAI_API_MOCK(neighbor); + gNeighOrch->gNeighBulker.create_entries = old_create_neighbor_entries; + gNeighOrch->gNeighBulker.remove_entries = old_remove_neighbor_entries; + gNeighOrch->gNextHopBulker.create_entries = old_object_create; + gNeighOrch->gNextHopBulker.remove_entries = old_object_remove; + m_MuxCable->nbr_handler_->gRouteBulker.create_entries = old_create_route_entries; + m_MuxCable->nbr_handler_->gRouteBulker.remove_entries = old_remove_route_entries; } }; TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(ACTIVE); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); + SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) { - SetAndAssertMuxState(ACTIVE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(STANDBY); + SetAndAssertMuxState(ACTIVE_STATE); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); + SetAndAssertMuxState(STANDBY_STATE); } TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(ACTIVE); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); + SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) { - SetAndAssertMuxState(ACTIVE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(STANDBY); + SetAndAssertMuxState(ACTIVE_STATE); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; + EXPECT_CALL(*mock_sai_route_api, create_route_entries) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); + SetAndAssertMuxState(STANDBY_STATE); } TEST_F(MuxRollbackTest, StandbyToActiveAclNotFound) { EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry) .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(ACTIVE); + SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyAclAlreadyExists) { - SetAndAssertMuxState(ACTIVE); + SetAndAssertMuxState(ACTIVE_STATE); EXPECT_CALL(*mock_sai_acl_api, create_acl_entry) .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(STANDBY); + SetAndAssertMuxState(STANDBY_STATE); } TEST_F(MuxRollbackTest, StandbyToActiveNextHopAlreadyExists) { - EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) - .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); - SetAndAssertMuxState(ACTIVE); + std::vector exp_status{SAI_STATUS_ITEM_ALREADY_EXISTS}; + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hops) + .WillOnce(DoAll(SetArrayArgument<6>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_ALREADY_EXISTS))); + SetAndAssertMuxState(ACTIVE_STATE); } TEST_F(MuxRollbackTest, ActiveToStandbyNextHopNotFound) { - SetAndAssertMuxState(ACTIVE); - EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - SetAndAssertMuxState(STANDBY); + SetAndAssertMuxState(ACTIVE_STATE); + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hops) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); + SetAndAssertMuxState(STANDBY_STATE); } TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) { - EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + EXPECT_CALL(*mock_sai_route_api, remove_route_entries) .WillOnce(Throw(runtime_error("Mock runtime error"))); - SetMuxStateFromAppDb(ACTIVE); - EXPECT_EQ(STANDBY, m_MuxCable->getState()); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); } TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) { - SetAndAssertMuxState(ACTIVE); - EXPECT_CALL(*mock_sai_route_api, create_route_entry) + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_route_api, create_route_entries) .WillOnce(Throw(runtime_error("Mock runtime error"))); - SetMuxStateFromAppDb(STANDBY); - EXPECT_EQ(ACTIVE, m_MuxCable->getState()); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); } TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) { - EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entries) .WillOnce(Throw(logic_error("Mock logic error"))); - SetMuxStateFromAppDb(ACTIVE); - EXPECT_EQ(STANDBY, m_MuxCable->getState()); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); } TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) { - SetAndAssertMuxState(ACTIVE); - EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entries) .WillOnce(Throw(logic_error("Mock logic error"))); - SetMuxStateFromAppDb(STANDBY); - EXPECT_EQ(ACTIVE, m_MuxCable->getState()); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); } TEST_F(MuxRollbackTest, StandbyToActiveExceptionRollbackToStandby) { - EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hops) .WillOnce(Throw(exception())); - SetMuxStateFromAppDb(ACTIVE); - EXPECT_EQ(STANDBY, m_MuxCable->getState()); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); } TEST_F(MuxRollbackTest, ActiveToStandbyExceptionRollbackToActive) { - SetAndAssertMuxState(ACTIVE); - EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + SetAndAssertMuxState(ACTIVE_STATE); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hops) .WillOnce(Throw(exception())); - SetMuxStateFromAppDb(STANDBY); - EXPECT_EQ(ACTIVE, m_MuxCable->getState()); + SetMuxStateFromAppDb(STANDBY_STATE); + EXPECT_EQ(ACTIVE_STATE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, StandbyToActiveNextHopTableFullRollbackToActive) + { + std::vector exp_status{SAI_STATUS_TABLE_FULL}; + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hops) + .WillOnce(DoAll(SetArrayArgument<6>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_TABLE_FULL))); + SetMuxStateFromAppDb(ACTIVE_STATE); + EXPECT_EQ(STANDBY_STATE, m_MuxCable->getState()); } } diff --git a/tests/mock_tests/neighorch_ut.cpp b/tests/mock_tests/neighorch_ut.cpp new file mode 100644 index 00000000000..7b08de08751 --- /dev/null +++ b/tests/mock_tests/neighorch_ut.cpp @@ -0,0 +1,271 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#define private public +#include "routeorch.h" +#undef private +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_orch_test.h" + +EXTERN_MOCK_FNS + +namespace neighorch_test +{ + DEFINE_SAI_API_MOCK(neighbor); + using namespace std; + using namespace mock_orch_test; + using ::testing::Return; + using ::testing::Throw; + + static const string TEST_IP = "10.10.10.10"; + static const string VRF_3000 = "Vrf3000"; + static const NeighborEntry VLAN1000_NEIGH = NeighborEntry(TEST_IP, VLAN_1000); + static const NeighborEntry VLAN2000_NEIGH = NeighborEntry(TEST_IP, VLAN_2000); + static const NeighborEntry VLAN3000_NEIGH = NeighborEntry(TEST_IP, VLAN_3000); + static const NeighborEntry VLAN4000_NEIGH = NeighborEntry(TEST_IP, VLAN_4000); + + class NeighOrchTest : public MockOrchTest + { + protected: + void SetAndAssertMuxState(std::string interface, std::string state) + { + MuxCable *muxCable = m_MuxOrch->getMuxCable(interface); + muxCable->setState(state); + EXPECT_EQ(state, muxCable->getState()); + } + + void LearnNeighbor(std::string vlan, std::string ip, std::string mac) + { + Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + string key = vlan + neigh_table.getTableNameSeparator() + ip; + neigh_table.set(key, { { "neigh", mac }, { "family", "IPv4" } }); + gNeighOrch->addExistingData(&neigh_table); + static_cast(gNeighOrch)->doTask(); + neigh_table.del(key); + } + + void ApplyInitialConfigs() + { + Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); + Table vlan_member_table = Table(m_app_db.get(), APP_VLAN_MEMBER_TABLE_NAME); + Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + Table intf_table = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + Table fdb_table = Table(m_app_db.get(), APP_FDB_TABLE_NAME); + Table vrf_table = Table(m_app_db.get(), APP_VRF_TABLE_NAME); + + auto ports = ut_helper::getInitialSaiPorts(); + port_table.set(ETHERNET0, ports[ETHERNET0]); + port_table.set(ETHERNET4, ports[ETHERNET4]); + port_table.set(ETHERNET8, ports[ETHERNET8]); + port_table.set("PortConfigDone", { { "count", to_string(1) } }); + port_table.set("PortInitDone", { {} }); + + vrf_table.set(VRF_3000, { {"NULL", "NULL"} }); + + vlan_table.set(VLAN_1000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "00:aa:bb:cc:dd:ee" } }); + vlan_table.set(VLAN_2000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "aa:11:bb:22:cc:33" } }); + vlan_table.set(VLAN_3000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "99:ff:88:ee:77:dd" } }); + vlan_table.set(VLAN_4000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "99:ff:88:ee:77:dd" } }); + vlan_member_table.set( + VLAN_1000 + vlan_member_table.getTableNameSeparator() + ETHERNET0, + { { "tagging_mode", "untagged" } }); + + vlan_member_table.set( + VLAN_2000 + vlan_member_table.getTableNameSeparator() + ETHERNET4, + { { "tagging_mode", "untagged" } }); + + vlan_member_table.set( + VLAN_3000 + vlan_member_table.getTableNameSeparator() + ETHERNET8, + { { "tagging_mode", "untagged" } }); + + vlan_member_table.set( + VLAN_4000 + vlan_member_table.getTableNameSeparator() + ETHERNET12, + { { "tagging_mode", "untagged" } }); + + intf_table.set(VLAN_1000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set(VLAN_2000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set(VLAN_3000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "vrf_name", VRF_3000 }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set(VLAN_4000, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "vrf_name", VRF_3000 }, + { "mac_addr", "00:00:00:00:00:00" } }); + + intf_table.set( + VLAN_1000 + neigh_table.getTableNameSeparator() + "192.168.0.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); + + intf_table.set( + VLAN_2000 + neigh_table.getTableNameSeparator() + "192.168.2.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); + intf_table.set( + VLAN_3000 + neigh_table.getTableNameSeparator() + "192.168.3.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); + + intf_table.set( + VLAN_4000 + neigh_table.getTableNameSeparator() + "192.168.3.1/24", { + { "scope", "global" }, + { "family", "IPv4" }, + }); + + gPortsOrch->addExistingData(&port_table); + gPortsOrch->addExistingData(&vlan_table); + gPortsOrch->addExistingData(&vlan_member_table); + static_cast(gPortsOrch)->doTask(); + + gVrfOrch->addExistingData(&vrf_table); + static_cast(gVrfOrch)->doTask(); + + gIntfsOrch->addExistingData(&intf_table); + static_cast(gIntfsOrch)->doTask(); + + fdb_table.set( + VLAN_1000 + fdb_table.getTableNameSeparator() + MAC1, + { { "type", "dynamic" }, + { "port", ETHERNET0 } }); + + fdb_table.set( + VLAN_2000 + fdb_table.getTableNameSeparator() + MAC2, + { { "type", "dynamic" }, + { "port", ETHERNET4 } }); + + fdb_table.set( + VLAN_1000 + fdb_table.getTableNameSeparator() + MAC3, + { { "type", "dynamic" }, + { "port", ETHERNET0 } }); + + fdb_table.set( + VLAN_3000 + fdb_table.getTableNameSeparator() + MAC4, + { { "type", "dynamic" }, + { "port", ETHERNET8 } }); + + fdb_table.set( + VLAN_4000 + fdb_table.getTableNameSeparator() + MAC5, + { { "type", "dynamic" }, + { "port", ETHERNET12 } }); + + gFdbOrch->addExistingData(&fdb_table); + static_cast(gFdbOrch)->doTask(); + } + + void PostSetUp() override + { + INIT_SAI_API_MOCK(neighbor); + MockSaiApis(); + } + + void PreTearDown() override + { + RestoreSaiApis(); + } + }; + + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 0); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC3); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 0); + } + + TEST_F(NeighOrchTest, MultiVlanUnableToRemoveNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + NextHopKey nexthop = { TEST_IP, VLAN_1000 }; + gNeighOrch->m_syncdNextHops[nexthop].ref_count = 1; + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN2000_NEIGH), 0); + } + + TEST_F(NeighOrchTest, MultiVlanDifferentVrfDuplicateNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + LearnNeighbor(VLAN_3000, TEST_IP, MAC4); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN1000_NEIGH), 1); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 1); + } + + TEST_F(NeighOrchTest, MultiVlanSameVrfDuplicateNeighbor) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_3000, TEST_IP, MAC4); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 1); + + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry); + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry); + LearnNeighbor(VLAN_4000, TEST_IP, MAC5); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN3000_NEIGH), 0); + ASSERT_EQ(gNeighOrch->m_syncdNeighbors.count(VLAN4000_NEIGH), 1); + } + + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighborMissingExistingVlanPort) + { + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + gPortsOrch->m_portList.erase(VLAN_1000); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + } + + TEST_F(NeighOrchTest, MultiVlanDuplicateNeighborMissingNewVlanPort) + { + LearnNeighbor(VLAN_1000, TEST_IP, MAC1); + + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry).Times(0); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry).Times(0); + gPortsOrch->m_portList.erase(VLAN_2000); + LearnNeighbor(VLAN_2000, TEST_IP, MAC2); + } +} diff --git a/tests/mock_tests/orchdaemon_ut.cpp b/tests/mock_tests/orchdaemon_ut.cpp index a107b6ba6da..82a214d1f5c 100644 --- a/tests/mock_tests/orchdaemon_ut.cpp +++ b/tests/mock_tests/orchdaemon_ut.cpp @@ -1,8 +1,12 @@ +#define protected public +#include "orch.h" #include "orchdaemon.h" +#undef protected #include "dbconnector.h" #include #include #include "mock_sai_switch.h" +#include "saihelper.h" extern sai_switch_api_t* sai_switch_api; sai_switch_api_t test_sai_switch; @@ -13,6 +17,7 @@ namespace orchdaemon_test using ::testing::_; using ::testing::Return; using ::testing::StrictMock; + using ::testing::InSequence; DBConnector appl_db("APPL_DB", 0); DBConnector state_db("STATE_DB", 0); @@ -40,7 +45,9 @@ namespace orchdaemon_test ~OrchDaemonTest() { sai_switch_api = nullptr; + delete orchd; }; + }; TEST_F(OrchDaemonTest, logRotate) @@ -49,4 +56,160 @@ namespace orchdaemon_test orchd->logRotate(); } + + TEST_F(OrchDaemonTest, ringBuffer) + { + int test_ring_size = 2; + + auto ring = new RingBuffer(test_ring_size); + + for (int i = 0; i < test_ring_size - 1; i++) + { + EXPECT_TRUE(ring->push([](){})); + } + EXPECT_FALSE(ring->push([](){})); + + AnyTask task; + for (int i = 0; i < test_ring_size - 1; i++) + { + EXPECT_TRUE(ring->pop(task)); + } + + EXPECT_FALSE(ring->pop(task)); + + ring->setIdle(true); + EXPECT_TRUE(ring->IsIdle()); + delete ring; + } + + TEST_F(OrchDaemonTest, RingThread) + { + orchd->enableRingBuffer(); + + // verify ring buffer is created + EXPECT_TRUE(Executor::gRingBuffer != nullptr); + EXPECT_TRUE(Executor::gRingBuffer == Orch::gRingBuffer); + + orchd->ring_thread = std::thread(&OrchDaemon::popRingBuffer, orchd); + auto gRingBuffer = orchd->gRingBuffer; + + // verify ring_thread is created + while (!gRingBuffer->thread_created) + { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + + bool task_executed = false; + AnyTask task = [&task_executed]() { task_executed = true;}; + gRingBuffer->push(task); + + // verify ring thread is conditional locked + EXPECT_TRUE(gRingBuffer->IsIdle()); + EXPECT_FALSE(task_executed); + + gRingBuffer->notify(); + + // verify notify() would activate the ring thread when buffer is not empty + while (!gRingBuffer->IsEmpty() || !gRingBuffer->IsIdle()) + { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + + EXPECT_TRUE(task_executed); + + delete orchd; + + // verify the destructor of orchdaemon will stop the ring thread + EXPECT_FALSE(orchd->ring_thread.joinable()); + // verify the destructor of orchdaemon also resets ring buffer + EXPECT_TRUE(Executor::gRingBuffer == nullptr); + + // reset the orchd for other testcases + orchd = new OrchDaemon(&appl_db, &config_db, &state_db, &counters_db, nullptr); + } + + TEST_F(OrchDaemonTest, PushRingBuffer) + { + orchd->enableRingBuffer(); + + auto gRingBuffer = orchd->gRingBuffer; + + std::vector tables = {"ROUTE_TABLE", "OTHER_TABLE"}; + auto orch = make_shared(&appl_db, tables); + auto route_consumer = dynamic_cast(orch->getExecutor("ROUTE_TABLE")); + auto other_consumer = dynamic_cast(orch->getExecutor("OTHER_TABLE")); + + EXPECT_TRUE(gRingBuffer->serves("ROUTE_TABLE")); + EXPECT_FALSE(gRingBuffer->serves("OTHER_TABLE")); + + int x = 0; + route_consumer->processAnyTask([&](){x=3;}); + // verify `processAnyTask` is equivalent to executing the task immediately + EXPECT_TRUE(gRingBuffer->IsEmpty() && gRingBuffer->IsIdle() && !gRingBuffer->thread_created && x==3); + + gRingBuffer->thread_created = true; // set the flag to assume the ring thread is created (actually not) + + // verify `processAnyTask` is equivalent to executing the task immediately when ring is empty and idle + other_consumer->processAnyTask([&](){x=4;}); + EXPECT_TRUE(gRingBuffer->IsEmpty() && gRingBuffer->IsIdle() && x==4); + + route_consumer->processAnyTask([&](){x=5;}); + // verify `processAnyTask` would not execute the task if thread_created is true + // it only pushes the task to the ring buffer, without executing it + EXPECT_TRUE(!gRingBuffer->IsEmpty() && x==4); + + AnyTask task; + gRingBuffer->pop(task); + task(); + // hence the task needs to be popped and explicitly executed + EXPECT_TRUE(gRingBuffer->IsEmpty() && x==5); + + orchd->disableRingBuffer(); + } + + TEST_F(OrchDaemonTest, TestRedisFlushFailure) + { + InSequence s; + + EXPECT_CALL(mock_sai_switch_, set_switch_attribute( _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(mock_sai_switch_, set_switch_attribute(_, _)); + + orchd->flush(); + } + + TEST_F(OrchDaemonTest, TestFlushWithRingBufferEntry) + { + // Allow one or more calls to set_switch_attribute + EXPECT_CALL(mock_sai_switch_, set_switch_attribute(testing::_, testing::_)) + .WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + + orchd->enableRingBuffer(); + + auto gRingBuffer = orchd->gRingBuffer; + + std::vector tables = {"ROUTE_TABLE", "OTHER_TABLE"}; + auto orch = make_shared(&appl_db, tables); + auto route_consumer = dynamic_cast(orch->getExecutor("ROUTE_TABLE")); + + EXPECT_TRUE(gRingBuffer->serves("ROUTE_TABLE")); + + int x = 0; + + gRingBuffer->thread_created = true; // set the flag to assume the ring thread is created (actually not) + route_consumer->processAnyTask([&](){x=5;}); + + // Ring is not empty, flush would not be triggered + orchd->flush(); + EXPECT_TRUE(!gRingBuffer->IsEmpty() && x==0); + AnyTask task; + gRingBuffer->pop(task); + task(); + // hence the task needs to be popped and explicitly executed + EXPECT_TRUE(gRingBuffer->IsEmpty() && x==5); + // Ring is empty, flush would be triggered + orchd->flush(); + + orchd->disableRingBuffer(); + } + } diff --git a/tests/mock_tests/portal.h b/tests/mock_tests/portal.h index 8f0c4ab2dbe..cfc78ed45de 100644 --- a/tests/mock_tests/portal.h +++ b/tests/mock_tests/portal.h @@ -7,6 +7,7 @@ #include "crmorch.h" #include "copporch.h" #include "sfloworch.h" +#include "twamporch.h" #include "directory.h" #undef protected @@ -81,6 +82,16 @@ struct Portal obj.getTrapIdsFromTrapGroup(trapGroupOid, trapIdList); return trapIdList; } + + static task_process_status processCoppRule(CoppOrch &obj, Consumer& processCoppRule) + { + return obj.processCoppRule(processCoppRule); + } + + static const std::unordered_set& getSupportedTrapIds(const CoppOrch &obj) + { + return obj.supported_trap_ids; + } }; struct SflowOrchInternal @@ -101,6 +112,19 @@ struct Portal } }; + struct TwampOrchInternal + { + static bool getTwampSessionStatus(TwampOrch &obj, const string &name, string& status) + { + return obj.getSessionStatus(name, status); + } + + static TwampStatsTable getTwampSessionStatistics(TwampOrch &obj) + { + return obj.m_twampStatistics; + } + }; + struct DirectoryInternal { template diff --git a/tests/mock_tests/portmgr_ut.cpp b/tests/mock_tests/portmgr_ut.cpp index 27dc61e03e3..b7b83590bd5 100644 --- a/tests/mock_tests/portmgr_ut.cpp +++ b/tests/mock_tests/portmgr_ut.cpp @@ -123,4 +123,82 @@ namespace portmgr_ut ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" mtu \"1518\"", mockCallArgs[0]); ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" up", mockCallArgs[1]); } + + TEST_F(PortMgrTest, ConfigurePortPTDefaultTimestampTemplate) + { + Table state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table app_port_table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + + // Port is not ready, verify that doTask does not handle port configuration + + cfg_port_table.set("Ethernet0", { + {"speed", "100000"}, + {"index", "1"}, + {"pt_interface_id", "129"} + }); + mockCallArgs.clear(); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + ASSERT_TRUE(mockCallArgs.empty()); + std::vector values; + app_port_table.get("Ethernet0", values); + auto value_opt = swss::fvsGetValue(values, "mtu", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_MTU_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "admin_status", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_ADMIN_STATUS_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "speed", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("100000", value_opt.get()); + value_opt = swss::fvsGetValue(values, "index", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("1", value_opt.get()); + value_opt = swss::fvsGetValue(values, "pt_interface_id", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("129", value_opt.get()); + value_opt = swss::fvsGetValue(values, "pt_timestamp_template", true); + ASSERT_FALSE(value_opt); + } + + TEST_F(PortMgrTest, ConfigurePortPTNonDefaultTimestampTemplate) + { + Table state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table app_port_table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + + // Port is not ready, verify that doTask does not handle port configuration + + cfg_port_table.set("Ethernet0", { + {"speed", "100000"}, + {"index", "1"}, + {"pt_interface_id", "129"}, + {"pt_timestamp_template", "template2"} + }); + mockCallArgs.clear(); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + ASSERT_TRUE(mockCallArgs.empty()); + std::vector values; + app_port_table.get("Ethernet0", values); + auto value_opt = swss::fvsGetValue(values, "mtu", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_MTU_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "admin_status", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_ADMIN_STATUS_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "speed", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("100000", value_opt.get()); + value_opt = swss::fvsGetValue(values, "index", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("1", value_opt.get()); + value_opt = swss::fvsGetValue(values, "pt_interface_id", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("129", value_opt.get()); + value_opt = swss::fvsGetValue(values, "pt_timestamp_template", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("template2", value_opt.get()); + } } diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index fca4f34bebf..63c3df5d946 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -11,6 +11,7 @@ #define private public #include "pfcactionhandler.h" #include "switchorch.h" +#include "notifications.h" #include #undef private #define private public @@ -19,7 +20,21 @@ #include +// Add operator<< for vector to support boost::variant printing in tests for SerdesValue +namespace std { + inline ostream& operator<<(ostream& os, const vector& vec) { + os << "["; + for (size_t i = 0; i < vec.size(); ++i) { + if (i > 0) os << ", "; + os << vec[i]; // Decimal format + } + os << "]"; + return os; + } +} + extern redisReply *mockReply; +extern sai_redis_communication_mode_t gRedisCommunicationMode; using ::testing::_; using ::testing::StrictMock; @@ -27,6 +42,8 @@ namespace portsorch_test { using namespace std; + bool support_object_type_list = true; + // SAI default ports std::map> defaultPortList; @@ -37,6 +54,8 @@ namespace portsorch_test bool not_support_fetching_fec; uint32_t _sai_set_port_fec_count; + uint32_t _sai_set_port_auto_neg_count; + uint32_t _sai_set_port_tpid_count; int32_t _sai_port_fec_mode; vector mock_port_fec_modes = {SAI_PORT_FEC_MODE_RS, SAI_PORT_FEC_MODE_FC}; @@ -68,6 +87,11 @@ namespace portsorch_test attr_list[0].value.s32 = _sai_port_fec_mode; status = SAI_STATUS_SUCCESS; } + else if (attr_count== 1 && attr_list[0].id == SAI_PORT_ATTR_OPER_STATUS) + { + attr_list[0].value.u32 = (uint32_t)SAI_PORT_OPER_STATUS_UP; + status = SAI_STATUS_SUCCESS; + } else { status = pold_sai_port_api->get_port_attribute(port_id, attr_count, attr_list); @@ -78,6 +102,23 @@ namespace portsorch_test uint32_t _sai_set_pfc_mode_count; uint32_t _sai_set_admin_state_up_count; uint32_t _sai_set_admin_state_down_count; + bool set_pt_interface_id_fail = false; + bool set_pt_timestamp_template_fail = false; + bool set_port_tam_fail = false; + uint32_t set_pt_interface_id_count = false; + uint32_t set_pt_timestamp_template_count = false; + uint32_t set_port_tam_count = false; + uint32_t set_pt_interface_id_failures = 0; + uint32_t set_pt_timestamp_template_failures = 0; + uint32_t set_port_tam_failures = 0; + bool set_link_event_damping_success = true; + uint32_t _sai_set_link_event_damping_algorithm_count; + uint32_t _sai_set_link_event_damping_config_count; + int32_t _sai_link_event_damping_algorithm = 0; + bool set_pfc_asym_not_supported = false; + uint32_t set_pfc_asym_failures; + sai_redis_link_event_damping_algo_aied_config_t _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + sai_status_t _ut_stub_sai_set_port_attribute( _In_ sai_object_id_t port_id, _In_ const sai_attribute_t *attr) @@ -89,12 +130,19 @@ namespace portsorch_test } else if (attr[0].id == SAI_PORT_ATTR_AUTO_NEG_MODE) { + _sai_set_port_auto_neg_count++; /* Simulating failure case */ return SAI_STATUS_FAILURE; } - else if (attr[0].id == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED) - { - _sai_set_pfc_mode_count++; + else if (attr[0].id == SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE) + { + _sai_set_pfc_mode_count++; + /* Simulating failure case */ + if (set_pfc_asym_not_supported) + { + set_pfc_asym_failures++; + return SAI_STATUS_NOT_SUPPORTED; + } } else if (attr[0].id == SAI_PORT_ATTR_ADMIN_STATE) { @@ -104,9 +152,102 @@ namespace portsorch_test _sai_set_admin_state_down_count++; } } + else if (attr[0].id == SAI_PORT_ATTR_PATH_TRACING_INTF) + { + set_pt_interface_id_count++; + /* Simulating failure case */ + if (set_pt_interface_id_fail) + { + set_pt_interface_id_failures++; + return SAI_STATUS_INVALID_ATTR_VALUE_0; + } + } + else if (attr[0].id == SAI_PORT_ATTR_PATH_TRACING_TIMESTAMP_TYPE) + { + set_pt_timestamp_template_count++; + /* Simulating failure case */ + if (set_pt_timestamp_template_fail) + { + set_pt_timestamp_template_failures++; + return SAI_STATUS_INVALID_ATTR_VALUE_0; + } + } + else if (attr[0].id == SAI_PORT_ATTR_TAM_OBJECT) + { + set_port_tam_count++; + /* Simulating failure case */ + if (set_port_tam_fail) + { + set_port_tam_failures++; + return SAI_STATUS_INVALID_ATTR_VALUE_0; + } + } + else if (attr[0].id == SAI_PORT_ATTR_TPID) + { + _sai_set_port_tpid_count++; + } + else if (attr[0].id == SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGORITHM) + { + _sai_set_link_event_damping_algorithm_count++; + + if (set_link_event_damping_success) { + _sai_link_event_damping_algorithm = attr[0].value.s32; + return SAI_STATUS_SUCCESS; + } + return SAI_STATUS_FAILURE; + } + else if (attr[0].id == SAI_REDIS_PORT_ATTR_LINK_EVENT_DAMPING_ALGO_AIED_CONFIG) + { + _sai_set_link_event_damping_config_count++; + + if (set_link_event_damping_success) { + _sai_link_event_damping_config = *(reinterpret_cast(attr[0].value.ptr)); + return SAI_STATUS_SUCCESS; + } + return SAI_STATUS_FAILURE; + } return pold_sai_port_api->set_port_attribute(port_id, attr); } + vector supported_sai_objects = { + SAI_OBJECT_TYPE_PORT, + SAI_OBJECT_TYPE_LAG, + SAI_OBJECT_TYPE_TAM, + SAI_OBJECT_TYPE_TAM_INT, + SAI_OBJECT_TYPE_TAM_COLLECTOR, + SAI_OBJECT_TYPE_TAM_REPORT, + SAI_OBJECT_TYPE_TAM_TRANSPORT, + SAI_OBJECT_TYPE_TAM_TELEMETRY, + SAI_OBJECT_TYPE_TAM_EVENT_THRESHOLD + }; + + sai_status_t _ut_stub_sai_get_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + sai_status_t status; + if (attr_count == 1 && attr_list[0].id == SAI_SWITCH_ATTR_SUPPORTED_OBJECT_TYPE_LIST) + { + if (!support_object_type_list) + { + return SAI_STATUS_ATTR_NOT_IMPLEMENTED_0; + } + uint32_t i; + for (i = 0; i < attr_list[0].value.s32list.count && i < supported_sai_objects.size(); i++) + { + attr_list[0].value.s32list.list[i] = supported_sai_objects[i]; + } + attr_list[0].value.s32list.count = i; + status = SAI_STATUS_SUCCESS; + } + else + { + status = pold_sai_switch_api->get_switch_attribute(switch_id, attr_count, attr_list); + } + return status; + } + uint32_t *_sai_syncd_notifications_count; int32_t *_sai_syncd_notification_event; uint32_t _sai_switch_dlr_packet_action_count; @@ -117,7 +258,7 @@ namespace portsorch_test { if (attr[0].id == SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD) { - *_sai_syncd_notifications_count =+ 1; + *_sai_syncd_notifications_count = *_sai_syncd_notifications_count + 1; *_sai_syncd_notification_event = attr[0].value.s32; } else if (attr[0].id == SAI_SWITCH_ATTR_PFC_DLR_PACKET_ACTION) @@ -147,6 +288,7 @@ namespace portsorch_test ut_sai_switch_api = *sai_switch_api; pold_sai_switch_api = sai_switch_api; ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + ut_sai_switch_api.get_switch_attribute = _ut_stub_sai_get_switch_attribute; sai_switch_api = &ut_sai_switch_api; } @@ -415,6 +557,22 @@ namespace portsorch_test ASSERT_EQ((gPfcwdOrch), nullptr); gPfcwdOrch = new PfcWdSwOrch(m_config_db.get(), pfc_wd_tables, portStatIds, queueStatIds, queueAttrIds, 100); + vector mlag_tables = { + { CFG_MCLAG_TABLE_NAME }, + { CFG_MCLAG_INTF_TABLE_NAME } + }; + + ASSERT_EQ(gMlagOrch, nullptr); + gMlagOrch = new MlagOrch(m_config_db.get(), mlag_tables); + + vector debug_counter_tables = { + CFG_DEBUG_COUNTER_TABLE_NAME, + CFG_DEBUG_COUNTER_DROP_REASON_TABLE_NAME, + CFG_DEBUG_DROP_MONITOR_TABLE_NAME + }; + + ASSERT_EQ(gDebugCounterOrch, nullptr); + gDebugCounterOrch = new DebugCounterOrch(m_config_db.get(), debug_counter_tables, 1000); } virtual void TearDown() override @@ -433,6 +591,8 @@ namespace portsorch_test gFdbOrch = nullptr; delete gIntfsOrch; gIntfsOrch = nullptr; + delete gDebugCounterOrch; + gDebugCounterOrch = nullptr; delete gPortsOrch; gPortsOrch = nullptr; delete gBufferOrch; @@ -443,7 +603,8 @@ namespace portsorch_test gQosOrch = nullptr; delete gSwitchOrch; gSwitchOrch = nullptr; - + delete gMlagOrch; + gMlagOrch = nullptr; // clear orchs saved in directory gDirectory.m_values.clear(); } @@ -500,6 +661,237 @@ namespace portsorch_test }; + /* + * Test port flap count + */ + TEST_F(PortsOrchTest, PortFlapCount) + { + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + // Apply configuration : create ports + static_cast(gPortsOrch)->doTask(); + + // Get first port, expect the oper status is not UP + Port port; + gPortsOrch->getPort("Ethernet0", port); + ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); + ASSERT_TRUE(port.m_flap_count == 0); + + auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + + // mock a redis reply for notification, it notifies that Ehernet0 is going to up + for (uint32_t count=0; count < 5; count++) { + sai_port_oper_status_t oper_status = (count % 2 == 0) ? SAI_PORT_OPER_STATUS_UP : SAI_PORT_OPER_STATUS_DOWN; + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + sai_port_oper_status_notification_t port_oper_status; + memset(&port_oper_status, 0, sizeof(port_oper_status)); + port_oper_status.port_state = oper_status; + port_oper_status.port_id = port.m_port_id; + std::string data = sai_serialize_port_oper_status_ntf(1, &port_oper_status); + std::vector notifyValues; + FieldValueTuple opdata("port_state_change", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + consumer->readData(); + gPortsOrch->doTask(*consumer); + mockReply = nullptr; + + // Call the orchagent port state change callback method with zmq mode + sai_redis_communication_mode_t oldRedisCommunicationMode = gRedisCommunicationMode; + gRedisCommunicationMode = SAI_REDIS_COMMUNICATION_MODE_ZMQ_SYNC; + on_port_state_change(1, &port_oper_status); + gRedisCommunicationMode = oldRedisCommunicationMode; + + gPortsOrch->getPort("Ethernet0", port); + ASSERT_TRUE(port.m_oper_status == oper_status); + ASSERT_TRUE(port.m_flap_count == count+1); + } + + cleanupPorts(gPortsOrch); + } + + /* + * Test port oper error count + */ + TEST_F(PortsOrchTest, PortOperErrorStatus) + { + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table portTableOpErrState = Table(m_state_db.get(), STATE_PORT_OPER_ERR_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + // Apply configuration : create ports + static_cast(gPortsOrch)->doTask(); + + // Get first port, expect the oper status is not UP + Port port; + gPortsOrch->getPort("Ethernet0", port); + ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); + ASSERT_TRUE(port.m_flap_count == 0); + + auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + + std::vector errors = { + SAI_PORT_ERROR_STATUS_MAC_LOCAL_FAULT, + SAI_PORT_ERROR_STATUS_MAC_REMOTE_FAULT, + static_cast( + SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS | + SAI_PORT_ERROR_STATUS_MAC_LOCAL_FAULT), + static_cast( + SAI_PORT_ERROR_STATUS_FEC_LOSS_ALIGNMENT_MARKER | + SAI_PORT_ERROR_STATUS_HIGH_SER | + SAI_PORT_ERROR_STATUS_HIGH_BER | + SAI_PORT_ERROR_STATUS_CRC_RATE), + SAI_PORT_ERROR_STATUS_DATA_UNIT_CRC_ERROR, + static_cast( + SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS | + SAI_PORT_ERROR_STATUS_DATA_UNIT_SIZE | + SAI_PORT_ERROR_STATUS_DATA_UNIT_MISALIGNMENT_ERROR), + static_cast( + SAI_PORT_ERROR_STATUS_CODE_GROUP_ERROR | + SAI_PORT_ERROR_STATUS_SIGNAL_LOCAL_ERROR | + SAI_PORT_ERROR_STATUS_NO_RX_REACHABILITY), + static_cast( + SAI_PORT_ERROR_STATUS_FEC_SYNC_LOSS | + SAI_PORT_ERROR_STATUS_MAC_REMOTE_FAULT) + }; + + // mock a redis reply for notification, it notifies that Ehernet0 is going to up + for (uint32_t count=0; count < errors.size(); count++) { + sai_port_oper_status_t oper_status = SAI_PORT_OPER_STATUS_DOWN; + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + sai_port_oper_status_notification_t port_oper_status; + memset(&port_oper_status, 0, sizeof(port_oper_status)); + port_oper_status.port_error_status = errors[count]; + port_oper_status.port_state = oper_status; + port_oper_status.port_id = port.m_port_id; + std::string data = sai_serialize_port_oper_status_ntf(1, &port_oper_status); + std::vector notifyValues; + FieldValueTuple opdata("port_state_change", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + consumer->readData(); + gPortsOrch->doTask(*consumer); + mockReply = nullptr; + gPortsOrch->getPort("Ethernet0", port); + gPortsOrch->updatePortErrorStatus(port, errors[count]); + ASSERT_TRUE(port.m_oper_error_status == errors[count]); + } + + std::vector values; + portTableOpErrState.get("Ethernet0", values); + + for (auto &valueTuple : values) + { + if (fvField(valueTuple) == "mac_local_fault_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "2"); + } + else if (fvField(valueTuple) == "mac_remote_fault_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "2"); + } + else if (fvField(valueTuple) == "oper_error_status") + { + ASSERT_TRUE(fvValue(valueTuple) == "3"); + } + else if (fvField(valueTuple) == "fec_sync_loss_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "3"); + } + else if (fvField(valueTuple) == "fec_alignment_loss_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "high_ser_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "high_ber_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "crc_rate_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "data_unit_crc_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "data_unit_size_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "data_unit_misalignment_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "code_group_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "signal_local_error_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + else if (fvField(valueTuple) == "no_rx_reachability_count") + { + ASSERT_TRUE(fvValue(valueTuple) == "1"); + } + } + + cleanupPorts(gPortsOrch); + } + TEST_F(PortsOrchTest, PortBulkCreateRemove) { auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); @@ -529,6 +921,7 @@ namespace portsorch_test { "lanes", lanes.str() }, { "speed", "100000" }, { "autoneg", "off" }, + { "unreliable_los", "off" }, { "adv_speeds", "all" }, { "interface_type", "none" }, { "adv_interface_types", "all" }, @@ -562,6 +955,111 @@ namespace portsorch_test // Cleanup ports cleanupPorts(gPortsOrch); + + // Check buffer maximum parameter table entries are removed + auto bufferMaxParameterTable = Table(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + std::vector keys; + bufferMaxParameterTable.getKeys(keys); + ASSERT_TRUE(keys.empty()); + } + + // Verifies certain port attributes are set on port creation, ensures no set API calls are made. + TEST_F(PortsOrchTest, PortAttributeSetOnCreation) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + const auto alias = "Ethernet0"; + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // default Ethernet0 has different lanes so create_ports() is triggered + std::vector fvList = { + { "alias", alias }, + { "index", "0" }, + { "lanes", "0,1,2,3" }, + { "speed", "100000" }, + { "autoneg", "on" }, + { "adv_speeds", "all" }, + { "interface_type", "none" }, + { "adv_interface_types", "all" }, + { "fec", "rs" }, + { "mtu", "9100" }, + { "tpid", "0x8101" }, + { "pfc_asym", "on" }, + { "admin_status", "up" }, + { "description", "FP port" } + }; + + portTable.set(alias, fvList); + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + const auto set_port_fec_count = _sai_set_port_fec_count; + const auto set_port_auto_neg_count = _sai_set_port_auto_neg_count; + const auto set_port_tpid_count = _sai_set_port_tpid_count; + const auto sai_set_pfc_mode_count = _sai_set_pfc_mode_count; + + _hook_sai_port_api(); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + EXPECT_TRUE(taskList.empty()); + + _unhook_sai_port_api(); + + Port p; + EXPECT_TRUE(gPortsOrch->getPort(alias, p)); + + // Validate SAI port configuration + + sai_attribute_t attr; + + attr.id = SAI_PORT_ATTR_FEC_MODE; + EXPECT_EQ(SAI_STATUS_SUCCESS, sai_port_api->get_port_attribute(p.m_port_id, 1, &attr)); + EXPECT_EQ(attr.value.s32, SAI_PORT_FEC_MODE_RS); + + if (gPortsOrch->fec_override_sup) + { + attr.id = SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE; + EXPECT_EQ(SAI_STATUS_SUCCESS, sai_port_api->get_port_attribute(p.m_port_id, 1, &attr)); + EXPECT_FALSE(attr.value.booldata); + } + + attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; + EXPECT_EQ(SAI_STATUS_SUCCESS, sai_port_api->get_port_attribute(p.m_port_id, 1, &attr)); + EXPECT_TRUE(attr.value.booldata); + + attr.id = SAI_PORT_ATTR_TPID; + EXPECT_EQ(SAI_STATUS_SUCCESS, sai_port_api->get_port_attribute(p.m_port_id, 1, &attr)); + EXPECT_EQ(attr.value.u16, 0x8101); + + attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE; + EXPECT_EQ(SAI_STATUS_SUCCESS, sai_port_api->get_port_attribute(p.m_port_id, 1, &attr)); + EXPECT_EQ(attr.value.s32, SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE); + + attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX; + EXPECT_EQ(SAI_STATUS_SUCCESS, sai_port_api->get_port_attribute(p.m_port_id, 1, &attr)); + EXPECT_EQ(attr.value.u8, 0xff); + + // Validate no set API calls performed for specified attributes + + EXPECT_EQ(set_port_fec_count, _sai_set_port_fec_count); + EXPECT_EQ(set_port_auto_neg_count, _sai_set_port_auto_neg_count); + EXPECT_EQ(set_port_tpid_count, _sai_set_port_tpid_count); + EXPECT_EQ(sai_set_pfc_mode_count, _sai_set_pfc_mode_count); + + // Cleanup ports + cleanupPorts(gPortsOrch); } TEST_F(PortsOrchTest, PortBasicConfig) @@ -596,6 +1094,7 @@ namespace portsorch_test SET_COMMAND, { { "speed", "100000" }, { "autoneg", "on" }, + { "unreliable_los", "on" }, { "adv_speeds", "1000,10000,100000" }, { "interface_type", "CR" }, { "adv_interface_types", "CR,CR2,CR4,CR8" }, @@ -625,6 +1124,9 @@ namespace portsorch_test // Verify auto-negotiation ASSERT_TRUE(p.m_autoneg); + // Verify unreliablelos + ASSERT_TRUE(p.m_unreliable_los); + // Verify advertised speed std::set adv_speeds = { 1000, 10000, 100000 }; ASSERT_EQ(p.m_adv_speeds, adv_speeds); @@ -694,27 +1196,30 @@ namespace portsorch_test // Port count: 32 Data + 1 CPU ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + std::string custom_serdes_attrs = "{'attributes':[{'attr_xyz':{'value':[1,2,3,4]}}]}"; // Generate port serdes config std::deque kfvList = {{ "Ethernet0", SET_COMMAND, { - { "preemphasis", "0xcad0,0xc6e0,0xc6e0,0xd2b0" }, - { "idriver", "0x5,0x3,0x4,0x1" }, - { "ipredriver", "0x1,0x4,0x3,0x5" }, - { "pre1", "0xfff0,0xfff2,0xfff1,0xfff3" }, - { "pre2", "0xfff0,0xfff2,0xfff1,0xfff3" }, - { "pre3", "0xfff0,0xfff2,0xfff1,0xfff3" }, - { "main", "0x90,0x92,0x91,0x93" }, - { "post1", "0x10,0x12,0x11,0x13" }, - { "post2", "0x10,0x12,0x11,0x13" }, - { "post3", "0x10,0x12,0x11,0x13" }, - { "attn", "0x80,0x82,0x81,0x83" }, - { "ob_m2lp", "0x4,0x6,0x5,0x7" }, - { "ob_alev_out", "0xf,0x11,0x10,0x12" }, - { "obplev", "0x69,0x6b,0x6a,0x6c" }, - { "obnlev", "0x5f,0x61,0x60,0x62" }, - { "regn_bfm1p", "0x1e,0x20,0x1f,0x21" }, - { "regn_bfm1n", "0xaa,0xac,0xab,0xad" } + { "preemphasis", "0xcad0,0xc6e0,0xc6e0,0xd2b0" }, + { "idriver", "0x5,0x3,0x4,0x1" }, + { "ipredriver", "0x1,0x4,0x3,0x5" }, + { "pre1", "0xfff0,0xfff2,0xfff1,0xfff3" }, + { "pre2", "0xfff0,0xfff2,0xfff1,0xfff3" }, + { "pre3", "0xfff0,0xfff2,0xfff1,0xfff3" }, + { "main", "0x90,0x92,0x91,0x93" }, + { "post1", "0x10,0x12,0x11,0x13" }, + { "post2", "0x10,0x12,0x11,0x13" }, + { "post3", "0x10,0x12,0x11,0x13" }, + { "attn", "0x80,0x82,0x81,0x83" }, + { "unreliable_los","off" }, + { "ob_m2lp", "0x4,0x6,0x5,0x7" }, + { "ob_alev_out", "0xf,0x11,0x10,0x12" }, + { "obplev", "0x69,0x6b,0x6a,0x6c" }, + { "obnlev", "0x5f,0x61,0x60,0x62" }, + { "regn_bfm1p", "0x1e,0x20,0x1f,0x21" }, + { "regn_bfm1n", "0xaa,0xac,0xab,0xad" }, + { "custom_serdes_attrs", custom_serdes_attrs } } }}; @@ -731,71 +1236,77 @@ namespace portsorch_test // Verify preemphasis std::vector preemphasis = { 0xcad0, 0xc6e0, 0xc6e0, 0xd2b0 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_PREEMPHASIS), preemphasis); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_PREEMPHASIS), SerdesValue(preemphasis)); // Verify idriver std::vector idriver = { 0x5, 0x3, 0x4, 0x1 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IDRIVER), idriver); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_IDRIVER), SerdesValue(idriver)); // Verify ipredriver std::vector ipredriver = { 0x1, 0x4, 0x3, 0x5 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IPREDRIVER), ipredriver); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_IPREDRIVER), SerdesValue(ipredriver)); // Verify pre1 std::vector pre1 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE1), pre1); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE1), SerdesValue(pre1)); // Verify pre2 std::vector pre2 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE2), pre2); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE2), SerdesValue(pre2)); // Verify pre3 std::vector pre3 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE3), pre3); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE3), SerdesValue(pre3)); // Verify main std::vector main = { 0x90, 0x92, 0x91, 0x93 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_MAIN), main); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_MAIN), SerdesValue(main)); // Verify post1 std::vector post1 = { 0x10, 0x12, 0x11, 0x13 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST1), post1); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST1), SerdesValue(post1)); // Verify post2 std::vector post2 = { 0x10, 0x12, 0x11, 0x13 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST2), post2); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST2), SerdesValue(post2)); // Verify post3 std::vector post3 = { 0x10, 0x12, 0x11, 0x13 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST3), post3); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST3), SerdesValue(post3)); // Verify attn std::vector attn = { 0x80, 0x82, 0x81, 0x83 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_ATTN), attn); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_FIR_ATTN), SerdesValue(attn)); // Verify ob_m2lp std::vector ob_m2lp = { 0x4, 0x6, 0x5, 0x7 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO), ob_m2lp); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO), SerdesValue(ob_m2lp)); // Verify ob_alev_out std::vector ob_alev_out = { 0xf, 0x11, 0x10, 0x12 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE), ob_alev_out); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE), SerdesValue(ob_alev_out)); // Verify obplev std::vector obplev = { 0x69, 0x6b, 0x6a, 0x6c }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE), obplev); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE), SerdesValue(obplev)); // Verify obnlev std::vector obnlev = { 0x5f, 0x61, 0x60, 0x62 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE), obnlev); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE), SerdesValue(obnlev)); // Verify regn_bfm1p std::vector regn_bfm1p = { 0x1e, 0x20, 0x1f, 0x21 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG), regn_bfm1p); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG), SerdesValue(regn_bfm1p)); // Verify regn_bfm1n std::vector regn_bfm1n = { 0xaa, 0xac, 0xab, 0xad }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG), regn_bfm1n); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG), SerdesValue(regn_bfm1n)); + + // Verify custom_serdes_attrs + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_CUSTOM_COLLECTION), SerdesValue(custom_serdes_attrs)); + + // Verify unreliablelos + ASSERT_EQ(p.m_unreliable_los, false); // Dump pending tasks std::vector taskList; @@ -859,7 +1370,6 @@ namespace portsorch_test std::deque kfvSerdes = {{ "Ethernet0", SET_COMMAND, { - { "admin_status", "up" }, { "idriver" , "0x6,0x6,0x6,0x6" } } }}; @@ -868,7 +1378,8 @@ namespace portsorch_test consumer->addToSync(kfvSerdes); _hook_sai_port_api(); - uint32_t current_sai_api_call_count = _sai_set_admin_state_down_count; + uint32_t down_call_count = _sai_set_admin_state_down_count; + uint32_t up_call_count = _sai_set_admin_state_up_count; // Apply configuration static_cast(gPortsOrch)->doTask(); @@ -880,11 +1391,40 @@ namespace portsorch_test // Verify idriver std::vector idriver = { 0x6, 0x6, 0x6, 0x6 }; - ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IDRIVER), idriver); + ASSERT_EQ(p.m_serdes_attrs.at(SAI_PORT_SERDES_ATTR_IDRIVER), SerdesValue(idriver)); // Verify admin-disable then admin-enable - ASSERT_EQ(_sai_set_admin_state_down_count, ++current_sai_api_call_count); - ASSERT_EQ(_sai_set_admin_state_up_count, current_sai_api_call_count); + ASSERT_EQ(_sai_set_admin_state_down_count, ++down_call_count); + ASSERT_EQ(_sai_set_admin_state_up_count, ++up_call_count); + + // Configure non-serdes attribute that does not trigger admin state change + std::deque kfvMtu = {{ + "Ethernet0", + SET_COMMAND, { + { "mtu", "1234" }, + } + }}; + + // Refill consumer + consumer->addToSync(kfvMtu); + + _hook_sai_port_api(); + down_call_count = _sai_set_admin_state_down_count; + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + _unhook_sai_port_api(); + + ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + ASSERT_TRUE(p.m_admin_state_up); + + // Verify mtu is set + ASSERT_EQ(p.m_mtu, 1234); + + // Verify no admin-disable then admin-enable + ASSERT_EQ(_sai_set_admin_state_down_count, down_call_count); + ASSERT_EQ(_sai_set_admin_state_up_count, up_call_count); // Dump pending tasks std::vector taskList; @@ -928,17 +1468,16 @@ namespace portsorch_test ASSERT_NE(port.m_port_id, SAI_NULL_OBJECT_ID); // Get queue info - string type; + sai_queue_type_t type; uint8_t index; auto queue_id = port.m_queue_ids[0]; auto ut_sai_get_queue_attr_count = _sai_get_queue_attr_count; gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); - ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + ASSERT_EQ(type, SAI_QUEUE_TYPE_UNICAST); ASSERT_EQ(index, 0); - type = ""; index = 255; gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); - ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + ASSERT_EQ(type, SAI_QUEUE_TYPE_UNICAST); ASSERT_EQ(index, 0); ASSERT_EQ(++ut_sai_get_queue_attr_count, _sai_get_queue_attr_count); @@ -954,46 +1493,943 @@ namespace portsorch_test _unhook_sai_queue_api(); } - /** - * Test case: PortsOrch::addBridgePort() does not add router port to .1Q bridge - */ - TEST_F(PortsOrchTest, addBridgePortOnRouterPort) + TEST_F(PortsOrchTest, PortDeleteQueueCountersCleanup) { - _hook_sai_bridge_api(); - - StrictMock mock_sai_bridge_; - mock_sai_bridge = &mock_sai_bridge_; - sai_bridge_api->create_bridge_port = mock_create_bridge_port; - Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Create ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Port port; + auto testQueueIdx = 0; + string portName = "Ethernet0"; + ASSERT_TRUE(gPortsOrch->getPort(portName, port)); + ASSERT_NE(port.m_port_id, SAI_NULL_OBJECT_ID); + ASSERT_GT(port.m_queue_ids.size(), 0u); + + // Enable Queue flex counters + Table flexCounterCfg = Table(m_config_db.get(), CFG_FLEX_COUNTER_TABLE_NAME); + const std::vector enable({ {FLEX_COUNTER_STATUS_FIELD, "enable"} }); + flexCounterCfg.set("QUEUE_WATERMARK", enable); + flexCounterCfg.set("QUEUE", enable); + + auto flexCounterOrch = gDirectory.get(); + flexCounterOrch->addExistingData(&flexCounterCfg); + static_cast(flexCounterOrch)->doTask(); + + sai_object_id_t targetQueueOid = port.m_queue_ids[testQueueIdx]; + + // Delete the port + entries.push_back({portName, "DEL", { { } }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + Table countersQueueNameMap(m_counters_db.get(), "COUNTERS_QUEUE_NAME_MAP"); + Table countersQueuePortMap(m_counters_db.get(), "COUNTERS_QUEUE_PORT_MAP"); + + // Verify specific alias:idx field is gone from name maps + string dummy; + ASSERT_FALSE(countersQueueNameMap.hget("", portName + ":" + to_string(testQueueIdx), dummy)); + ASSERT_FALSE(countersQueuePortMap.hget("", sai_serialize_object_id(targetQueueOid), dummy)); + + // Re-add the same port + auto it = ports.find(portName); + entries.push_back({portName, "SET", it->second}); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Fetch the re-added port and verify COUNTERS_DB entries exist for the queue index + Port readded; + ASSERT_TRUE(gPortsOrch->getPort(portName, readded)); + ASSERT_GT(readded.m_queue_ids.size(), 0u); + sai_object_id_t readdedQ1 = readded.m_queue_ids[testQueueIdx]; + + // Name-map should contain alias:idx as a field + ASSERT_TRUE(countersQueueNameMap.hget("", portName + ":" + to_string(testQueueIdx), dummy)); + // Port-map should contain queue OID -> port OID mapping as a field + ASSERT_TRUE(countersQueuePortMap.hget("", sai_serialize_object_id(readdedQ1), dummy)); + } + + TEST_F(PortsOrchTest, PortPTConfigDefaultTimestampTemplate) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Port p; + std::deque kfvList; + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + + // Get SAI default ports to populate DB + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet8", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Enable Path Tracing on Ethernet8 with Interface ID 128 and default Timestamp Template + kfvList = {{ + "Ethernet8", + SET_COMMAND, { + { "pt_interface_id", "128" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + kfvList.clear(); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet8", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 128); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Disable Path Tracing on Ethernet8 + kfvList = {{ + "Ethernet8", + SET_COMMAND, { + { "pt_interface_id", "None" }, + { "pt_timestamp_template", "None" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + kfvList.clear(); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet8", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + } + + TEST_F(PortsOrchTest, PortPTConfigNonDefaultTimestampTemplate) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Port p; + std::deque kfvList; + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Enable Path Tracing on Ethernet9 with Interface ID 129 and Timestamp Template template2 + kfvList = {{ + "Ethernet9", + SET_COMMAND, { + { "pt_interface_id", "129" }, + { "pt_timestamp_template", "template2" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + kfvList.clear(); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 129); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_12_19); + + // Disable Path Tracing on Ethernet9 + kfvList = {{ + "Ethernet9", + SET_COMMAND, { + { "pt_interface_id", "None" }, + { "pt_timestamp_template", "None" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + kfvList.clear(); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + } + + TEST_F(PortsOrchTest, PortPTConfigInvalidInterfaceID) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Port p; + std::deque kfvList; + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Enable Path Tracing on Ethernet9 with Interface ID 4096 (INVALID) and Timestamp Template template2 + kfvList = {{ + "Ethernet9", + SET_COMMAND, { + { "pt_interface_id", "4096" }, + { "pt_timestamp_template", "template2" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + kfvList.clear(); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + // We provided an invalid Path Tracing Interface ID, therefore we expect PortsOrch rejects the port + // configuration and Path Tracing remains disabled (i.e., Tracing Interface ID should be 0) + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + } + + TEST_F(PortsOrchTest, PortPTConfigInvalidInterfaceTimestampTemplate) + { + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Port p; + std::deque kfvList; + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Enable Path Tracing on Ethernet9 with Interface ID 129 and Timestamp Template template5 (INVALID) + kfvList = {{ + "Ethernet9", + SET_COMMAND, { + { "pt_interface_id", "129" }, + { "pt_timestamp_template", "template5" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + kfvList.clear(); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + // We provided an invalid Timestamp Template, therefore we expect PortsOrch rejects the port + // configuration and Path Tracing remains disabled (i.e., Tracing Interface ID should be 0) + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Dump pending tasks + std::vector taskList; + gPortsOrch->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + } + + TEST_F(PortsOrchTest, PortPTSAIFailureHandling) + { + _hook_sai_port_api(); + _hook_sai_switch_api(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Port p; + std::deque kfvList; + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Get port + ASSERT_TRUE(gPortsOrch->getPort("Ethernet9", p)); + + // Verify PT Interface ID + ASSERT_EQ(p.m_pt_intf_id, 0); + + // Verify PT Timestamp Template + ASSERT_EQ(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23); + + // Simulate failure when PortsOrch attempts to set the Path Tracing Interface ID + set_pt_interface_id_fail = true; + + // Enable Path Tracing on Ethernet9 with Interface ID 129 and Timestamp Template template2 + kfvList = {{ + "Ethernet9", + SET_COMMAND, { + { "pt_interface_id", "129" }, + { "pt_timestamp_template", "template2" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + static_cast(gPortsOrch)->doTask(); + ASSERT_EQ(set_pt_interface_id_failures, 1); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + set_pt_interface_id_fail = false; + + + // Simulate failure when PortsOrch attempts to set the Path Tracing Timestamp Template + set_pt_timestamp_template_fail = true; + + // Enable Path Tracing on Ethernet10 with Interface ID 129 and Timestamp Template template2 + kfvList = {{ + "Ethernet10", + SET_COMMAND, { + { "pt_interface_id", "129" }, + { "pt_timestamp_template", "template2" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + static_cast(gPortsOrch)->doTask(); + + ASSERT_EQ(set_pt_timestamp_template_failures, 1); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + set_pt_timestamp_template_fail = false; + + + // Simulate failure when PortsOrch attempts to set the port TAM object + set_port_tam_fail = true; + + // Enable Path Tracing on Ethernet11 with Interface ID 129 and Timestamp Template template2 + kfvList = {{ + "Ethernet11", + SET_COMMAND, { + { "pt_interface_id", "129" }, + { "pt_timestamp_template", "template2" } + } + }}; + + // Refill consumer + consumer->addToSync(kfvList); + + static_cast(gPortsOrch)->doTask(); + + ASSERT_EQ(set_port_tam_failures, 1); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + set_port_tam_fail = false; + + _unhook_sai_switch_api(); + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, PortPTCapabilityUnsupported) + { + _hook_sai_port_api(); + _hook_sai_switch_api(); + + auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Port p; + std::deque kfvList; + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + + // Get SAI default ports + auto &ports = defaultPortList; + ASSERT_TRUE(!ports.empty()); + + // Generate port config + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // Refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration + static_cast(gPortsOrch)->doTask(); + + // Port count: 32 Data + 1 CPU + ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // Scenario 0: Query to fetch OBJECT_TYPE_LIST is not Implemented by the vendor + support_object_type_list = false; + ASSERT_FALSE(gPortsOrch->checkPathTracingCapability()); + support_object_type_list = true; + + // Scenario 1: Path Tracing supported + ASSERT_TRUE(gPortsOrch->checkPathTracingCapability()); + + // Scenario 2: Path Tracing is not supported + supported_sai_objects.erase(std::remove(supported_sai_objects.begin(), supported_sai_objects.end(), SAI_OBJECT_TYPE_TAM), supported_sai_objects.end()); + ASSERT_FALSE(gPortsOrch->checkPathTracingCapability()); + + kfvList = {{ + "Ethernet10", + SET_COMMAND, { + { "pt_interface_id", "129"} + } + }}; + consumer->addToSync(kfvList); + static_cast(gPortsOrch)->doTask(); + ASSERT_TRUE(gPortsOrch->getPort("Ethernet10", p)); + // Expect Path Tracing Interface ID is ignored because Path Tracing is not supported on the switch + ASSERT_EQ(p.m_pt_intf_id, 0); + + kfvList = {{ + "Ethernet10", + SET_COMMAND, { + { "pt_timestamp_template", "template2"} + } + }}; + consumer->addToSync(kfvList); + static_cast(gPortsOrch)->doTask(); + ASSERT_TRUE(gPortsOrch->getPort("Ethernet10", p)); + // Expect Path Tracing template is ignored because Path Tracing is not supported on the switch + ASSERT_NE(p.m_pt_timestamp_template, SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_12_19); + + _unhook_sai_switch_api(); + _unhook_sai_port_api(); + } + + /** + * Test case: PortsOrch::addBridgePort() does not add router port to .1Q bridge + */ + TEST_F(PortsOrchTest, addBridgePortOnRouterPort) + { + _hook_sai_bridge_api(); + + StrictMock mock_sai_bridge_; + mock_sai_bridge = &mock_sai_bridge_; + sai_bridge_api->create_bridge_port = mock_create_bridge_port; + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + // Apply configuration : create ports + static_cast(gPortsOrch)->doTask(); + + // Get first port and set its rif id to simulate it is router port + Port port; + gPortsOrch->getPort("Ethernet0", port); + port.m_rif_id = 1; + + ASSERT_FALSE(gPortsOrch->addBridgePort(port)); + EXPECT_CALL(mock_sai_bridge_, create_bridge_port(_, _, _, _)).Times(0); + + _unhook_sai_bridge_api(); + } + + TEST_F(PortsOrchTest, SupportedLinkEventDampingAlgorithmSuccess) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "aied"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // verify SAI call was made and set algorithm successfully + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_AIED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SupportedLinkEventDampingAlgorithmFailure) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = false; + _sai_link_event_damping_algorithm = SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; + + + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "aied"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that SAI call was made, algorithm not set + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, NotSupportedLinkEventDampingAlgorithm) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; // Get SAI default ports to populate DB auto ports = ut_helper::getInitialSaiPorts(); - // Populate port table with SAI ports for (const auto &it : ports) { portTable.set(it.first, it.second); } - // Set PortConfigDone, PortInitDone + // Set PortConfigDone portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); - portTable.set("PortInitDone", { { "lanes", "0" } }); // refill consumer gPortsOrch->addExistingData(&portTable); - // Apply configuration : create ports + + // Apply configuration : + // create ports static_cast(gPortsOrch)->doTask(); - // Get first port and set its rif id to simulate it is router port - Port port; - gPortsOrch->getPort("Ethernet0", port); - port.m_rif_id = 1; + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_algorithm_count; - ASSERT_FALSE(gPortsOrch->addBridgePort(port)); - EXPECT_CALL(mock_sai_bridge_, create_bridge_port(_, _, _, _)).Times(0); + entries.push_back({"Ethernet0", "SET", + { + {"link_event_damping_algorithm", "test_algo"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); - _unhook_sai_bridge_api(); + // Verify that no SAI call was made + ASSERT_EQ(_sai_set_link_event_damping_algorithm_count, current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_algorithm, SAI_REDIS_LINK_EVENT_DAMPING_ALGORITHM_DISABLED); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingFullConfigSuccess) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"max_suppress_time", "64000"}, + {"decay_half_life", "45000"}, + {"suppress_threshold", "1650"}, + {"reuse_threshold", "1500"}, + {"flap_penalty", "1000"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 64000); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 45000); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 1650); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 1500); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 1000); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingPartialConfigSuccess) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"decay_half_life", "30000"}, + {"reuse_threshold", "1200"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 0); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 30000); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 1200); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 0); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, SetLinkEventDampingConfigFailure) { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_link_event_damping_success = false; + _sai_link_event_damping_config = {0, 0, 0, 0, 0}; + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_link_event_damping_config_count; + + entries.push_back({"Ethernet0", "SET", + { + {"max_suppress_time", "64000"}, + {"decay_half_life", "45000"}, + {"suppress_threshold", "1650"}, + {"reuse_threshold", "1500"}, + {"flap_penalty", "1000"}, + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + // Verify that config is not set + ASSERT_EQ(_sai_set_link_event_damping_config_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_link_event_damping_config.max_suppress_time, 0); + ASSERT_EQ(_sai_link_event_damping_config.decay_half_life, 0); + ASSERT_EQ(_sai_link_event_damping_config.suppress_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.reuse_threshold, 0); + ASSERT_EQ(_sai_link_event_damping_config.flap_penalty, 0); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); } TEST_F(PortsOrchTest, PortSupportedFecModes) @@ -1166,6 +2602,7 @@ namespace portsorch_test { _hook_sai_port_api(); Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table statePortTable = Table(m_state_db.get(), STATE_PORT_TABLE_NAME); std::deque entries; not_support_fetching_fec = false; @@ -1215,9 +2652,89 @@ namespace portsorch_test ASSERT_EQ(fec_mode, SAI_PORT_FEC_MODE_RS); + gPortsOrch->refreshPortStatus(); + std::vector values; + statePortTable.get("Ethernet0", values); + bool fec_found = false; + for (auto &valueTuple : values) + { + if (fvField(valueTuple) == "fec") + { + fec_found = true; + ASSERT_TRUE(fvValue(valueTuple) == "rs"); + } + } + ASSERT_TRUE(fec_found == true); + + /*Mock an invalid fec mode with high value*/ + _sai_port_fec_mode = 100; + gPortsOrch->refreshPortStatus(); + statePortTable.get("Ethernet0", values); + fec_found = false; + for (auto &valueTuple : values) + { + if (fvField(valueTuple) == "fec") + { + fec_found = true; + ASSERT_TRUE(fvValue(valueTuple) == "N/A"); + } + } mock_port_fec_modes = old_mock_port_fec_modes; _unhook_sai_port_api(); } + + /* + * Test case: SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE is not supported by vendor + **/ + TEST_F(PortsOrchTest, PortPFCNotSupported) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + set_pfc_asym_not_supported = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_pfc_mode_count; + + entries.push_back({"Ethernet0", "SET", + { + { "pfc_asym", "on"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_pfc_mode_count, ++current_sai_api_call_count); + ASSERT_EQ(set_pfc_asym_failures, 1); + + set_pfc_asym_not_supported = false; + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + TEST_F(PortsOrchTest, PortTestSAIFailureHandling) { _hook_sai_port_api(); @@ -1256,7 +2773,7 @@ namespace portsorch_test }}); auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); consumer->addToSync(entries); - ASSERT_DEATH({static_cast(gPortsOrch)->doTask();}, ""); + gPortsOrch->doTask(); ASSERT_EQ(*_sai_syncd_notifications_count, 1); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); @@ -1378,6 +2895,7 @@ namespace portsorch_test Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table transceieverInfoTable = Table(m_state_db.get(), STATE_TRANSCEIVER_INFO_TABLE_NAME); // Get SAI default ports to populate DB @@ -1411,6 +2929,8 @@ namespace portsorch_test for (const auto &it : ports) { portTable.set(it.first, it.second); + portTable.set(it.first, {{"oper_status", "up"}}); + transceieverInfoTable.set(it.first, {}); } // Set PortConfigDone, PortInitDone @@ -1438,28 +2958,132 @@ namespace portsorch_test ASSERT_FALSE(gPortsOrch->allPortsReady()); - // Drain remaining + // Drain remaining + + static_cast(gBufferOrch)->doTask(); + static_cast(gPortsOrch)->doTask(); + + // Now ports should be ready + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + ts.clear(); + + gBufferOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Verify port configuration + vector port_list; + port_list.resize(ports.size()); + sai_attribute_t attr; + sai_status_t status; + attr.id = SAI_SWITCH_ATTR_PORT_LIST; + attr.value.objlist.count = static_cast(port_list.size()); + attr.value.objlist.list = port_list.data(); + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + for (uint32_t i = 0; i < port_list.size(); i++) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + status = sai_port_api->get_port_attribute(port_list[i], 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + ASSERT_TRUE(attr.value.booldata); + } + + // Verify host if configuration + for (const auto& iter: ports) + { + const auto& portName = iter.first; + + Port port; + gPortsOrch->getPort(portName, port); + + ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); + + attr.id = SAI_HOSTIF_ATTR_OPER_STATUS; + status = sai_hostif_api->get_hostif_attribute(port.m_hif_id, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + ASSERT_TRUE(attr.value.booldata); + } + } + + TEST_F(PortsOrchTest, PortsWithNoPGsQueuesSchedulerGroups) + { + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + auto original_api = sai_port_api->get_ports_attribute; + // Mock SAI port API to return 0 number of PGs, queues and scheduler groups + auto spy = SpyOn(&sai_port_api->get_ports_attribute); + spy->callFake([&]( + uint32_t object_count, + const sai_object_id_t *object_id, + const uint32_t *attr_count, + sai_attribute_t **attr_list, + sai_bulk_op_error_mode_t mode, + sai_status_t *object_statuses) -> sai_status_t + { + assert(object_count > 1); + assert(attr_count[0] > 1); + switch (attr_list[0]->id) + { + case SAI_PORT_ATTR_NUMBER_OF_INGRESS_PRIORITY_GROUPS: + case SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES: + case SAI_PORT_ATTR_QOS_NUMBER_OF_SCHEDULER_GROUPS: + for (size_t i = 0; i < object_count; i++) + { + attr_list[i]->value.u32 = 0; + object_statuses[i] = SAI_STATUS_SUCCESS; + } + return SAI_STATUS_SUCCESS; + } + return original_api( + object_count, + object_id, + attr_count, + attr_list, + mode, + object_statuses); + } + ); + + // Get SAI default ports to populate DB - static_cast(gBufferOrch)->doTask(); - static_cast(gPortsOrch)->doTask(); + auto ports = ut_helper::getInitialSaiPorts(); - // Now ports should be ready + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } - ASSERT_TRUE(gPortsOrch->allPortsReady()); + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); - // No more tasks + gPortsOrch->addExistingData(&portTable); - vector ts; + // Apply configuration : + // create ports - gPortsOrch->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); + static_cast(gPortsOrch)->doTask(); - ts.clear(); + Port port; + gPortsOrch->getPort("Ethernet0", port); - gBufferOrch->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); + ASSERT_TRUE(port.m_init); + ASSERT_EQ(port.m_priority_group_ids.size(), 0); + ASSERT_EQ(port.m_queue_ids.size(), 0); } + TEST_F(PortsOrchTest, PfcDlrHandlerCallingDlrInitAttribute) { _hook_sai_port_api(); @@ -1663,6 +3287,85 @@ namespace portsorch_test _unhook_sai_switch_api(); } + TEST_F(PortsOrchTest, DebugDropMonitorToggle) + { + // setup the tables with data + std::deque entries; + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + // Apply configuration again + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // Check if default status is disabled + ASSERT_TRUE(!gDebugCounterOrch->getDebugMonitorStatus()); + + // Set status to enabled + entries.clear(); + entries.push_back({ "CONFIG", "SET", { + { "status", "enabled" } + } }); + auto DebugCounterConsumer = dynamic_cast(gDebugCounterOrch->getExecutor("DEBUG_DROP_MONITOR")); + DebugCounterConsumer->addToSync(entries); + static_cast(gDebugCounterOrch)->doTask(); + ASSERT_TRUE(gDebugCounterOrch->getDebugMonitorStatus()); + entries.clear(); + + // Set status with an invalid value + entries.push_back({ "CONFIG", "SET", { + { "status", "turnoff" } + } }); + DebugCounterConsumer->addToSync(entries); + static_cast(gDebugCounterOrch)->doTask(); + ASSERT_TRUE(gDebugCounterOrch->getDebugMonitorStatus()); + entries.clear(); + + // Set an unsupported attribute + entries.push_back({ "CONFIG", "SET", { + { "enable", "false" } + } }); + DebugCounterConsumer->addToSync(entries); + static_cast(gDebugCounterOrch)->doTask(); + ASSERT_TRUE(gDebugCounterOrch->getDebugMonitorStatus()); + entries.clear(); + + // Use an unsupported operation type + entries.push_back({ "CONFIG", "GET", { + { "status", "disable" } + } }); + DebugCounterConsumer->addToSync(entries); + static_cast(gDebugCounterOrch)->doTask(); + ASSERT_TRUE(gDebugCounterOrch->getDebugMonitorStatus()); + entries.clear(); + + // Set status back to disabled + entries.push_back({ "CONFIG", "SET", { + { "status", "disabled" } + } }); + DebugCounterConsumer->addToSync(entries); + static_cast(gDebugCounterOrch)->doTask(); + ASSERT_TRUE(!gDebugCounterOrch->getDebugMonitorStatus()); + entries.clear(); + } + TEST_F(PortsOrchTest, PfcZeroBufferHandler) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); @@ -1774,6 +3477,60 @@ namespace portsorch_test ts.clear(); } + /* This test passes an incorrect LAG entry and verifies that this entry is not + * erased from the consumer table. + */ + TEST_F(PortsOrchTest, LagMemberCanNotBeLocated) + { + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table lagTable = Table(m_app_db.get(), APP_LAG_TABLE_NAME); + Table lagMemberTable = Table(m_app_db.get(), APP_LAG_MEMBER_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + /* + * Next we will prepare some configuration data to be consumed by PortsOrch + * 32 Ports, 1 LAG, 1 port is an invalid LAG member. + */ + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { } }); + lagTable.set("PortChannel999", + { + {"admin_status", "up"}, + {"mtu", "9100"} + } + ); + + // Add invalid lag member + lagMemberTable.set( + std::string("InvalidLagMember") + lagMemberTable.getTableNameSeparator() + ports.begin()->first, + { {"status", "enabled"} }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + gPortsOrch->addExistingData(&lagTable); + gPortsOrch->addExistingData(&lagMemberTable); + + static_cast(gPortsOrch)->doTask(); + + // verify there is a pending task to do. + vector ts; + auto exec = gPortsOrch->getExecutor(APP_LAG_MEMBER_TABLE_NAME); + auto consumer = static_cast(exec); + ts.clear(); + consumer->dumpPendingTasks(ts); + ASSERT_FALSE(ts.empty()); + } + /* This test checks that a LAG member validation happens on orchagent level * and no SAI call is executed in case a port requested to be a LAG member * is already a LAG member. @@ -1956,6 +3713,7 @@ namespace portsorch_test gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); + ASSERT_TRUE(port.m_flap_count == 1); std::vector values; portTable.get("Ethernet0", values); @@ -1987,6 +3745,126 @@ namespace portsorch_test sai_port_api = orig_port_api; } + /* This test verifies that an invalid configuration + * of pfc stat history will not enable the featuer + */ + TEST_F(PortsOrchTest, PfcInvalidHistoryToggle) + { + _hook_sai_switch_api(); + // setup the tables with data + std::deque entries; + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + // Apply configuration + // ports + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + vector ts; + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + ts.clear(); + + entries.clear(); + entries.push_back({ "Ethernet0", "SET", { { "pfc_enable", "3,4" }, { "pfcwd_sw_enable", "3,4" } } }); + auto portQosMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + portQosMapConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + + entries.push_back({ "GLOBAL", "SET", {{ "POLL_INTERVAL", "200" }}}); + entries.push_back({ "Ethernet0", "SET", { + { "action", "drop" }, + { "detection_time", "200" }, + { "restoration_time", "200" } + } }); + auto PfcwdConsumer = dynamic_cast(gPfcwdOrch->getExecutor(CFG_PFC_WD_TABLE_NAME)); + PfcwdConsumer->addToSync(entries); + + // trigger the notification + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + entries.clear(); + + // create pfcwd entry with an invalid history setting + entries.push_back({ "Ethernet0", "SET", { + { "action", "drop" }, + { "detection_time", "200" }, + { "restoration_time", "200" }, + { "pfc_stat_history", "up" } + } }); + PfcwdConsumer->addToSync(entries); + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + entries.clear(); + + // verify in counters db that history is NOT enabled + Port port; + gPortsOrch->getPort("Ethernet0", port); + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto entryMap = gPfcwdOrch->m_entryMap; + + sai_object_id_t queueId = port.m_queue_ids[3]; + ASSERT_NE(entryMap.find(queueId), entryMap.end()); + + string queueIdStr = sai_serialize_object_id(queueId); + vector countersFieldValues; + countersTable->get(queueIdStr, countersFieldValues); + ASSERT_NE(countersFieldValues.size(), 0); + + for (auto &valueTuple : countersFieldValues) + { + if (fvField(valueTuple) == "PFC_STAT_HISTORY") + { + ASSERT_TRUE(fvValue(valueTuple) == "disable"); + } + } + + queueId = port.m_queue_ids[4]; + ASSERT_NE(entryMap.find(queueId), entryMap.end()); + + queueIdStr = sai_serialize_object_id(queueId); + countersFieldValues.clear(); + countersTable->get(queueIdStr, countersFieldValues); + ASSERT_NE(countersFieldValues.size(), 0); + + for (auto &valueTuple : countersFieldValues) + { + if (fvField(valueTuple) == "PFC_STAT_HISTORY") + { + ASSERT_TRUE(fvValue(valueTuple) == "disable"); + } + } + + // remove from monitoring + entries.push_back({ "Ethernet0", "DEL", { {} } }); + PfcwdConsumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 0); + + _unhook_sai_switch_api(); + } + /* * The scope of this test is to verify that LAG member is * added to a LAG before any other object on LAG is created, like RIF, bridge port in warm mode. @@ -2102,4 +3980,96 @@ namespace portsorch_test ASSERT_FALSE(bridgePortCalledBeforeLagMember); // bridge port created on lag before lag member was created } + struct PostPortInitTests : PortsOrchTest + { + }; + + // This test ensures post port initialization is performed when calling onWarmBootEnd() + TEST_F(PostPortInitTests, PortPostInit) + { + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table stateTable = Table(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + + // Get SAI default ports to populate DB + + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // Set warm boot flag + gPortsOrch->m_isWarmRestoreStage = true; + + gPortsOrch->bake(); + gPortsOrch->doTask(); + + std::string value; + bool stateDbSet; + + // At this point postPortInit() hasn't been called yet, so don't expect + // to find "max_priority_groups" field in STATE_DB + stateDbSet = stateTable.hget("Ethernet0", "max_priority_groups", value); + ASSERT_FALSE(stateDbSet); + + gPortsOrch->onWarmBootEnd(); + + // Now the field "max_priority_groups" is set + stateDbSet = stateTable.hget("Ethernet0", "max_priority_groups", value); + ASSERT_TRUE(stateDbSet); + } + + struct PortsOrchNegativeTests : PortsOrchTest + { + }; + + TEST_F(PortsOrchNegativeTests, PortHostIfCreateFailed) + { + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + auto original_api = sai_hostif_api->create_hostif; + auto hostIfSpy = SpyOn(&sai_hostif_api->create_hostif); + hostIfSpy->callFake([&](sai_object_id_t*, sai_object_id_t, uint32_t, const sai_attribute_t*) -> sai_status_t { + return SAI_STATUS_INSUFFICIENT_RESOURCES; + } + ); + + // Get SAI default ports to populate DB + + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + + static_cast(gPortsOrch)->doTask(); + + sai_hostif_api->create_hostif = original_api; + + Port port; + gPortsOrch->getPort("Ethernet0", port); + + ASSERT_FALSE(port.m_init); + } + } diff --git a/tests/mock_tests/portsyncd/portsyncd_ut.cpp b/tests/mock_tests/portsyncd/portsyncd_ut.cpp index f97a80e3d6a..93575f68bc3 100644 --- a/tests/mock_tests/portsyncd/portsyncd_ut.cpp +++ b/tests/mock_tests/portsyncd/portsyncd_ut.cpp @@ -187,18 +187,6 @@ namespace portsyncd_ut namespace portsyncd_ut { - TEST_F(PortSyncdTest, test_linkSyncInit) - { - if_ni_mock = populateNetDev(); - mockCmdStdcout = "up\n"; - swss::LinkSync sync(m_app_db.get(), m_state_db.get()); - std::vector keys; - sync.m_stateMgmtPortTable.getKeys(keys); - ASSERT_EQ(keys.size(), 1); - ASSERT_EQ(keys.back(), "eth0"); - ASSERT_EQ(mockCallArgs.back(), "cat /sys/class/net/\"eth0\"/operstate"); - } - TEST_F(PortSyncdTest, test_cacheOldIfaces) { if_ni_mock = populateNetDevAdvanced(); @@ -295,29 +283,6 @@ namespace portsyncd_ut ASSERT_EQ(sync.m_statePortTable.get("Ethernet0", ovalues), false); } - TEST_F(PortSyncdTest, test_onMsgMgmtIface){ - swss::LinkSync sync(m_app_db.get(), m_state_db.get()); - - /* Generate a netlink notification about the eth0 netdev iface */ - std::vector flags = {IFF_UP}; - struct nl_object* msg = draft_nlmsg("eth0", - flags, - "", - "00:50:56:28:0e:4a", - 16222, - 9100, - 0); - sync.onMsg(RTM_NEWLINK, msg); - - /* Verify if the update has been written to State DB */ - std::string oper_status; - ASSERT_EQ(sync.m_stateMgmtPortTable.hget("eth0", "oper_status", oper_status), true); - ASSERT_EQ(oper_status, "down"); - - /* Free Nl_object */ - free_nlobj(msg); - } - TEST_F(PortSyncdTest, test_onMsgIgnoreOldNetDev){ if_ni_mock = populateNetDevAdvanced(); swss::LinkSync sync(m_app_db.get(), m_state_db.get()); diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index 713238e9cdf..0cdda7812db 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -426,7 +426,11 @@ namespace qosorch_test gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); ASSERT_EQ(tunnel_decap_orch, nullptr); - tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector tunnel_tables = { + APP_TUNNEL_DECAP_TABLE_NAME, + APP_TUNNEL_DECAP_TERM_TABLE_NAME + }; + tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, @@ -605,6 +609,9 @@ namespace qosorch_test delete gQosOrch; gQosOrch = nullptr; + delete gBufferOrch; + gBufferOrch = nullptr; + delete tunnel_decap_orch; tunnel_decap_orch = nullptr; @@ -1160,6 +1167,7 @@ namespace qosorch_test static_cast(gQosOrch)->doTask(); // Check DSCP_TO_TC_MAP|AZURE is applied to switch ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); // Remove global DSCP_TO_TC_MAP entries.push_back({"global", "DEL", {}}); @@ -1182,7 +1190,37 @@ namespace qosorch_test // Check DSCP_TO_TC_MAP|AZURE is removed, and the switch_level dscp_to_tc_map is set to NULL ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + + // Run the test in reverse order + entries.push_back({"global", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + // Try draining PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + + entries.push_back({"AZURE", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Try draining DSCP_TO_TC_MAP and PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "global", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); } TEST_F(QosOrchTest, QosOrchTestRetryFirstItem) diff --git a/tests/mock_tests/response_publisher/response_publisher_ut.cpp b/tests/mock_tests/response_publisher/response_publisher_ut.cpp index 9e836bad043..ca4956fdcfa 100644 --- a/tests/mock_tests/response_publisher/response_publisher_ut.cpp +++ b/tests/mock_tests/response_publisher/response_publisher_ut.cpp @@ -9,7 +9,7 @@ TEST(ResponsePublisher, TestPublish) DBConnector conn{"APPL_STATE_DB", 0}; Table stateTable{&conn, "SOME_TABLE"}; std::string value; - ResponsePublisher publisher{}; + ResponsePublisher publisher{"APPL_STATE_DB"}; publisher.publish("SOME_TABLE", "SOME_KEY", {{"field", "value"}}, ReturnCode(SAI_STATUS_SUCCESS)); ASSERT_TRUE(stateTable.hget("SOME_KEY", "field", value)); @@ -21,7 +21,7 @@ TEST(ResponsePublisher, TestPublishBuffered) DBConnector conn{"APPL_STATE_DB", 0}; Table stateTable{&conn, "SOME_TABLE"}; std::string value; - ResponsePublisher publisher{}; + ResponsePublisher publisher{"APPL_STATE_DB"}; publisher.setBuffered(true); diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 8073aee92b0..103dd888f92 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -8,6 +8,7 @@ #include "mock_orchagent_main.h" #include "mock_table.h" #include "mock_response_publisher.h" +#include "mock_sai_api.h" #include "bulker.h" extern string gMySwitchType; @@ -16,21 +17,61 @@ extern std::unique_ptr gMockResponsePublisher; using ::testing::_; +EXTERN_MOCK_FNS + namespace routeorch_test { using namespace std; + using ::testing::SetArrayArgument; + using ::testing::Return; + using ::testing::DoAll; + + static bool stateRouteStateFieldExists(swss::DBConnector* state_db, + const std::string& prefix) + { + Table stateRoute(state_db, "ROUTE_TABLE"); + std::vector fvs; + stateRoute.get(prefix, fvs); + for (const auto &fv : fvs) + if (fvField(fv) == "state") + return true; + return false; + } + + static bool waitStateRouteState(swss::DBConnector* state_db, + const std::string& prefix, + const std::string& want, + int attempts = 30) + { + Table stateRoute(state_db, "ROUTE_TABLE"); + for (int i = 0; i < attempts; ++i) + { + std::vector fvs; + stateRoute.get(prefix, fvs); + for (const auto &fv : fvs) + if (fvField(fv) == "state" && fvValue(fv) == want) + return true; + + // Let orch process any pending work again + static_cast(gRouteOrch)->doTask(); + } + return false; + } + + DEFINE_SAI_API_MOCK_SPECIFY_ENTRY_WITH_SET(route, route); shared_ptr m_app_db; shared_ptr m_config_db; shared_ptr m_state_db; shared_ptr m_chassis_app_db; - int create_route_count; - int set_route_count; - int remove_route_count; - int sai_fail_count; + int create_route_count = 0; + int set_route_count = 0; + int remove_route_count = 0; + int sai_fail_count = 0; + int drop_set_count = 0; - sai_route_api_t ut_sai_route_api; + // sai_route_api_t ut_sai_route_api; sai_route_api_t *pold_sai_route_api; sai_bulk_create_route_entry_fn old_create_route_entries; @@ -86,6 +127,10 @@ namespace routeorch_test } } + if(drop) { + drop_set_count++; + } + // Drop and a valid nexthop can not be provided for the same prefix if (drop && valid_nexthop) sai_fail_count++; @@ -109,13 +154,15 @@ namespace routeorch_test ut_helper::initSaiApi(profile); + INIT_SAI_API_MOCK(route); + MockSaiApis(); + // Hack the route create function old_create_route_entries = sai_route_api->create_route_entries; old_remove_route_entries = sai_route_api->remove_route_entries; old_set_route_entries_attribute = sai_route_api->set_route_entries_attribute; pold_sai_route_api = sai_route_api; - ut_sai_route_api = *sai_route_api; sai_route_api = &ut_sai_route_api; sai_route_api->create_route_entries = _ut_stub_sai_bulk_create_route_entry; @@ -182,6 +229,7 @@ namespace routeorch_test ASSERT_EQ(gPortsOrch, nullptr); gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + gDirectory.set(gPortsOrch); vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME @@ -197,6 +245,10 @@ namespace routeorch_test ASSERT_EQ(gVrfOrch, nullptr); gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + + EvpnNvoOrch *evpn_orch = new EvpnNvoOrch(m_app_db.get(), APP_VXLAN_EVPN_NVO_TABLE_NAME); + gDirectory.set(evpn_orch); ASSERT_EQ(gIntfsOrch, nullptr); gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); @@ -217,12 +269,18 @@ namespace routeorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); - TunnelDecapOrch *tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + ASSERT_EQ(gTunneldecapOrch, nullptr); + vector tunnel_tables = { + APP_TUNNEL_DECAP_TABLE_NAME, + APP_TUNNEL_DECAP_TERM_TABLE_NAME + }; + gTunneldecapOrch = new TunnelDecapOrch(m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + vector mux_tables = { CFG_MUX_CABLE_TABLE_NAME, CFG_PEER_SWITCH_TABLE_NAME }; - MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + MuxOrch *mux_orch = new MuxOrch(m_config_db.get(), mux_tables, gTunneldecapOrch, gNeighOrch, gFdbOrch); gDirectory.set(mux_orch); ASSERT_EQ(gFgNhgOrch, nullptr); @@ -236,11 +294,16 @@ namespace routeorch_test gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); ASSERT_EQ(gSrv6Orch, nullptr); - vector srv6_tables = { - APP_SRV6_SID_LIST_TABLE_NAME, - APP_SRV6_MY_SID_TABLE_NAME + TableConnector srv6_sid_list_table(m_app_db.get(), APP_SRV6_SID_LIST_TABLE_NAME); + TableConnector srv6_my_sid_table(m_app_db.get(), APP_SRV6_MY_SID_TABLE_NAME); + TableConnector srv6_my_sid_cfg_table(m_config_db.get(), CFG_SRV6_MY_SID_TABLE_NAME); + + vector srv6_tables = { + srv6_sid_list_table, + srv6_my_sid_table, + srv6_my_sid_cfg_table }; - gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + gSrv6Orch = new Srv6Orch(m_config_db.get(), m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); ASSERT_EQ(gRouteOrch, nullptr); const int routeorch_pri = 5; @@ -270,6 +333,7 @@ namespace routeorch_test for (const auto &it : ports) { portTable.set(it.first, it.second); + portTable.set(it.first, {{ "oper_status", "up" }}); } // Set PortConfigDone @@ -282,6 +346,10 @@ namespace routeorch_test static_cast(gPortsOrch)->doTask(); Table intfTable = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + intfTable.set("Loopback0", { {"NULL", "NULL" }, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Loopback0:10.1.0.32/32", { { "scope", "global" }, + { "family", "IPv4" }}); intfTable.set("Ethernet0", { {"NULL", "NULL" }, {"mac_addr", "00:00:00:00:00:00" }}); intfTable.set("Ethernet0:10.0.0.1/24", { { "scope", "global" }, @@ -290,6 +358,11 @@ namespace routeorch_test {"mac_addr", "00:00:00:00:00:00" }}); intfTable.set("Ethernet4:11.0.0.1/32", { { "scope", "global" }, { "family", "IPv4" }}); + intfTable.set("Ethernet8", { {"NULL", "NULL" }, + {"vrf_name", "Vrf1"}, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Ethernet8:20.0.0.1/24", { { "scope", "global" }, + { "family", "IPv4" }}); gIntfsOrch->addExistingData(&intfTable); static_cast(gIntfsOrch)->doTask(); @@ -315,6 +388,9 @@ namespace routeorch_test void TearDown() override { + RestoreSaiApis(); + DEINIT_SAI_API_MOCK(route); + gDirectory.m_values.clear(); delete gCrmOrch; @@ -335,6 +411,9 @@ namespace routeorch_test delete gNeighOrch; gNeighOrch = nullptr; + delete gTunneldecapOrch; + gTunneldecapOrch = nullptr; + delete gFdbOrch; gFdbOrch = nullptr; @@ -347,11 +426,217 @@ namespace routeorch_test delete gPortsOrch; gPortsOrch = nullptr; + delete gBufferOrch; + gBufferOrch = nullptr; + sai_route_api = pold_sai_route_api; ut_helper::uninitSaiApi(); } }; + TEST_F(RouteOrchTest, RouteOrch_AddDeleteIPv6) + { + // Add IPv6 interface IPs (like the pytest does) and an IPv6 neighbor. + { + Table intfTable(m_app_db.get(), APP_INTF_TABLE_NAME); + intfTable.set("Ethernet0:2000::1/64", { {"scope","global"}, {"family","IPv6"} }); + intfTable.set("Ethernet4:2001::1/64", { {"scope","global"}, {"family","IPv6"} }); + gIntfsOrch->addExistingData(&intfTable); + static_cast(gIntfsOrch)->doTask(); + + Table neighborTable(m_app_db.get(), APP_NEIGH_TABLE_NAME); + neighborTable.set("Ethernet0:2000::2", { {"neigh","00:00:00:00:00:22"}, {"family","IPv6"} }); + gNeighOrch->addExistingData(&neighborTable); + static_cast(gNeighOrch)->doTask(); + } + + auto *routeConsumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + ASSERT_NE(routeConsumer, nullptr); + + // PART A: Add/Remove IPv6 prefix 3000::/64 via 2000::2 on Ethernet0 + { + std::deque entries; + entries.push_back({ "3000::/64", "SET", + { {"ifname","Ethernet0"}, {"nexthop","2000::2"} }}); + routeConsumer->addToSync(entries); + + auto base_create = create_route_count; + auto base_set = set_route_count; + auto base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // Expect CREATE +1 for new route, no SET/REMOVE yet + ASSERT_EQ(base_create + 1, create_route_count); + ASSERT_EQ(base_set, set_route_count); + ASSERT_EQ(base_remove, remove_route_count); + + // Remove the route + entries.clear(); + entries.push_back({ "3000::/64", "DEL", {} }); + routeConsumer->addToSync(entries); + + base_create = create_route_count; + base_set = set_route_count; + base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // Expect REMOVE +1, create/set unchanged + ASSERT_EQ(base_create, create_route_count); + ASSERT_EQ(base_set, set_route_count); + ASSERT_EQ(base_remove + 1, remove_route_count); + } + + // PART B: IPv6 default route (::/0): SET to add (state -> ok), DEL to remove (state -> na) + { + const std::string def6 = "::/0"; + const bool hasStateField = stateRouteStateFieldExists(m_state_db.get(), def6); + + // Add default v6 route (::/0) via SET path + std::deque entries; + entries.push_back({ def6, "SET", { {"ifname","Ethernet0"}, {"nexthop","2000::2"} }}); + routeConsumer->addToSync(entries); + + auto base_create = create_route_count; + auto base_set = set_route_count; + auto base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // Default route typically programs via attribute SET (no create/remove) + ASSERT_EQ(base_create, create_route_count); + ASSERT_EQ(base_remove, remove_route_count); + ASSERT_EQ(base_set + 1, set_route_count); + ASSERT_EQ(sai_fail_count, 0); + + if (hasStateField) + { + ASSERT_TRUE(waitStateRouteState(m_state_db.get(), def6, "ok")) + << "Expected IPv6 default-route state to become 'ok' after SET."; + } + + // Now delete the default v6 route + entries.clear(); + entries.push_back({ def6, "DEL", {} }); + routeConsumer->addToSync(entries); + + base_create = create_route_count; + base_set = set_route_count; + base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // Expect another SET (no create/remove), and no invalid SAI programming + ASSERT_EQ(base_create, create_route_count); + ASSERT_EQ(base_remove, remove_route_count); + ASSERT_EQ(base_set + 1, set_route_count); + ASSERT_EQ(sai_fail_count, 0); + + if (hasStateField) + { + ASSERT_TRUE(waitStateRouteState(m_state_db.get(), def6, "na")) + << "Expected IPv6 default-route state to become 'na' after DEL."; + } + } + } + + TEST_F(RouteOrchTest, RouteOrch_AddDeleteIPv4) + { + auto *routeConsumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + ASSERT_NE(routeConsumer, nullptr); + + // PART A: Regular prefix add/remove (2.2.2.0/24) + { + std::deque entries; + entries.push_back({ "2.2.2.0/24", "SET", + { {"ifname","Ethernet0"}, {"nexthop","10.0.0.2"} }}); + routeConsumer->addToSync(entries); + + auto base_create = create_route_count; + auto base_set = set_route_count; + auto base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // Expect create +1, set unchanged, remove unchanged + ASSERT_EQ(base_create + 1, create_route_count); + ASSERT_EQ(base_set, set_route_count); + ASSERT_EQ(base_remove, remove_route_count); + + // Now remove the route + entries.clear(); + entries.push_back({ "2.2.2.0/24", "DEL", {} }); + routeConsumer->addToSync(entries); + + base_create = create_route_count; + base_set = set_route_count; + base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // Expect remove +1, create/set unchanged + ASSERT_EQ(base_create, create_route_count); + ASSERT_EQ(base_set, set_route_count); + ASSERT_EQ(base_remove + 1, remove_route_count); + } + + // PART B: Default route DEL -> state 'na' -> SET -> state 'ok' + { + const std::string def = "0.0.0.0/0"; + ASSERT_TRUE(stateRouteStateFieldExists(m_state_db.get(), def)) + << "Expected STATE_DB:ROUTE_TABLE to expose 'state' for the default route."; + + // SetUp() seeds a default route; if state is exposed, it should become 'ok' + + ASSERT_TRUE(waitStateRouteState(m_state_db.get(), def, "ok")) + << "Expected initial default-route state to become 'ok'."; + + + // DEL default route + std::deque entries; + entries.push_back({ def, "DEL", {} }); + routeConsumer->addToSync(entries); + + auto base_create = create_route_count; + auto base_set = set_route_count; + auto base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // For default route, expect attribute SET path (no create/remove), set +1 + ASSERT_EQ(base_create, create_route_count); + ASSERT_EQ(base_remove, remove_route_count); + ASSERT_EQ(base_set + 1, set_route_count); + ASSERT_EQ(sai_fail_count, 0); + + ASSERT_TRUE(waitStateRouteState(m_state_db.get(), def, "na")) + << "Expected default-route state to become 'na' after DEL."; + + + // Re-SET default route + entries.clear(); + entries.push_back({ def, "SET", { {"ifname","Ethernet0"}, {"nexthop","10.0.0.2"} }}); + routeConsumer->addToSync(entries); + + base_create = create_route_count; + base_set = set_route_count; + base_remove = remove_route_count; + + static_cast(gRouteOrch)->doTask(); + + // Expect another SET (no create/remove) + ASSERT_EQ(base_create, create_route_count); + ASSERT_EQ(base_remove, remove_route_count); + ASSERT_EQ(base_set + 1, set_route_count); + ASSERT_EQ(sai_fail_count, 0); + + ASSERT_TRUE(waitStateRouteState(m_state_db.get(), def, "ok")) + << "Expected default-route state to return to 'ok' after re-SET."; + + } + } + TEST_F(RouteOrchTest, RouteOrchTestDelSetSameNexthop) { std::deque entries; @@ -484,4 +769,261 @@ namespace routeorch_test gMockResponsePublisher.reset(); } + + TEST_F(RouteOrchTest, RouteOrchLoopbackRoute) + { + gMockResponsePublisher = std::make_unique(); + + std::deque entries; + std::string key = "fc00:1::/64"; + std::vector fvs{{"ifname", "Loopback"}, {"nexthop", "::"}, {"protocol", "static"}}; + entries.push_back({key, "SET", fvs}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "static"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + gMockResponsePublisher.reset(); + } + + TEST_F(RouteOrchTest, RouteOrchTestInvalidEvpnRoute) + { + std::deque entries; + entries.push_back({"Vrf1", "SET", { {"vni", "500100"}, {"v4", "true"}}}); + auto consumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + + entries.clear(); + entries.push_back({"Vrf1:1.1.1.0/24", "SET", { {"ifname", "Ethernet0,Ethernet0"}, + {"nexthop", "10.0.0.2,10.0.0.3"}, + {"vni_label", "500100"}, + {"router_mac", "7e:f0:c0:e4:b2:5a,7e:f0:c0:e4:b2:5b"}}}); + entries.push_back({"Vrf1:2.1.1.0/24", "SET", { {"ifname", "Ethernet0,Ethernet0"}, + {"nexthop", "10.0.0.2,10.0.0.3"}, + {"vni_label", "500100,500100"}, + {"router_mac", "7e:f0:c0:e4:b2:5b"}}}); + consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + auto current_create_count = create_route_count; + auto current_set_count = set_route_count; + + static_cast(gRouteOrch)->doTask(); + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_set_count, set_route_count); + } + + TEST_F(RouteOrchTest, RouteOrchTestVrfRoute) + { + std::deque entries; + entries.push_back({"Vrf2", "SET", { {"vni", "500200"}}}); + auto vrfConsumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + vrfConsumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + entries.clear(); + entries.push_back({"Ethernet8", "SET", { {"vrf_name", "Vrf2"}}}); + auto intfConsumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + intfConsumer->addToSync(entries); + static_cast(gIntfsOrch)->doTask(); + auto routeConsumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + entries.clear(); + entries.push_back({"Vrf2:fe80::/64", "DEL", {}}); + entries.push_back({"Vrf2:20.0.0.0/24", "DEL", {}}); + entries.push_back({"Vrf2:fe80::/64", "SET", { {"protocol", "kernel"}, + {"nexthop", "::"}, + {"ifname", "Ethernet8"}}}); + entries.push_back({"Vrf2:20.0.0.0/24", "SET", { {"protocol", "kernel"}, + {"nexthop", "0.0.0.0"}, + {"ifname", "Ethernet8"}}}); + routeConsumer->addToSync(entries); + static_cast(gRouteOrch)->doTask(); + } + + /* Tests SAI_STATUS_ITEM_NOT_FOUND error handling for setting route */ + TEST_F(RouteOrchTest, RouteOrchSetItemNotFound) + { + IpPrefix prefix("1.1.1.0/32"); + NextHopGroupKey nhg_key("10.0.0.2"); + RouteNhg route_nhg(nhg_key, ""); + + gRouteOrch->m_syncdRoutes[gVirtualRouterId][prefix] = route_nhg; + + std::deque entries; + entries.push_back({"1.1.1.0/32", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.3"}}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + std::vector exp_status{SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(*mock_sai_route_api, set_route_entries_attribute) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_ITEM_NOT_FOUND))); + static_cast(gRouteOrch)->doTask(); + + exp_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(*mock_sai_route_api, create_route_entries) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + static_cast(gRouteOrch)->doTask(); + } + + /* Test default route DEL followed by SET scenario to verify bulker state handling */ + TEST_F(RouteOrchTest, RouteOrchTestDefaultRouteDelSetBulkerState) + { + // This test verifies the fix for the default route race condition where: + // 1. A DEL event occurs and automatically adds a DROP action (creating a setting_entry in bulker) + // 2. A subsequent SET operation needs to check for both pending removals AND pending sets + // 3. The bulk_entry_pending_removal_or_set() method should detect the pending operation + + std::deque entries; + + // First, delete the default route (0.0.0.0/0) that was set up in SetUp() + // This simulates a scenario where the default route is being removed + entries.push_back({"0.0.0.0/0", "DEL", {}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + auto current_drop_set_count = drop_set_count; + + // Process the DEL operation + static_cast(gRouteOrch)->doTask(); + + // Verify that remove translated to a set + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count + 1, set_route_count); + + // Verify that we set a DROP action + ASSERT_EQ(current_drop_set_count + 1, drop_set_count); + + // Now immediately SET the default route with a new nexthop + // This simulates a rapid DEL/SET sequence that can happen in production + entries.clear(); + entries.push_back({"0.0.0.0/0", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.3"}}}); + + consumer->addToSync(entries); + current_create_count = create_route_count; + current_remove_count = remove_route_count; + current_set_count = set_route_count; + current_drop_set_count = drop_set_count; + + // Process the SET operation + static_cast(gRouteOrch)->doTask(); + + // Verify that create was not called for the new route, instead orchagent + // would only set the pre-existing route + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count + 1, set_route_count); + // Verify that we do not set a DROP action + ASSERT_EQ(current_drop_set_count, drop_set_count); + + // Verify the bulker state is clean after processing + // The bulker should have flushed all pending operations + ASSERT_EQ(gRouteOrch->gRouteBulker.creating_entries_count(), 0); + ASSERT_EQ(gRouteOrch->gRouteBulker.setting_entries_count(), 0); + ASSERT_EQ(gRouteOrch->gRouteBulker.removing_entries_count(), 0); + } + + /* Test default route DEL and SET in same bulk operation */ + TEST_F(RouteOrchTest, RouteOrchTestDefaultRouteDelSetSameBulk) + { + // This test verifies that when DEL and SET for default route come in the same bulk, + // the bulker correctly handles the pending operations using bulk_entry_pending_removal_or_set() + + std::deque entries; + + // Add both DEL and SET for default route in the same bulk + entries.push_back({"0.0.0.0/0", "DEL", {}}); + entries.push_back({"0.0.0.0/0", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "10.0.0.3"}}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + auto current_drop_set_count = drop_set_count; + + // Process both operations in one doTask() call + static_cast(gRouteOrch)->doTask(); + + // Verify that set was called + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count + 1, set_route_count); + // Verify that we do not set a DROP action + ASSERT_EQ(current_drop_set_count, drop_set_count); + + // Verify the bulker state is clean after processing + ASSERT_EQ(gRouteOrch->gRouteBulker.creating_entries_count(), 0); + ASSERT_EQ(gRouteOrch->gRouteBulker.setting_entries_count(), 0); + ASSERT_EQ(gRouteOrch->gRouteBulker.removing_entries_count(), 0); + } + + /* Test IPv6 default route DEL followed by SET */ + TEST_F(RouteOrchTest, RouteOrchTestIPv6DefaultRouteDelSet) + { + // Test the same scenario with IPv6 default route (::/0) + // to ensure the fix works for both address families + + std::deque entries; + + // First, create an IPv6 default route + entries.push_back({"::/0", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "fc00::2"}}}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + auto current_create_count = create_route_count; + auto current_remove_count = remove_route_count; + auto current_set_count = set_route_count; + auto current_drop_set_count = drop_set_count; + + // Process the initial SET + static_cast(gRouteOrch)->doTask(); + + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_set_count, set_route_count); + // Verify that we do not set a DROP action + ASSERT_EQ(current_drop_set_count, drop_set_count); + + // Now test DEL followed by SET + entries.clear(); + entries.push_back({"::/0", "DEL", {}}); + entries.push_back({"::/0", "SET", { {"ifname", "Ethernet0"}, + {"nexthop", "fc00::3"}}}); + + consumer->addToSync(entries); + current_create_count = create_route_count; + current_remove_count = remove_route_count; + current_set_count = set_route_count; + current_drop_set_count = drop_set_count; + + // Process both operations + static_cast(gRouteOrch)->doTask(); + + // Verify that both remove and create were called + ASSERT_EQ(current_remove_count, remove_route_count); + ASSERT_EQ(current_create_count, create_route_count); + ASSERT_EQ(current_set_count + 1, set_route_count); + // Verify that DROP action happens. This is because the nexthop used + // ("fc00::3") is not known to m_neighOrch. + ASSERT_EQ(current_drop_set_count + 1, drop_set_count); + + // Verify the bulker state is clean + ASSERT_EQ(gRouteOrch->gRouteBulker.creating_entries_count(), 0); + ASSERT_EQ(gRouteOrch->gRouteBulker.setting_entries_count(), 0); + ASSERT_EQ(gRouteOrch->gRouteBulker.removing_entries_count(), 0); + } } diff --git a/tests/mock_tests/saispy.h b/tests/mock_tests/saispy.h index a8a5925fd77..1bea90c41ef 100644 --- a/tests/mock_tests/saispy.h +++ b/tests/mock_tests/saispy.h @@ -33,14 +33,27 @@ struct SaiSpyFunctor using original_fn_ptr_t = R (**)(arglist...); original_fn_t original_fn; + original_fn_ptr_t original_fn_ptr; static std::function fake; SaiSpyFunctor(original_fn_ptr_t fn_ptr) : - original_fn(*fn_ptr) + original_fn(*fn_ptr), + original_fn_ptr(fn_ptr) { *fn_ptr = spy; } + SaiSpyFunctor(const SaiSpyFunctor&) = delete; + SaiSpyFunctor &operator=(const SaiSpyFunctor&) = delete; + + SaiSpyFunctor(SaiSpyFunctor&&) noexcept = delete; + SaiSpyFunctor &operator=(SaiSpyFunctor&&) noexcept = delete; + + ~SaiSpyFunctor() + { + *original_fn_ptr = original_fn; + } + void callFake(std::function fn) { fake = fn; @@ -105,3 +118,13 @@ std::shared_ptr(fn_ptr); } + +// get bulk entry attribute +template +std::shared_ptr> + SpyOn(sai_status_t (**fn_ptr)(uint32_t, const sai_object_id_t*, const uint32_t*, sai_attribute_t**, sai_bulk_op_error_mode_t, sai_status_t*)) +{ + using SaiSpyGetAttrFunctor = SaiSpyFunctor; + + return std::make_shared(fn_ptr); +} diff --git a/tests/mock_tests/saispy_ut.cpp b/tests/mock_tests/saispy_ut.cpp index 075db3c0aba..50bae76338a 100644 --- a/tests/mock_tests/saispy_ut.cpp +++ b/tests/mock_tests/saispy_ut.cpp @@ -217,3 +217,38 @@ TEST(SaiSpy, create_switch_and_acl_table) switch_api->create_switch(&oid, 0, nullptr); ASSERT_EQ(oid, exp_oid_2); } + +TEST(SaiSpy, Destructor) +{ + auto acl_api = std::make_shared(); + + acl_api->create_acl_table = [](sai_object_id_t *oid, sai_object_id_t, uint32_t, + const sai_attribute_t *) { + *oid = 1; + return (sai_status_t)SAI_STATUS_SUCCESS; + }; + + sai_object_id_t oid; + + auto status = acl_api->create_acl_table(&oid, 1, 0, nullptr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + ASSERT_EQ(oid, 1); + + // Setup SAI spy in a limited scope. + { + auto aclSpy = SpyOn(&acl_api.get()->create_acl_table); + aclSpy->callFake([&](sai_object_id_t *oid, sai_object_id_t, uint32_t, const sai_attribute_t *) -> sai_status_t { + *oid = 2; + return SAI_STATUS_SUCCESS; + }); + + status = acl_api->create_acl_table(&oid, 1, 0, nullptr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + ASSERT_EQ(oid, 2); + } + + // Make sure original SAI API is restored. + status = acl_api->create_acl_table(&oid, 1, 0, nullptr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + ASSERT_EQ(oid, 1); +} diff --git a/tests/mock_tests/stporch_ut.cpp b/tests/mock_tests/stporch_ut.cpp new file mode 100644 index 00000000000..1a8d9ce88c2 --- /dev/null +++ b/tests/mock_tests/stporch_ut.cpp @@ -0,0 +1,290 @@ +#include +#include + +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "dbconnector.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "mock_orch_test.h" +#include "mock_table.h" +#define private public +#include "stporch.h" +#undef private +#include "mock_sai_stp.h" + + +namespace stporch_test +{ + using namespace std; + using namespace swss; + using namespace mock_orch_test; + using ::testing::StrictMock; + + using ::testing::_; + using ::testing::Return; + using ::gStpOrch; + + sai_status_t _ut_stub_sai_set_vlan_attribute(_In_ sai_object_id_t vlan_oid, + _In_ const sai_attribute_t *attr) + { + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_flush_fdb_entries(_In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list) + { + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_get_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + return SAI_STATUS_SUCCESS; + if (attr_count == 2) + { + if (attr_list[0].id == SAI_SWITCH_ATTR_DEFAULT_STP_INST_ID) + { + attr_list[0].value.oid = 0; + } + if (attr_list[1].id == SAI_SWITCH_ATTR_MAX_STP_INSTANCE) + { + attr_list[1].value.u32 = 510; + } + } + return SAI_STATUS_SUCCESS; + } + + + class StpOrchTest : public MockOrchTest { + protected: + void ApplyInitialConfigs() + { + Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); + Table vlan_member_table = Table(m_app_db.get(), APP_VLAN_MEMBER_TABLE_NAME); + + auto ports = ut_helper::getInitialSaiPorts(); + port_table.set(ETHERNET0, ports[ETHERNET0]); + port_table.set(ETHERNET4, ports[ETHERNET4]); + port_table.set(ETHERNET8, ports[ETHERNET8]); + port_table.set("PortConfigDone", { { "count", to_string(1) } }); + port_table.set("PortInitDone", { {} }); + + vlan_table.set(VLAN_1000, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "00:aa:bb:cc:dd:ee" } }); + vlan_member_table.set( + VLAN_1000 + vlan_member_table.getTableNameSeparator() + ETHERNET0, + { { "tagging_mode", "untagged" } }); + + gPortsOrch->addExistingData(&port_table); + gPortsOrch->addExistingData(&vlan_table); + gPortsOrch->addExistingData(&vlan_member_table); + static_cast(gPortsOrch)->doTask(); + } + void PostSetUp() override + { + vector tableNames = + {"STP_TABLE", + "STP_VLAN_INSTANCE_TABLE", + "STP_PORT_STATE_TABLE", + "STP_FASTAGEING_FLUSH_TABLE", + "STP_INST_PORT_FLUSH_TABLE"}; + _hook_sai_switch_api(); + gStpOrch = new StpOrch(m_app_db.get(), m_state_db.get(), tableNames); + } + void PreTearDown() override + { + delete gStpOrch; + gStpOrch = nullptr; + _unhook_sai_switch_api(); + } + + sai_stp_api_t ut_sai_stp_api; + sai_stp_api_t *org_sai_stp_api; + + void _hook_sai_stp_api() + { + ut_sai_stp_api = *sai_stp_api; + org_sai_stp_api = sai_stp_api; + sai_stp_api = &ut_sai_stp_api; + } + + void _unhook_sai_stp_api() + { + sai_stp_api = org_sai_stp_api; + } + + sai_vlan_api_t ut_sai_vlan_api; + sai_vlan_api_t *org_sai_vlan_api; + + void _hook_sai_vlan_api() + { + ut_sai_vlan_api = *sai_vlan_api; + org_sai_vlan_api = sai_vlan_api; + ut_sai_vlan_api.set_vlan_attribute = _ut_stub_sai_set_vlan_attribute; + sai_vlan_api = &ut_sai_vlan_api; + } + + void _unhook_sai_vlan_api() + { + sai_vlan_api = org_sai_vlan_api; + } + + sai_fdb_api_t ut_sai_fdb_api; + sai_fdb_api_t *org_sai_fdb_api; + void _hook_sai_fdb_api() + { + ut_sai_fdb_api = *sai_fdb_api; + org_sai_fdb_api = sai_fdb_api; + ut_sai_fdb_api.flush_fdb_entries = _ut_stub_sai_flush_fdb_entries; + sai_fdb_api = &ut_sai_fdb_api; + } + + void _unhook_sai_fdb_api() + { + sai_fdb_api = org_sai_fdb_api; + } + + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *pold_sai_switch_api; + void _hook_sai_switch_api() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.get_switch_attribute = _ut_stub_sai_get_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + } + }; + + TEST_F(StpOrchTest, TestAddRemoveStpPort) { + _hook_sai_stp_api(); + _hook_sai_vlan_api(); + _hook_sai_fdb_api(); + + StrictMock mock_sai_stp_; + mock_sai_stp = &mock_sai_stp_; + sai_stp_api->create_stp = mock_create_stp; + sai_stp_api->remove_stp = mock_remove_stp; + sai_stp_api->create_stp_port = mock_create_stp_port; + sai_stp_api->remove_stp_port = mock_remove_stp_port; + sai_stp_api->set_stp_port_attribute = mock_set_stp_port_attribute; + + Port port; + Port port1; + sai_uint16_t stp_instance = 1; + sai_object_id_t stp_port_oid = 67890; + sai_object_id_t stp_oid = 98765; + bool result; + + ASSERT_TRUE(gPortsOrch->getPort(ETHERNET0, port)); + ASSERT_TRUE(gPortsOrch->getPort(ETHERNET4, port1)); + + EXPECT_CALL(mock_sai_stp_, + create_stp(_, _, _, _)).WillOnce(::testing::DoAll(::testing::SetArgPointee<0>(stp_oid), + ::testing::Return(SAI_STATUS_SUCCESS))); + result = gStpOrch->addVlanToStpInstance(VLAN_1000, stp_instance); + ASSERT_TRUE(result); + + EXPECT_CALL(mock_sai_stp_, + create_stp_port(_, _, 3, _)).WillOnce(::testing::DoAll(::testing::SetArgPointee<0>(stp_port_oid), + ::testing::Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_stp_, + set_stp_port_attribute(_,_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + port.m_bridge_port_id = 1234; + result = gStpOrch->updateStpPortState(port, stp_instance, STP_STATE_FORWARDING); + ASSERT_TRUE(result); + + result = gStpOrch->stpVlanFdbFlush(VLAN_1000); + ASSERT_TRUE(result); + + EXPECT_CALL(mock_sai_stp_, + remove_stp_port(_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + result = gStpOrch->removeStpPort(port, stp_instance); + ASSERT_TRUE(result); + + EXPECT_CALL(mock_sai_stp_, + create_stp_port(_, _, 3, _)).WillOnce(::testing::DoAll(::testing::SetArgPointee<0>(stp_port_oid), + ::testing::Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_stp_, + set_stp_port_attribute(_,_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + port1.m_bridge_port_id = 1111; + result = gStpOrch->updateStpPortState(port1, stp_instance, STP_STATE_BLOCKING); + ASSERT_TRUE(result); + + EXPECT_CALL(mock_sai_stp_, + remove_stp_port(_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + result = gStpOrch->removeStpPorts(port1); + ASSERT_TRUE(result); + + EXPECT_CALL(mock_sai_stp_, + remove_stp(_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + result = gStpOrch->removeVlanFromStpInstance(VLAN_1000, stp_instance); + ASSERT_TRUE(result); + + std::deque entries; + entries.push_back({"Vlan1000", "SET", { {"stp_instance", "1"}}}); + EXPECT_CALL(mock_sai_stp_, + create_stp(_, _, _, _)).WillOnce(::testing::DoAll(::testing::SetArgPointee<0>(stp_oid), + ::testing::Return(SAI_STATUS_SUCCESS))); + + auto consumer = dynamic_cast(gStpOrch->getExecutor("STP_VLAN_INSTANCE_TABLE")); + consumer->addToSync(entries); + static_cast(gStpOrch)->doTask(); + + entries.clear(); + EXPECT_CALL(mock_sai_stp_, + create_stp_port(_, _, 3, _)).WillOnce(::testing::DoAll(::testing::SetArgPointee<0>(stp_port_oid), + ::testing::Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_stp_, + set_stp_port_attribute(_,_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + entries.push_back({"Ethernet0:1", "SET", { {"state", "4"}}}); + consumer = dynamic_cast(gStpOrch->getExecutor("STP_PORT_STATE_TABLE")); + consumer->addToSync(entries); + static_cast(gStpOrch)->doTask(); + + entries.clear(); + entries.push_back({"Ethernet0:1", "SET", { {"state", "true"}}}); + consumer = dynamic_cast(gStpOrch->getExecutor("STP_FASTAGEING_FLUSH_TABLE")); + consumer->addToSync(entries); + static_cast(gStpOrch)->doTask(); + + entries.clear(); + entries.push_back({"Ethernet0:1", "DEL", { {} }}); + EXPECT_CALL(mock_sai_stp_, + remove_stp_port(_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + consumer = dynamic_cast(gStpOrch->getExecutor("STP_PORT_STATE_TABLE")); + consumer->addToSync(entries); + static_cast(gStpOrch)->doTask(); + + entries.clear(); + entries.push_back({"Vlan1000", "DEL", { {} }}); + EXPECT_CALL(mock_sai_stp_, + remove_stp(_)).WillOnce(::testing::Return(SAI_STATUS_SUCCESS)); + consumer = dynamic_cast(gStpOrch->getExecutor("STP_VLAN_INSTANCE_TABLE")); + consumer->addToSync(entries); + static_cast(gStpOrch)->doTask(); + + entries.clear(); + entries.push_back({"1:Ethernet0", "SET", { {"state", "true"} }}); + consumer = dynamic_cast(gStpOrch->getExecutor(APP_STP_INST_PORT_FLUSH_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gStpOrch)->doTask(); + + _unhook_sai_stp_api(); + _unhook_sai_vlan_api(); + _unhook_sai_fdb_api(); + } +} diff --git a/tests/mock_tests/switchorch_ut.cpp b/tests/mock_tests/switchorch_ut.cpp new file mode 100644 index 00000000000..b6fdf16d04f --- /dev/null +++ b/tests/mock_tests/switchorch_ut.cpp @@ -0,0 +1,354 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "mock_response_publisher.h" +#include "switchorch.h" + +extern void on_switch_asic_sdk_health_event(sai_object_id_t switch_id, + sai_switch_asic_sdk_health_severity_t severity, + sai_timespec_t timestamp, + sai_switch_asic_sdk_health_category_t category, + sai_switch_health_data_t data, + const sai_u8_list_t description); + +namespace switchorch_test +{ + using namespace std; + + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *pold_sai_switch_api; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + + sai_switch_attr_t _ut_stub_asic_sdk_health_event_attribute_to_check; + bool _ut_stub_asic_sdk_health_event_check_all; + uint32_t _ut_stub_asic_sdk_health_event_call_count; + map> _ut_stub_asic_sdk_health_event_category_sets; + set _ut_stub_asic_sdk_health_event_passed_categories; + + bool _ut_reg_event_unsupported; + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + switch (attr[0].id) + { + case SAI_SWITCH_ATTR_SWITCH_ASIC_SDK_HEALTH_EVENT_NOTIFY: + if (_ut_reg_event_unsupported) + { + return SAI_STATUS_NOT_IMPLEMENTED; + } + break; + case SAI_SWITCH_ATTR_REG_FATAL_SWITCH_ASIC_SDK_HEALTH_CATEGORY: + case SAI_SWITCH_ATTR_REG_WARNING_SWITCH_ASIC_SDK_HEALTH_CATEGORY: + case SAI_SWITCH_ATTR_REG_NOTICE_SWITCH_ASIC_SDK_HEALTH_CATEGORY: + if (_ut_stub_asic_sdk_health_event_check_all) + { + _ut_stub_asic_sdk_health_event_call_count++; + auto *passed_category_list = reinterpret_cast(attr[0].value.s32list.list); + _ut_stub_asic_sdk_health_event_category_sets[(sai_switch_attr_t)attr[0].id] = set(passed_category_list, passed_category_list + attr[0].value.s32list.count); + } + return SAI_STATUS_SUCCESS; + default: + break; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_apis() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_apis() + { + sai_switch_api = pold_sai_switch_api; + } + + struct SwitchOrchTest : public ::testing::Test + { + SwitchOrchTest() + { + } + + void SetUp() override + { + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + + _ut_reg_event_unsupported = false; + + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void initSwitchOrch() + { + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_suppress_asic_sdk_health_categories(m_config_db.get(), CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME); + + vector switch_tables = { + conf_asic_sensors, + conf_suppress_asic_sdk_health_categories, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + } + + void checkAsicSdkHealthEvent(const sai_timespec_t ×tamp, const string &expected_key="") + { + initSwitchOrch(); + + sai_switch_health_data_t data; + memset(&data, 0, sizeof(data)); + data.data_type = SAI_HEALTH_DATA_TYPE_GENERAL; + vector data_from_sai({100, 101, 115, 99, 114, 105, 112, 116, 105, 245, 111, 110, 245, 10, 123, 125, 100, 100}); + sai_u8_list_t description; + description.list = data_from_sai.data(); + description.count = (uint32_t)(data_from_sai.size() - 2); + on_switch_asic_sdk_health_event(gSwitchId, + SAI_SWITCH_ASIC_SDK_HEALTH_SEVERITY_FATAL, + timestamp, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW, + data, + description); + + string key; + if (expected_key.empty()) + { + vector keys; + gSwitchOrch->m_asicSdkHealthEventTable->getKeys(keys); + key = keys[0]; + } + else + { + key = expected_key; + } + string value; + gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "category", value); + ASSERT_EQ(value, "firmware"); + gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "severity", value); + ASSERT_EQ(value, "fatal"); + gSwitchOrch->m_asicSdkHealthEventTable->hget(key, "description", value); + ASSERT_EQ(value, "description\n{}"); + } + + void TearDown() override + { + ::testing_db::reset(); + + _ut_stub_asic_sdk_health_event_category_sets.clear(); + + gDirectory.m_values.clear(); + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(SwitchOrchTest, SwitchOrchTestSuppressCategories) + { + Table suppressAsicSdkHealthEventTable = Table(m_config_db.get(), CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME); + + suppressAsicSdkHealthEventTable.set("fatal", + { + {"max_events", "1000"} + }); + suppressAsicSdkHealthEventTable.set("warning", + { + {"categories", "software,firmware,cpu_hw,asic_hw"} + }); + + _ut_stub_asic_sdk_health_event_check_all = true; + auto call_count = _ut_stub_asic_sdk_health_event_call_count; + + _hook_sai_apis(); + initSwitchOrch(); + + vector ts; + std::deque entries; + set all_categories({ + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_SW, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_CPU_HW, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_ASIC_HW}); + set empty_category; + + call_count += 2; + ASSERT_EQ(_ut_stub_asic_sdk_health_event_call_count, call_count); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_FATAL_SWITCH_ASIC_SDK_HEALTH_CATEGORY], all_categories); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_WARNING_SWITCH_ASIC_SDK_HEALTH_CATEGORY], empty_category); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_NOTICE_SWITCH_ASIC_SDK_HEALTH_CATEGORY], all_categories); + + // case: severity: fatal, operation: suppress all categories + entries.push_back({"fatal", "SET", + { + {"categories", "software,firmware,cpu_hw,asic_hw"} + }}); + auto consumer = dynamic_cast(gSwitchOrch->getExecutor(CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gSwitchOrch)->doTask(); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_FATAL_SWITCH_ASIC_SDK_HEALTH_CATEGORY], empty_category); + call_count++; + ASSERT_EQ(_ut_stub_asic_sdk_health_event_call_count, call_count); + + // case: severity: warning, operation: suppress partial categories + entries.push_back({"warning", "SET", + { + {"categories", "software,cpu_hw,invalid_category"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gSwitchOrch)->doTask(); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_WARNING_SWITCH_ASIC_SDK_HEALTH_CATEGORY], + set({ + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_FW, + SAI_SWITCH_ASIC_SDK_HEALTH_CATEGORY_ASIC_HW})); + call_count++; + ASSERT_EQ(_ut_stub_asic_sdk_health_event_call_count, call_count); + + // case: invalid severity, nothing changed (to satisfy coverate) + entries.push_back({"warninga", "SET", + { + {"categories", "software,cpu_hw,asic_hw"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gSwitchOrch)->doTask(); + // No SAI API called + ASSERT_EQ(_ut_stub_asic_sdk_health_event_call_count, call_count); + + // case: severity: warning, operation: set max_events only, which means to remove suppress list + entries.push_back({"warning", "SET", + { + {"max_events", "10"} + }}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gSwitchOrch)->doTask(); + call_count++; + ASSERT_EQ(_ut_stub_asic_sdk_health_event_call_count, call_count); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_WARNING_SWITCH_ASIC_SDK_HEALTH_CATEGORY], all_categories); + + // case: severity: notice, operation: suppress no category + entries.push_back({"notice", "DEL", {}}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gSwitchOrch)->doTask(); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_NOTICE_SWITCH_ASIC_SDK_HEALTH_CATEGORY], all_categories); + call_count++; + ASSERT_EQ(_ut_stub_asic_sdk_health_event_call_count, call_count); + + _unhook_sai_apis(); + } + + TEST_F(SwitchOrchTest, SwitchOrchTestCheckCapability) + { + initSwitchOrch(); + + string value; + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_ASIC_SDK_HEALTH_EVENT_CAPABLE, value); + ASSERT_EQ(value, "true"); + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_REG_FATAL_ASIC_SDK_HEALTH_CATEGORY, value); + ASSERT_EQ(value, "true"); + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_REG_WARNING_ASIC_SDK_HEALTH_CATEGORY, value); + ASSERT_EQ(value, "true"); + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_REG_NOTICE_ASIC_SDK_HEALTH_CATEGORY, value); + ASSERT_EQ(value, "true"); + + // Test that mirror capabilities are also queried and stored + // The actual values depend on the SAI implementation, but we can verify the entries exist + bool ingress_exists = gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_PORT_INGRESS_MIRROR_CAPABLE, value); + ASSERT_TRUE(ingress_exists); + bool egress_exists = gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_PORT_EGRESS_MIRROR_CAPABLE, value); + ASSERT_TRUE(egress_exists); + } + + TEST_F(SwitchOrchTest, SwitchOrchTestCheckCapabilityUnsupported) + { + _ut_reg_event_unsupported = true; + _ut_stub_asic_sdk_health_event_check_all = true; + + _hook_sai_apis(); + initSwitchOrch(); + + string value; + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_ASIC_SDK_HEALTH_EVENT_CAPABLE, value); + ASSERT_EQ(value, "false"); + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_REG_FATAL_ASIC_SDK_HEALTH_CATEGORY, value); + ASSERT_EQ(value, "false"); + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_REG_WARNING_ASIC_SDK_HEALTH_CATEGORY, value); + ASSERT_EQ(value, "false"); + gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_REG_NOTICE_ASIC_SDK_HEALTH_CATEGORY, value); + ASSERT_EQ(value, "false"); + + // Test that mirror capabilities are also queried and stored + // The actual values depend on the SAI implementation, but we can verify the entries exist + bool ingress_exists = gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_PORT_INGRESS_MIRROR_CAPABLE, value); + ASSERT_TRUE(ingress_exists); + bool egress_exists = gSwitchOrch->m_switchTable.hget("switch", SWITCH_CAPABILITY_TABLE_PORT_EGRESS_MIRROR_CAPABLE, value); + ASSERT_TRUE(egress_exists); + + // case: unsupported severity. To satisfy coverage. + vector ts; + std::deque entries; + Table suppressAsicSdkHealthEventTable = Table(m_config_db.get(), CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME); + entries.push_back({"fatal", "SET", + { + {"categories", "software,firmware,cpu_hw,asic_hw"} + }}); + set empty_category; + auto consumer = dynamic_cast(gSwitchOrch->getExecutor(CFG_SUPPRESS_ASIC_SDK_HEALTH_EVENT_NAME)); + consumer->addToSync(entries); + entries.clear(); + auto call_count = _ut_stub_asic_sdk_health_event_call_count; + static_cast(gSwitchOrch)->doTask(); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_category_sets[SAI_SWITCH_ATTR_REG_FATAL_SWITCH_ASIC_SDK_HEALTH_CATEGORY], empty_category); + ASSERT_EQ(_ut_stub_asic_sdk_health_event_call_count, call_count); + } + + TEST_F(SwitchOrchTest, SwitchOrchTestHandleEvent) + { + sai_timespec_t timestamp = {.tv_sec = 1701160447, .tv_nsec = 538710245}; + checkAsicSdkHealthEvent(timestamp, "2023-11-28 08:34:07"); + } + + TEST_F(SwitchOrchTest, SwitchOrchTestHandleEventInvalidTimeStamp) + { + sai_timespec_t timestamp = {.tv_sec = 172479515853275099, .tv_nsec = 538710245}; + checkAsicSdkHealthEvent(timestamp); + } +} diff --git a/tests/mock_tests/teammgrd/teammgr_ut.cpp b/tests/mock_tests/teammgrd/teammgr_ut.cpp index 32f064f5526..a40f39f4841 100644 --- a/tests/mock_tests/teammgrd/teammgr_ut.cpp +++ b/tests/mock_tests/teammgrd/teammgr_ut.cpp @@ -1,22 +1,128 @@ #include "gtest/gtest.h" #include "../mock_table.h" #include "teammgr.h" +#include extern int (*callback)(const std::string &cmd, std::string &stdout); extern std::vector mockCallArgs; +static std::vector< std::pair > mockKillCommands; +static std::map pidFiles; + +static int (*callback_kill)(pid_t pid, int sig) = NULL; +static std::pair (*callback_fopen)(const char *pathname, const char *mode) = NULL; + +static int cb_kill(pid_t pid, int sig) +{ + mockKillCommands.push_back(std::make_pair(pid, sig)); + if (!sig) + { + errno = ESRCH; + return -1; + } + else + { + return 0; + } +} + +int kill(pid_t pid, int sig) +{ + if (callback_kill) + { + return callback_kill(pid, sig); + } + int (*realfunc)(pid_t, int) = + (int(*)(pid_t, int))(dlsym (RTLD_NEXT, "kill")); + return realfunc(pid, sig); +} + +static std::pair cb_fopen(const char *pathname, const char *mode) +{ + auto pidFileSearch = pidFiles.find(pathname); + if (pidFileSearch != pidFiles.end()) + { + if (!pidFileSearch->second) + { + errno = ENOENT; + } + return std::make_pair(true, pidFileSearch->second); + } + else + { + return std::make_pair(false, (FILE*)NULL); + } +} + +FILE* fopen(const char *pathname, const char *mode) +{ + if (callback_fopen) + { + std::pair callback_fd = callback_fopen(pathname, mode); + if (callback_fd.first) + { + return callback_fd.second; + } + } + FILE* (*realfunc)(const char *, const char *) = + (FILE* (*)(const char *, const char *))(dlsym (RTLD_NEXT, "fopen")); + return realfunc(pathname, mode); +} + +FILE* fopen64(const char *pathname, const char *mode) +{ + if (callback_fopen) + { + std::pair callback_fd = callback_fopen(pathname, mode); + if (callback_fd.first) + { + return callback_fd.second; + } + } + FILE* (*realfunc)(const char *, const char *) = + (FILE* (*)(const char *, const char *))(dlsym (RTLD_NEXT, "fopen64")); + return realfunc(pathname, mode); +} int cb(const std::string &cmd, std::string &stdout) { mockCallArgs.push_back(cmd); - if (cmd.find("/usr/bin/teamd -r -t PortChannel1") != std::string::npos) + if (cmd.find("/usr/bin/teamd -r -t PortChannel382") != std::string::npos) { + mkdir("/var/run/teamd", 0755); + std::FILE* pidFile = std::tmpfile(); + std::fputs("1234", pidFile); + std::rewind(pidFile); + pidFiles["/var/run/teamd/PortChannel382.pid"] = pidFile; return 1; } - else if (cmd.find("cat \"/var/run/teamd/PortChannel1.pid\"") != std::string::npos) + else if (cmd.find("/usr/bin/teamd -r -t PortChannel812") != std::string::npos) { - stdout = "1234"; + pidFiles["/var/run/teamd/PortChannel812.pid"] = NULL; + return 1; + } + else if (cmd.find("/usr/bin/teamd -r -t PortChannel495") != std::string::npos) + { + mkdir("/var/run/teamd", 0755); + std::FILE* pidFile = std::tmpfile(); + std::fputs("5678", pidFile); + std::rewind(pidFile); + pidFiles["/var/run/teamd/PortChannel495.pid"] = pidFile; return 0; } + else if (cmd.find("/usr/bin/teamd -r -t PortChannel198") != std::string::npos) + { + pidFiles["/var/run/teamd/PortChannel198.pid"] = NULL; + } + else + { + for (int i = 600; i < 620; i++) + { + if (cmd.find(std::string("/usr/bin/teamd -r -t PortChannel") + std::to_string(i)) != std::string::npos) + { + pidFiles[std::string("/var/run/teamd/PortChannel") + std::to_string(i) + std::string(".pid")] = NULL; + } + } + } return 0; } @@ -53,7 +159,18 @@ namespace teammgr_ut cfg_lag_tables = tables; mockCallArgs.clear(); + mockKillCommands.clear(); + pidFiles.clear(); callback = cb; + callback_kill = cb_kill; + callback_fopen = cb_fopen; + } + + virtual void TearDown() override + { + callback = NULL; + callback_kill = NULL; + callback_fopen = NULL; } }; @@ -61,18 +178,90 @@ namespace teammgr_ut { swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); - cfg_lag_table.set("PortChannel1", { { "admin_status", "up" }, + cfg_lag_table.set("PortChannel382", { { "admin_status", "up" }, { "mtu", "9100" }, { "lacp_key", "auto" }, { "min_links", "2" } }); teammgr.addExistingData(&cfg_lag_table); teammgr.doTask(); - int kill_cmd_called = 0; - for (auto cmd : mockCallArgs){ - if (cmd.find("kill -TERM 1234") != std::string::npos){ - kill_cmd_called++; - } + ASSERT_NE(mockCallArgs.size(), 0); + EXPECT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel382"), std::string::npos); + EXPECT_EQ(mockCallArgs.size(), 1); + EXPECT_EQ(mockKillCommands.size(), 1); + EXPECT_EQ(mockKillCommands.front().first, 1234); + EXPECT_EQ(mockKillCommands.front().second, SIGTERM); + } + + TEST_F(TeamMgrTest, testProcessPidFileMissingAfterAddLagFailure) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + cfg_lag_table.set("PortChannel812", { { "admin_status", "up" }, + { "mtu", "9100" }, + { "fallback", "true" }, + { "lacp_key", "auto" }, + { "min_links", "1" } }); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_NE(mockCallArgs.size(), 0); + EXPECT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel812"), std::string::npos); + EXPECT_EQ(mockCallArgs.size(), 1); + EXPECT_EQ(mockKillCommands.size(), 0); + } + + TEST_F(TeamMgrTest, testProcessCleanupAfterAddLag) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + cfg_lag_table.set("PortChannel495", { { "admin_status", "up" }, + { "mtu", "9100" }, + { "lacp_key", "auto" }, + { "min_links", "2" } }); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_EQ(mockCallArgs.size(), 3); + ASSERT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel495"), std::string::npos); + teammgr.cleanTeamProcesses(); + EXPECT_EQ(mockKillCommands.size(), 2); + EXPECT_EQ(mockKillCommands.front().first, 5678); + EXPECT_EQ(mockKillCommands.front().second, SIGTERM); + } + + TEST_F(TeamMgrTest, testProcessPidFileMissingDuringCleanup) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + cfg_lag_table.set("PortChannel198", { { "admin_status", "up" }, + { "mtu", "9100" }, + { "fallback", "true" }, + { "lacp_key", "auto" }, + { "min_links", "1" } }); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_NE(mockCallArgs.size(), 0); + EXPECT_NE(mockCallArgs.front().find("/usr/bin/teamd -r -t PortChannel198"), std::string::npos); + EXPECT_EQ(mockCallArgs.size(), 3); + teammgr.cleanTeamProcesses(); + EXPECT_EQ(mockKillCommands.size(), 0); + } + + TEST_F(TeamMgrTest, testSleepDuringCleanup) + { + swss::TeamMgr teammgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_lag_tables); + swss::Table cfg_lag_table = swss::Table(m_config_db.get(), CFG_LAG_TABLE_NAME); + for (int i = 600; i < 620; i++) + { + cfg_lag_table.set(std::string("PortChannel") + std::to_string(i), { { "admin_status", "up" }, + { "mtu", "9100" }, + { "lacp_key", "auto" } }); } - ASSERT_EQ(kill_cmd_called, 1); + teammgr.addExistingData(&cfg_lag_table); + teammgr.doTask(); + ASSERT_EQ(mockCallArgs.size(), 60); + std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now(); + teammgr.cleanTeamProcesses(); + std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); + EXPECT_EQ(mockKillCommands.size(), 0); + EXPECT_GE(std::chrono::duration_cast(end - begin).count(), 200); } -} \ No newline at end of file +} diff --git a/tests/mock_tests/test_failure_handling.cpp b/tests/mock_tests/test_failure_handling.cpp index 7381f4015ee..48fb3cb825c 100644 --- a/tests/mock_tests/test_failure_handling.cpp +++ b/tests/mock_tests/test_failure_handling.cpp @@ -58,31 +58,31 @@ namespace saifailure_test *_sai_syncd_notifications_count = 0; uint32_t notif_count = *_sai_syncd_notifications_count; - ASSERT_DEATH({handleSaiCreateStatus(SAI_API_FDB, SAI_STATUS_FAILURE);}, ""); + handleSaiCreateStatus(SAI_API_FDB, SAI_STATUS_FAILURE); ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); - ASSERT_DEATH({handleSaiCreateStatus(SAI_API_HOSTIF, SAI_STATUS_INVALID_PARAMETER);}, ""); + handleSaiCreateStatus(SAI_API_HOSTIF, SAI_STATUS_INVALID_PARAMETER); ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); - ASSERT_DEATH({handleSaiCreateStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + handleSaiCreateStatus(SAI_API_PORT, SAI_STATUS_FAILURE); ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); - ASSERT_DEATH({handleSaiSetStatus(SAI_API_HOSTIF, SAI_STATUS_FAILURE);}, ""); + handleSaiSetStatus(SAI_API_HOSTIF, SAI_STATUS_FAILURE); ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); - ASSERT_DEATH({handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_FAILURE); ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); - ASSERT_DEATH({handleSaiSetStatus(SAI_API_TUNNEL, SAI_STATUS_FAILURE);}, ""); + handleSaiSetStatus(SAI_API_TUNNEL, SAI_STATUS_FAILURE); ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); - ASSERT_DEATH({handleSaiRemoveStatus(SAI_API_LAG, SAI_STATUS_FAILURE);}, ""); + handleSaiRemoveStatus(SAI_API_LAG, SAI_STATUS_FAILURE); ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); diff --git a/tests/mock_tests/tunneldecaporch_ut.cpp b/tests/mock_tests/tunneldecaporch_ut.cpp new file mode 100644 index 00000000000..8339b77a1ec --- /dev/null +++ b/tests/mock_tests/tunneldecaporch_ut.cpp @@ -0,0 +1,521 @@ +#include "ut_helper.h" +#include "mock_orchagent_main.h" + +namespace tunneldecaporch_test +{ + using namespace std; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + + sai_tunnel_api_t ut_sai_tunnel_api; + sai_tunnel_api_t *pold_sai_tunnel_api; + sai_router_interface_api_t ut_sai_router_intfs_api; + sai_router_interface_api_t *pold_sai_router_intfs_api; + sai_next_hop_api_t ut_sai_next_hop_api; + sai_next_hop_api_t *pold_sai_next_hop_api; + + // Mock SAI API functions + sai_status_t _ut_stub_sai_create_tunnel( + _Out_ sai_object_id_t *tunnel_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + static sai_object_id_t tunnel_id_counter = 0x300; + *tunnel_id = tunnel_id_counter++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_remove_tunnel( + _In_ sai_object_id_t tunnel_id) + { + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_set_tunnel_attribute( + _In_ sai_object_id_t tunnel_id, + _In_ const sai_attribute_t *attr) + { + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_create_tunnel_term_table_entry( + _Out_ sai_object_id_t *tunnel_term_table_entry_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + static sai_object_id_t term_entry_id_counter = 0x400; + *tunnel_term_table_entry_id = term_entry_id_counter++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_remove_tunnel_term_table_entry( + _In_ sai_object_id_t tunnel_term_table_entry_id) + { + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_create_router_interface( + _Out_ sai_object_id_t *router_interface_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + static sai_object_id_t rif_id_counter = 0x500; + *router_interface_id = rif_id_counter++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_remove_router_interface( + _In_ sai_object_id_t router_interface_id) + { + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_create_next_hop( + _Out_ sai_object_id_t *next_hop_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + static sai_object_id_t nh_id_counter = 0x600; + *next_hop_id = nh_id_counter++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_remove_next_hop( + _In_ sai_object_id_t next_hop_id) + { + return SAI_STATUS_SUCCESS; + } + + struct TunnelDecapOrchTest : public ::testing::Test + { + TunnelDecapOrchTest() + { + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + } + + void SetUp() override + { + // Initialize minimal global objects - following aclorch_ut pattern + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + // Set other global objects to nullptr + gPortsOrch = nullptr; + gQosOrch = nullptr; + + // Set up mock tunnel API + ut_sai_tunnel_api = {}; + ut_sai_tunnel_api.create_tunnel = _ut_stub_sai_create_tunnel; + ut_sai_tunnel_api.remove_tunnel = _ut_stub_sai_remove_tunnel; + ut_sai_tunnel_api.set_tunnel_attribute = _ut_stub_sai_set_tunnel_attribute; + ut_sai_tunnel_api.create_tunnel_term_table_entry = _ut_stub_sai_create_tunnel_term_table_entry; + ut_sai_tunnel_api.remove_tunnel_term_table_entry = _ut_stub_sai_remove_tunnel_term_table_entry; + + pold_sai_tunnel_api = sai_tunnel_api; + sai_tunnel_api = &ut_sai_tunnel_api; + + // Set up mock router interface API + ut_sai_router_intfs_api = {}; + ut_sai_router_intfs_api.create_router_interface = _ut_stub_sai_create_router_interface; + ut_sai_router_intfs_api.remove_router_interface = _ut_stub_sai_remove_router_interface; + + pold_sai_router_intfs_api = sai_router_intfs_api; + sai_router_intfs_api = &ut_sai_router_intfs_api; + + // Set up mock next hop API + ut_sai_next_hop_api = {}; + ut_sai_next_hop_api.create_next_hop = _ut_stub_sai_create_next_hop; + ut_sai_next_hop_api.remove_next_hop = _ut_stub_sai_remove_next_hop; + + pold_sai_next_hop_api = sai_next_hop_api; + sai_next_hop_api = &ut_sai_next_hop_api; + + // Set basic global variables needed for TunnelDecapOrch + gSwitchId = 0x21000000000000; + gVirtualRouterId = 0x3000000000001; + gUnderlayIfId = 0x6000000000001; + gMacAddress = MacAddress("20:03:04:05:06:00"); + } + + void TearDown() override + { + // Restore SAI APIs + sai_tunnel_api = pold_sai_tunnel_api; + sai_router_intfs_api = pold_sai_router_intfs_api; + sai_next_hop_api = pold_sai_next_hop_api; + + // Clean up global orchestrator objects + delete gCrmOrch; + gCrmOrch = nullptr; + } + }; + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_Creation) + { + // Test creation of TunnelDecapOrch + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + + ASSERT_NO_THROW({ + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_BasicFunctionality) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + EXPECT_NO_THROW({ + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_GetDscpMode) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test getDscpMode with non-existent tunnel + EXPECT_NO_THROW({ + string dscp_mode = tunnelDecapOrch->getDscpMode("non_existent_tunnel"); + EXPECT_TRUE(dscp_mode.empty()); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_GetDstIpAddresses) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test getDstIpAddresses with non-existent tunnel + EXPECT_NO_THROW({ + auto dst_ips = tunnelDecapOrch->getDstIpAddresses("non_existent_tunnel"); + EXPECT_EQ(dst_ips.getSize(), 0); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_GetQosMapId) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test getQosMapId with non-existent tunnel + EXPECT_NO_THROW({ + sai_object_id_t qos_map_id; + bool result = tunnelDecapOrch->getQosMapId("non_existent_tunnel", "dscp_to_tc_map", qos_map_id); + EXPECT_FALSE(result); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_CreateNextHopTunnel) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + EXPECT_NO_THROW({ + IpAddress ip_addr("192.168.1.1"); + sai_object_id_t nh_id = tunnelDecapOrch->createNextHopTunnel("non_existent_tunnel", ip_addr); + EXPECT_EQ(nh_id, SAI_NULL_OBJECT_ID); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_RemoveNextHopTunnel) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test removeNextHopTunnel with non-existent tunnel + EXPECT_NO_THROW({ + IpAddress ip_addr("192.168.1.1"); + bool result = tunnelDecapOrch->removeNextHopTunnel("non_existent_tunnel", ip_addr); + EXPECT_TRUE(result); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_GetSubnetDecapConfig) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test getSubnetDecapConfig + EXPECT_NO_THROW({ + const auto& config = tunnelDecapOrch->getSubnetDecapConfig(); + EXPECT_FALSE(config.enable); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_DoTask) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test that doTask doesn't crash with empty task queue + EXPECT_NO_THROW({ + static_cast(tunnelDecapOrch.get())->doTask(); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_WithStateDb) + { + // Create TunnelDecapOrch with state DB + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test that basic functionality works with state DB constructor + EXPECT_NO_THROW({ + auto dst_ips = tunnelDecapOrch->getDstIpAddresses("test_tunnel"); + EXPECT_EQ(dst_ips.getSize(), 0); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_DoDecapTunnelTask) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + // Test 1: Create tunnel configuration data in the database and test processing + Table tunnelDecapTable(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + + // Set tunnel configuration in app DB + vector tunnelData = { + {"tunnel_type", "IPINIP"}, + {"dscp_mode", "uniform"}, + {"ecn_mode", "copy_from_outer"}, + {"ttl_mode", "pipe"}, + {"src_ip", "10.0.0.1"} + }; + tunnelDecapTable.set("test_tunnel", tunnelData); + + // Test that the table was set correctly + EXPECT_NO_THROW({ + vector values; + bool result = tunnelDecapTable.get("test_tunnel", values); + EXPECT_TRUE(result); + EXPECT_EQ(values.size(), 5); + }); + + // Test 2: Test invalid tunnel configuration + vector invalidTunnelData = { + {"tunnel_type", "INVALID_TYPE"}, + {"dscp_mode", "invalid_mode"}, + {"ecn_mode", "invalid_ecn"} + }; + tunnelDecapTable.set("invalid_tunnel", invalidTunnelData); + + // Verify invalid data was set + EXPECT_NO_THROW({ + vector values; + bool result = tunnelDecapTable.get("invalid_tunnel", values); + EXPECT_TRUE(result); + EXPECT_EQ(values.size(), 3); + }); + + // Test 3: Test tunnel deletion + tunnelDecapTable.del("test_tunnel"); + + EXPECT_NO_THROW({ + vector values; + bool result = tunnelDecapTable.get("test_tunnel", values); + EXPECT_FALSE(result); + }); + + // Test 4: Test empty configuration + vector emptyData = {}; + tunnelDecapTable.set("empty_tunnel", emptyData); + + EXPECT_NO_THROW({ + vector values; + bool result = tunnelDecapTable.get("empty_tunnel", values); + EXPECT_TRUE(result); + EXPECT_EQ(values.size(), 0); + }); + + // Test 5: Test that getDscpMode works after setting up tunnel data + EXPECT_NO_THROW({ + string dscp_mode = tunnelDecapOrch->getDscpMode("invalid_tunnel"); + EXPECT_TRUE(dscp_mode.empty()); + }); + + // Test 6: Test that getDstIpAddresses works with database entries + EXPECT_NO_THROW({ + auto dst_ips = tunnelDecapOrch->getDstIpAddresses("empty_tunnel"); + EXPECT_EQ(dst_ips.getSize(), 0); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_StateDbVerification) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + Table tunnelDecapTable(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + Table stateTable(m_state_db.get(), STATE_TUNNEL_DECAP_TABLE_NAME); + + vector tunnelData = { + {"tunnel_type", "IPINIP"}, + {"dscp_mode", "uniform"}, + {"ecn_mode", "copy_from_outer"}, + {"ttl_mode", "pipe"}, + {"src_ip", "10.0.0.1"} + }; + tunnelDecapTable.set("state_test_tunnel", tunnelData); + + EXPECT_NO_THROW({ + vector values; + bool result = tunnelDecapTable.get("state_test_tunnel", values); + EXPECT_TRUE(result); + EXPECT_EQ(values.size(), 5); + }); + + EXPECT_NO_THROW({ + vector stateValues; + stateTable.get("state_test_tunnel", stateValues); + }); + + tunnelDecapTable.del("state_test_tunnel"); + + vector modifiedData = { + {"tunnel_type", "IPINIP"}, + {"dscp_mode", "pipe"}, + {"ecn_mode", "standard"}, + {"ttl_mode", "uniform"} + }; + tunnelDecapTable.set("state_test_tunnel", modifiedData); + + EXPECT_NO_THROW({ + vector values; + bool result = tunnelDecapTable.get("state_test_tunnel", values); + EXPECT_TRUE(result); + EXPECT_EQ(values.size(), 4); + + bool foundDscpMode = false; + bool foundEcnMode = false; + for (const auto& fv : values) { + if (fvField(fv) == "dscp_mode") { + EXPECT_EQ(fvValue(fv), "pipe"); + foundDscpMode = true; + } + if (fvField(fv) == "ecn_mode") { + EXPECT_EQ(fvValue(fv), "standard"); + foundEcnMode = true; + } + } + EXPECT_TRUE(foundDscpMode); + EXPECT_TRUE(foundEcnMode); + }); + + tunnelDecapTable.del("state_test_tunnel"); + + EXPECT_NO_THROW({ + vector values; + bool result = tunnelDecapTable.get("state_test_tunnel", values); + EXPECT_FALSE(result); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_StateDbTermTable) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + Table tunnelTermTable(m_app_db.get(), APP_TUNNEL_DECAP_TERM_TABLE_NAME); + Table stateTermTable(m_state_db.get(), STATE_TUNNEL_DECAP_TERM_TABLE_NAME); + + vector termData = { + {"term_type", "P2MP"}, + {"dst_ip", "192.168.1.0/24"}, + {"src_ip", "10.0.0.1"}, + {"subnet_type", "vnet"} + }; + tunnelTermTable.set("test_tunnel|192.168.1.0/24", termData); + + EXPECT_NO_THROW({ + vector values; + bool result = tunnelTermTable.get("test_tunnel|192.168.1.0/24", values); + EXPECT_TRUE(result); + EXPECT_EQ(values.size(), 4); + }); + + EXPECT_NO_THROW({ + vector stateValues; + stateTermTable.get("test_tunnel|192.168.1.0/24", stateValues); + }); + + tunnelTermTable.del("test_tunnel|192.168.1.0/24"); + + EXPECT_NO_THROW({ + vector values; + bool result = tunnelTermTable.get("test_tunnel|192.168.1.0/24", values); + EXPECT_FALSE(result); + }); + } + + TEST_F(TunnelDecapOrchTest, TunnelDecapOrch_GettersWithStateDb) + { + vector tunnel_tables = { APP_TUNNEL_DECAP_TABLE_NAME }; + auto tunnelDecapOrch = make_shared( + m_app_db.get(), m_state_db.get(), m_config_db.get(), tunnel_tables); + ASSERT_NE(tunnelDecapOrch, nullptr); + + EXPECT_NO_THROW({ + string dscp_mode = tunnelDecapOrch->getDscpMode("test_tunnel_with_state"); + EXPECT_TRUE(dscp_mode.empty()); + }); + + EXPECT_NO_THROW({ + auto dst_ips = tunnelDecapOrch->getDstIpAddresses("test_tunnel_with_state"); + EXPECT_EQ(dst_ips.getSize(), 0); + }); + + EXPECT_NO_THROW({ + sai_object_id_t qos_map_id; + bool result = tunnelDecapOrch->getQosMapId("test_tunnel_with_state", "encap_tc_to_dscp_map", qos_map_id); + EXPECT_FALSE(result); + }); + + EXPECT_NO_THROW({ + sai_object_id_t qos_map_id; + bool result = tunnelDecapOrch->getQosMapId("test_tunnel_with_state", "encap_tc_to_queue_map", qos_map_id); + EXPECT_FALSE(result); + }); + + EXPECT_NO_THROW({ + const auto& config = tunnelDecapOrch->getSubnetDecapConfig(); + EXPECT_FALSE(config.enable); + }); + } + +} // namespace tunneldecaporch_test diff --git a/tests/mock_tests/twamporch_ut.cpp b/tests/mock_tests/twamporch_ut.cpp new file mode 100644 index 00000000000..721950e74aa --- /dev/null +++ b/tests/mock_tests/twamporch_ut.cpp @@ -0,0 +1,975 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "notifier.h" + +extern string gMySwitchType; + +extern sai_object_id_t gSwitchId; + +extern redisReply *mockReply; + + +namespace twamporch_test +{ + using namespace std; + + int create_twamp_session_count; + int set_twamp_session_count; + int remove_twamp_session_count; + + sai_twamp_api_t ut_sai_twamp_api; + sai_twamp_api_t *pold_sai_twamp_api; + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *pold_sai_switch_api; + + sai_create_twamp_session_fn old_create_twamp_session; + sai_remove_twamp_session_fn old_remove_twamp_session; + sai_set_twamp_session_attribute_fn old_set_twamp_session_attribute; + + sai_status_t _ut_stub_sai_create_twamp_session( + _Out_ sai_object_id_t *twamp_session_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *twamp_session_id = (sai_object_id_t)(0x1); + create_twamp_session_count++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_remove_twamp_session( + _In_ sai_object_id_t twamp_session_id) + { + remove_twamp_session_count++; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_set_twamp_session_attribute( + _In_ sai_object_id_t twamp_session_id, + _In_ const sai_attribute_t *attr) + { + set_twamp_session_count++; + if (attr->id == SAI_TWAMP_SESSION_ATTR_SESSION_ENABLE_TRANSMIT) + { + return SAI_STATUS_SUCCESS; + } + return old_set_twamp_session_attribute(twamp_session_id, attr); + } + + sai_status_t _ut_stub_sai_get_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1) + { + if (attr_list[0].id == SAI_SWITCH_ATTR_MAX_TWAMP_SESSION) + { + attr_list[0].value.u32 = 128; + return SAI_STATUS_SUCCESS; + } + } + return pold_sai_switch_api->get_switch_attribute(switch_id, attr_count, attr_list); + } + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_SWITCH_ATTR_TWAMP_SESSION_EVENT_NOTIFY) + { + return SAI_STATUS_SUCCESS; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_twamp_api() + { + ut_sai_twamp_api = *sai_twamp_api; + pold_sai_twamp_api = sai_twamp_api; + ut_sai_twamp_api.create_twamp_session = _ut_stub_sai_create_twamp_session; + ut_sai_twamp_api.remove_twamp_session = _ut_stub_sai_remove_twamp_session; + ut_sai_twamp_api.set_twamp_session_attribute = _ut_stub_sai_set_twamp_session_attribute; + sai_twamp_api = &ut_sai_twamp_api; + } + + void _unhook_sai_twamp_api() + { + sai_twamp_api = pold_sai_twamp_api; + } + + void _hook_sai_switch_api() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.get_switch_attribute = _ut_stub_sai_get_switch_attribute; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + } + + class MockTwampOrch final + { + public: + MockTwampOrch() + { + this->confDb = std::make_shared("CONFIG_DB", 0); + TableConnector confDbTwampTable(this->confDb.get(), CFG_TWAMP_SESSION_TABLE_NAME); + TableConnector stateDbTwampTable(this->confDb.get(), STATE_TWAMP_SESSION_TABLE_NAME); + this->twampOrch = std::make_shared(confDbTwampTable, stateDbTwampTable, gSwitchOrch, gPortsOrch, gVrfOrch); + } + ~MockTwampOrch() = default; + + void doTwampTableTask(const std::deque &entries) + { + auto consumer = dynamic_cast((this->twampOrch.get())->getExecutor(CFG_TWAMP_SESSION_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(this->twampOrch.get())->doTask(*consumer); + } + + void doTwampNotificationTask() + { + auto exec = static_cast((this->twampOrch.get())->getExecutor("TWAMP_NOTIFICATIONS")); + auto consumer = exec->getNotificationConsumer(); + consumer->readData(); + static_cast(this->twampOrch.get())->doTask(*consumer); + } + + TwampOrch& get() + { + return *twampOrch; + } + + private: + std::shared_ptr confDb; + std::shared_ptr twampOrch; + }; + + class TwampOrchTest : public ::testing::Test + { + public: + TwampOrchTest() + { + this->initDb(); + } + virtual ~TwampOrchTest() = default; + + void SetUp() override + { + this->initSaiApi(); + this->initSwitch(); + this->initOrch(); + this->initPorts(); + _hook_sai_twamp_api(); + _hook_sai_switch_api(); + } + + void TearDown() override + { + this->deinitOrch(); + this->deinitSwitch(); + this->deinitSaiApi(); + _unhook_sai_twamp_api(); + _unhook_sai_switch_api(); + } + + private: + void initSaiApi() + { + std::map profileMap = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + auto status = ut_helper::initSaiApi(profileMap); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void deinitSaiApi() + { + auto status = ut_helper::uninitSaiApi(); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void initSwitch() + { + sai_status_t status; + sai_attribute_t attr; + + // Create switch + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get switch default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + } + + void deinitSwitch() + { + // Remove switch + auto status = sai_switch_api->remove_switch(gSwitchId); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gSwitchId = SAI_NULL_OBJECT_ID; + gVirtualRouterId = SAI_NULL_OBJECT_ID; + } + + void initOrch() + { + // + // SwitchOrch + // + TableConnector state_switch_table(this->stateDb.get(), "SWITCH_CAPABILITY"); + TableConnector app_switch_table(this->appDb.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(this->configDb.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + std::vector switchTableList = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(this->appDb.get(), switchTableList, state_switch_table); + gDirectory.set(gSwitchOrch); + resourcesList.push_back(gSwitchOrch); + + // + // PortsOrch + // + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(this->appDb.get(), this->stateDb.get(), ports_tables, this->chassisAppDb.get()); + gDirectory.set(gPortsOrch); + resourcesList.push_back(gPortsOrch); + + // + // VrfOrch + // + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(this->appDb.get(), APP_VRF_TABLE_NAME, this->stateDb.get(), STATE_VRF_OBJECT_TABLE_NAME); + resourcesList.push_back(gVrfOrch); + + + // + // BufferOrch + // + std::vector bufferTableList = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(this->appDb.get(), this->configDb.get(), this->stateDb.get(), bufferTableList); + gDirectory.set(gBufferOrch); + resourcesList.push_back(gBufferOrch); + + // + // FlexCounterOrch + // + std::vector flexCounterTableList = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + auto flexCounterOrch = new FlexCounterOrch(this->configDb.get(), flexCounterTableList); + gDirectory.set(flexCounterOrch); + resourcesList.push_back(flexCounterOrch); + + // + // CrmOrch + // + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(this->configDb.get(), CFG_CRM_TABLE_NAME); + gDirectory.set(gCrmOrch); + resourcesList.push_back(gCrmOrch); + } + + void deinitOrch() + { + std::reverse(resourcesList.begin(), resourcesList.end()); + for (auto &it : resourcesList) + { + delete it; + } + + gSwitchOrch = nullptr; + gPortsOrch = nullptr; + gVrfOrch = nullptr; + gBufferOrch = nullptr; + gCrmOrch = nullptr; + + Portal::DirectoryInternal::clear(gDirectory); + EXPECT_TRUE(Portal::DirectoryInternal::empty(gDirectory)); + } + + void initPorts() + { + auto portTable = Table(this->appDb.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + // Set PortInitDone + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + } + + void initDb() + { + this->appDb = std::make_shared("APPL_DB", 0); + this->configDb = std::make_shared("CONFIG_DB", 0); + this->stateDb = std::make_shared("STATE_DB", 0); + this->countersDb = make_shared("COUNTERS_DB", 0); + this->chassisAppDb = make_shared("CHASSIS_APP_DB", 0); + this->asicDb = make_shared("ASIC_DB", 0); + } + + shared_ptr appDb; + shared_ptr configDb; + shared_ptr stateDb; + shared_ptr countersDb; + shared_ptr chassisAppDb; + shared_ptr asicDb; + + std::vector resourcesList; + }; + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderPacketCountSingle) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "863" }, + {"packet_count", "1000" }, + {"tx_interval", "10" }, + {"timeout", "10" }, + {"statistics_interval", "20000" }, + {"vrf_name", "default" }, + {"dscp", "0" }, + {"ttl", "10" }, + {"timestamp_format", "ntp" }, + {"padding_size", "100" }, + {"hw_lookup", "true" } + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Process Notification + { + // mock a redis reply for notification + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + sai_twamp_session_event_notification_data_t twamp_session_data; + sai_twamp_session_stat_t counters_ids[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + uint64_t counters[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + twamp_session_data.session_state = SAI_TWAMP_SESSION_STATE_INACTIVE; + twamp_session_data.twamp_session_id = (sai_object_id_t)0x1; + twamp_session_data.session_stats.index = 1; + twamp_session_data.session_stats.number_of_counters = 11; + + counters_ids[0] = SAI_TWAMP_SESSION_STAT_RX_PACKETS; + counters_ids[1] = SAI_TWAMP_SESSION_STAT_RX_BYTE; + counters_ids[2] = SAI_TWAMP_SESSION_STAT_TX_PACKETS; + counters_ids[3] = SAI_TWAMP_SESSION_STAT_TX_BYTE; + counters_ids[4] = SAI_TWAMP_SESSION_STAT_DROP_PACKETS; + counters_ids[5] = SAI_TWAMP_SESSION_STAT_MAX_LATENCY; + counters_ids[6] = SAI_TWAMP_SESSION_STAT_MIN_LATENCY; + counters_ids[7] = SAI_TWAMP_SESSION_STAT_AVG_LATENCY; + counters_ids[8] = SAI_TWAMP_SESSION_STAT_MAX_JITTER; + counters_ids[9] = SAI_TWAMP_SESSION_STAT_MIN_JITTER; + counters_ids[10] = SAI_TWAMP_SESSION_STAT_AVG_JITTER; + counters[0] = 1000; + counters[1] = 100000; + counters[2] = 1000; + counters[3] = 100000; + counters[4] = 0; + counters[5] = 1987; + counters[6] = 1983; + counters[7] = 1984; + counters[8] = 2097; + counters[9] = 1896; + counters[10] = 1985; + twamp_session_data.session_stats.counters_ids = counters_ids; + twamp_session_data.session_stats.counters = counters; + + std::string data = sai_serialize_twamp_session_event_ntf(1, &twamp_session_data); + + std::vector notifyValues; + FieldValueTuple opdata("twamp_session_event", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + twampOrch.doTwampNotificationTask(); + mockReply = nullptr; + + TwampStatsTable twampStatistics = Portal::TwampOrchInternal::getTwampSessionStatistics(twampOrch.get()); + ASSERT_TRUE(twampStatistics.find(twampSessionName) != twampStatistics.end()); + ASSERT_EQ(twampStatistics[twampSessionName].rx_packets, 1000); + ASSERT_EQ(twampStatistics[twampSessionName].rx_bytes, 100000); + ASSERT_EQ(twampStatistics[twampSessionName].tx_packets, 1000); + ASSERT_EQ(twampStatistics[twampSessionName].tx_bytes, 100000); + ASSERT_EQ(twampStatistics[twampSessionName].drop_packets, 0); + ASSERT_EQ(twampStatistics[twampSessionName].max_latency, 1987); + ASSERT_EQ(twampStatistics[twampSessionName].min_latency, 1983); + ASSERT_EQ(twampStatistics[twampSessionName].avg_latency, 1984); + ASSERT_EQ(twampStatistics[twampSessionName].max_jitter, 2097); + ASSERT_EQ(twampStatistics[twampSessionName].min_jitter, 1896); + ASSERT_EQ(twampStatistics[twampSessionName].avg_jitter, 1985); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderPacketCountMulti) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "1862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "1863" }, + {"packet_count", "1000" }, + {"tx_interval", "10" }, + {"timeout", "10" }, + {"statistics_interval", "11000" } + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Process Notification + { + sai_twamp_session_event_notification_data_t twamp_session_data; + sai_twamp_session_stat_t counters_ids[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + uint64_t counters[SAI_TWAMP_SESSION_STAT_DURATION_TS]; + uint64_t latency_total = 0; + uint64_t jitter_total = 0; + twamp_session_data.twamp_session_id = (sai_object_id_t)0x1; + twamp_session_data.session_stats.number_of_counters = 11; + counters_ids[0] = SAI_TWAMP_SESSION_STAT_RX_PACKETS; + counters_ids[1] = SAI_TWAMP_SESSION_STAT_RX_BYTE; + counters_ids[2] = SAI_TWAMP_SESSION_STAT_TX_PACKETS; + counters_ids[3] = SAI_TWAMP_SESSION_STAT_TX_BYTE; + counters_ids[4] = SAI_TWAMP_SESSION_STAT_DROP_PACKETS; + counters_ids[5] = SAI_TWAMP_SESSION_STAT_MAX_LATENCY; + counters_ids[6] = SAI_TWAMP_SESSION_STAT_MIN_LATENCY; + counters_ids[7] = SAI_TWAMP_SESSION_STAT_AVG_LATENCY; + counters_ids[8] = SAI_TWAMP_SESSION_STAT_MAX_JITTER; + counters_ids[9] = SAI_TWAMP_SESSION_STAT_MIN_JITTER; + counters_ids[10] = SAI_TWAMP_SESSION_STAT_AVG_JITTER; + twamp_session_data.session_stats.counters_ids = counters_ids; + twamp_session_data.session_stats.counters = counters; + for (uint8_t i = 1; i <= 10; i++) + { + // mock a redis reply for notification + mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->type = REDIS_REPLY_ARRAY; + mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + mockReply->element[2]->type = REDIS_REPLY_STRING; + + twamp_session_data.session_state = (i<10) ? SAI_TWAMP_SESSION_STATE_ACTIVE : SAI_TWAMP_SESSION_STATE_INACTIVE; + twamp_session_data.session_stats.index = i; + counters[0] = 100; + counters[1] = 10000; + counters[2] = 100; + counters[3] = 10000; + counters[4] = 0; + counters[5] = 1000+i; + counters[6] = 1000+i; + counters[7] = 1000+i; + counters[8] = 1100+i; + counters[9] = 1100+i; + counters[10] = 1100+i; + latency_total += counters[7]; + jitter_total += counters[10]; + + std::string data = sai_serialize_twamp_session_event_ntf(1, &twamp_session_data); + + std::vector notifyValues; + FieldValueTuple opdata("twamp_session_event", data); + notifyValues.push_back(opdata); + std::string msg = swss::JSon::buildJson(notifyValues); + mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // trigger the notification + twampOrch.doTwampNotificationTask(); + mockReply = nullptr; + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + if (i<10) + { + ASSERT_EQ(session_status, "active"); + } + else + { + ASSERT_EQ(session_status, "inactive"); + } + + TwampStatsTable twampStatistics = Portal::TwampOrchInternal::getTwampSessionStatistics(twampOrch.get()); + ASSERT_TRUE(twampStatistics.find(twampSessionName) != twampStatistics.end()); + ASSERT_EQ(twampStatistics[twampSessionName].rx_packets, 100*i); + ASSERT_EQ(twampStatistics[twampSessionName].rx_bytes, 10000*i); + ASSERT_EQ(twampStatistics[twampSessionName].tx_packets, 100*i); + ASSERT_EQ(twampStatistics[twampSessionName].tx_bytes, 10000*i); + ASSERT_EQ(twampStatistics[twampSessionName].drop_packets, 0); + ASSERT_EQ(twampStatistics[twampSessionName].max_latency, 1000+i); + ASSERT_EQ(twampStatistics[twampSessionName].min_latency, 1000+1); + ASSERT_EQ(twampStatistics[twampSessionName].avg_latency, latency_total/i); + ASSERT_EQ(twampStatistics[twampSessionName].max_jitter, 1100+i); + ASSERT_EQ(twampStatistics[twampSessionName].min_jitter, 1100+1); + ASSERT_EQ(twampStatistics[twampSessionName].avg_jitter, jitter_total/i); + } + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderContinuousSingle) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "863" }, + {"monitor_time", "60" }, + {"tx_interval", "100" }, + {"timeout", "10" }, + {"statistics_interval", "60000" }, + {"vrf_name", "default" }, + {"dscp", "0" }, + {"ttl", "10" }, + {"timestamp_format", "ntp" }, + {"padding_size", "100" }, + {"hw_lookup", "true" } + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteSenderContinuousMulti) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT" }, + {"role", "SENDER" }, + {"src_ip", "1.1.1.1" }, + {"src_udp_port", "1862" }, + {"dst_ip", "2.2.2.2" }, + {"dst_udp_port", "1863" }, + {"monitor_time", "0" }, + {"tx_interval", "100" }, + {"timeout", "10" }, + {"statistics_interval", "20000" }, + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Start TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "enabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_set_count + 1, set_twamp_session_count); + } + + // Stop TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"admin_state", "disabled"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "inactive"); + ASSERT_EQ(current_set_count + 2, set_twamp_session_count); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count + 2, set_twamp_session_count); + } + + TEST_F(TwampOrchTest, TwampOrchTestCreateDeleteReflector) + { + string twampSessionName = "TEST_SENDER1"; + + MockTwampOrch twampOrch; + + auto current_create_count = create_twamp_session_count; + auto current_remove_count = remove_twamp_session_count; + auto current_set_count = set_twamp_session_count; + + // Create TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + SET_COMMAND, + { + {"mode", "LIGHT"}, + {"role", "REFLECTOR"}, + {"src_ip", "1.1.1.1"}, + {"src_udp_port", "862"}, + {"dst_ip", "2.2.2.2"}, + {"dst_udp_port", "863"} + } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_TRUE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(session_status, "active"); + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + } + + // Delete TWAMP Light session + { + std::deque tableKofvt; + tableKofvt.push_back( + { + twampSessionName, + DEL_COMMAND, + { {} } + } + ); + + twampOrch.doTwampTableTask(tableKofvt); + + string session_status; + ASSERT_FALSE(twampOrch.get().getSessionStatus(twampSessionName, session_status)); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + } + + // Make sure both create and set has been called + ASSERT_EQ(current_create_count + 1, create_twamp_session_count); + ASSERT_EQ(current_remove_count + 1, remove_twamp_session_count); + ASSERT_EQ(current_set_count, set_twamp_session_count); + } +} \ No newline at end of file diff --git a/tests/mock_tests/ut_saihelper.cpp b/tests/mock_tests/ut_saihelper.cpp index 8b6b35b6f7c..bb7d376b191 100644 --- a/tests/mock_tests/ut_saihelper.cpp +++ b/tests/mock_tests/ut_saihelper.cpp @@ -89,7 +89,23 @@ namespace ut_helper sai_api_query(SAI_API_MPLS, (void**)&sai_mpls_api); sai_api_query(SAI_API_COUNTER, (void**)&sai_counter_api); sai_api_query(SAI_API_FDB, (void**)&sai_fdb_api); - + sai_api_query(SAI_API_TWAMP, (void**)&sai_twamp_api); + sai_api_query(SAI_API_TAM, (void**)&sai_tam_api); + sai_api_query((sai_api_t)SAI_API_DASH_VIP, (void**)&sai_dash_vip_api); + sai_api_query((sai_api_t)SAI_API_DASH_DIRECTION_LOOKUP, (void**)&sai_dash_direction_lookup_api); + sai_api_query((sai_api_t)SAI_API_DASH_ENI, (void**)&sai_dash_eni_api); + sai_api_query((sai_api_t)SAI_API_DASH_HA, (void**)&sai_dash_ha_api); + sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_CA_TO_PA, (void**)&sai_dash_outbound_ca_to_pa_api); + sai_api_query((sai_api_t)SAI_API_DASH_PA_VALIDATION, (void**)&sai_dash_pa_validation_api); + sai_api_query((sai_api_t)SAI_API_DASH_VNET, (void**)&sai_dash_vnet_api); + sai_api_query((sai_api_t)SAI_API_DASH_APPLIANCE, (void**)&sai_dash_appliance_api); + sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_ROUTING, (void**)&sai_dash_outbound_routing_api); + sai_api_query((sai_api_t)SAI_API_DASH_INBOUND_ROUTING, (void**)&sai_dash_inbound_routing_api); + sai_api_query((sai_api_t)SAI_API_DASH_TUNNEL, (void**)&sai_dash_tunnel_api); + sai_api_query((sai_api_t)SAI_API_DASH_OUTBOUND_PORT_MAP, (void**)&sai_dash_outbound_port_map_api); + sai_api_query((sai_api_t)SAI_API_DASH_TRUSTED_VNI, (void**)&sai_dash_trusted_vni_api); + sai_api_query(SAI_API_STP, (void**)&sai_stp_api); + sai_api_query((sai_api_t)SAI_API_DASH_METER, (void**)&sai_dash_meter_api); return SAI_STATUS_SUCCESS; } @@ -118,6 +134,14 @@ namespace ut_helper sai_buffer_api = nullptr; sai_queue_api = nullptr; sai_counter_api = nullptr; + sai_twamp_api = nullptr; + sai_tam_api = nullptr; + sai_dash_vip_api = nullptr; + sai_dash_direction_lookup_api = nullptr; + sai_dash_eni_api = nullptr; + sai_dash_ha_api = nullptr; + sai_stp_api = nullptr; + sai_dash_meter_api = nullptr; return SAI_STATUS_SUCCESS; } diff --git a/tests/mock_tests/warmrestarthelper_ut.cpp b/tests/mock_tests/warmrestarthelper_ut.cpp new file mode 100644 index 00000000000..9aae03e88dc --- /dev/null +++ b/tests/mock_tests/warmrestarthelper_ut.cpp @@ -0,0 +1,106 @@ +#include "warmRestartHelper.h" +#include "warm_restart.h" +#include "mock_table.h" +#include "ut_helper.h" + +using namespace testing_db; + +namespace wrhelper_test +{ + struct WRHelperTest : public ::testing::Test + { + std::shared_ptr m_app_db; + std::shared_ptr m_pipeline; + std::shared_ptr m_routeTable; + std::shared_ptr m_routeProducerTable; + std::shared_ptr wrHelper; + + void SetUp() override + { + m_app_db = std::make_shared("APPL_DB", 0); + m_pipeline = std::make_shared(m_app_db.get()); + m_routeTable = std::make_shared(m_app_db.get(), "ROUTE_TABLE"); + m_routeProducerTable = std::make_shared(m_app_db.get(), "ROUTE_TABLE"); + wrHelper = std::make_shared(m_pipeline.get(), m_routeProducerTable.get(), "ROUTE_TABLE", "bgp", "bgp"); + testing_db::reset(); + } + + void TearDown() override { + } + }; + + TEST_F(WRHelperTest, testReconciliation) + { + /* Initialize WR */ + wrHelper->setState(WarmStart::INITIALIZED); + ASSERT_EQ(wrHelper->getState(), WarmStart::INITIALIZED); + + /* Old-life entries */ + m_routeTable->set("1.0.0.0/24", + { + {"ifname", "eth1"}, + {"nexthop", "2.0.0.0"} + }); + m_routeTable->set("1.1.0.0/24", + { + {"ifname", "eth2"}, + {"nexthop", "2.1.0.0"}, + {"weight", "1"}, + }); + m_routeTable->set("1.2.0.0/24", + { + {"ifname", "eth2"}, + {"nexthop", "2.2.0.0"}, + {"weight", "1"}, + {"random_attrib", "random_val"}, + }); + wrHelper->runRestoration(); + ASSERT_EQ(wrHelper->getState(), WarmStart::RESTORED); + + /* Insert new life entries */ + wrHelper->insertRefreshMap({ + "1.0.0.0/24", + "SET", + { + {"ifname", "eth1"}, + {"nexthop", "2.0.0.0"}, + {"protocol", "kernel"} + } + }); + wrHelper->insertRefreshMap({ + "1.1.0.0/24", + "SET", + { + {"ifname", "eth2"}, + {"nexthop", "2.1.0.0,2.5.0.0"}, + {"weight", "4"}, + {"protocol", "kernel"} + } + }); + wrHelper->insertRefreshMap({ + "1.2.0.0/24", + "SET", + { + {"ifname", "eth2"}, + {"nexthop", "2.2.0.0"}, + {"weight", "1"}, + {"protocol", "kernel"} + } + }); + wrHelper->reconcile(); + ASSERT_EQ(wrHelper->getState(), WarmStart::RECONCILED); + + std::string val; + ASSERT_TRUE(m_routeTable->hget("1.0.0.0/24", "protocol", val)); + ASSERT_EQ(val, "kernel"); + + m_routeTable->hget("1.1.0.0/24", "protocol", val); + ASSERT_EQ(val, "kernel"); + + m_routeTable->hget("1.1.0.0/24", "weight", val); + ASSERT_EQ(val, "4"); + + m_routeTable->hget("1.2.0.0/24", "protocol", val); + ASSERT_EQ(val, "kernel"); + } +} diff --git a/tests/mock_tests/zmq_orch_ut.cpp b/tests/mock_tests/zmq_orch_ut.cpp new file mode 100644 index 00000000000..21a023522a1 --- /dev/null +++ b/tests/mock_tests/zmq_orch_ut.cpp @@ -0,0 +1,131 @@ +#include "gtest/gtest.h" +#include +#include +#include +#include "schema.h" +#include "ut_helper.h" +#include "orch_zmq_config.h" +#include "dbconnector.h" +#include "mock_table.h" + +#define protected public +#include "orch.h" +#include "zmqorch.h" +#undef protected + +#define MAX_RETRY 10 + +using namespace std; + +TEST(ZmqOrchTest, CreateZmqOrchWitTableNames) +{ + vector tables = { + { "TABLE_1", 1 }, + { "TABLE_2", 2 }, + { "TABLE_3", 3 } + }; + + auto app_db = make_shared("APPL_DB", 0); + auto zmq_orch = make_shared(app_db.get(), tables, nullptr); + + EXPECT_EQ(zmq_orch->getSelectables().size(), tables.size()); +} + +TEST(ZmqOrchTest, GetZMQPort) +{ + const char* backup_nsid = getenv("NAMESPACE_ID"); + + // ZMQ port for global namespace + unsetenv("NAMESPACE_ID"); + int port = swss::get_zmq_port(); + EXPECT_EQ(port, ORCH_ZMQ_PORT); + + // ZMQ port for namespace 0 + setenv("NAMESPACE_ID", "0", true); + port = swss::get_zmq_port(); + EXPECT_EQ(port, ORCH_ZMQ_PORT+1); + + if (backup_nsid == nullptr) + { + unsetenv("NAMESPACE_ID"); + } + else + { + setenv("NAMESPACE_ID", backup_nsid, true); + } +} + +class ZmqHandler : public ZmqMessageHandler +{ +public: + ZmqHandler() + { + received = false; + } + + virtual void handleReceivedData(const std::vector>& kcos) + { + received = true; + } + + bool received; +}; + +TEST(ZmqOrchTest, CreateZmqClient) +{ + string zmq_server_address = "tcp://127.0.0.1"; + ZmqHandler zmq_handler; + auto zmq_server = swss::create_zmq_server(zmq_server_address); + auto zmq_client = swss::create_zmq_client(zmq_server_address); + + zmq_server->registerMessageHandler("test_db", "test_table", &zmq_handler); + zmq_server->bind(); + + std::vector value; + zmq_client->sendMsg("test_db", "test_table", value); + + int retry = 0; + while (retry < MAX_RETRY) + { + if (zmq_handler.received) + { + break; + } + + sleep(1); + } + + EXPECT_TRUE(zmq_handler.received); +} + +TEST(ZmqOrchTest, GetFeatureStatus) +{ + DBConnector config_db("CONFIG_DB", 0); + config_db.hset("DEVICE_METADATA|localhost", ORCH_NORTHBOND_DASH_ZMQ_ENABLED, "true"); + auto enabled = swss::get_feature_status(ORCH_NORTHBOND_DASH_ZMQ_ENABLED, false); + EXPECT_TRUE(enabled); + + config_db.hdel("DEVICE_METADATA|localhost", ORCH_NORTHBOND_DASH_ZMQ_ENABLED); + enabled = swss::get_feature_status(ORCH_NORTHBOND_DASH_ZMQ_ENABLED, false); + EXPECT_FALSE(enabled); + + config_db.hset("DEVICE_METADATA|localhost", ORCH_NORTHBOND_DASH_ZMQ_ENABLED, "false"); + enabled = swss::get_feature_status(ORCH_NORTHBOND_DASH_ZMQ_ENABLED, true); + EXPECT_FALSE(enabled); + + config_db.hdel("DEVICE_METADATA|localhost", ORCH_NORTHBOND_DASH_ZMQ_ENABLED); + enabled = swss::get_feature_status(ORCH_NORTHBOND_DASH_ZMQ_ENABLED, true); + EXPECT_TRUE(enabled); +} + +TEST(ZmqOrchTest, GetFeatureStatusException) +{ + DBConnector config_db("CONFIG_DB", 0); + config_db.hset("DEVICE_METADATA|localhost", HGET_THROW_EXCEPTION_FIELD_NAME, "false"); + auto enabled = swss::get_feature_status(HGET_THROW_EXCEPTION_FIELD_NAME, true); + EXPECT_TRUE(enabled); + + config_db.hset("DEVICE_METADATA|localhost", HGET_THROW_EXCEPTION_FIELD_NAME, "true"); + enabled = swss::get_feature_status(HGET_THROW_EXCEPTION_FIELD_NAME, false); + EXPECT_FALSE(enabled); +} diff --git a/tests/p4rt/l3.py b/tests/p4rt/l3.py index 915228a9b57..cc72039d4bb 100644 --- a/tests/p4rt/l3.py +++ b/tests/p4rt/l3.py @@ -28,6 +28,22 @@ class P4RtRouterInterfaceWrapper(util.DBInterface): DEFAULT_SRC_MAC = "00:11:22:33:44:55" DEFAULT_ACTION = "set_port_and_src_mac" + def get_default_loopback_oid(self): + rif_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in rif_entries: + (status, fvs) = util.get_key(self.asic_db, self.ASIC_DB_TBL_NAME, key) + assert status == True + is_loopback = False + is_gVR = False + for f,v in fvs: + if f == "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": + is_gVR = True + if f == "SAI_ROUTER_INTERFACE_ATTR_TYPE" and v == "SAI_ROUTER_INTERFACE_TYPE_LOOPBACK": + is_loopback = True + if is_gVR and is_loopback: + return key + return 0 + # Fetch oid of the first newly created rif from created rif in ASIC # db. This API should only be used when only one oid is expected to be # created after the original entries. diff --git a/tests/p4rt/test_l3.py b/tests/p4rt/test_l3.py index a16c8d3f03a..5f20ce0b1f0 100644 --- a/tests/p4rt/test_l3.py +++ b/tests/p4rt/test_l3.py @@ -23,6 +23,13 @@ def _set_up(self, dvs): self._p4rt_nexthop_obj.set_up_databases(dvs) self._p4rt_route_obj.set_up_databases(dvs) self._p4rt_wcmp_group_obj.set_up_databases(dvs) + APP_P4RT_CHANNEL_NAME="P4rt_Channel" + self.p4rt_notifier = swsscommon.NotificationProducer( + self._p4rt_route_obj.appl_db, APP_P4RT_CHANNEL_NAME + ) + self.p4rt_response_consumer = swsscommon.NotificationConsumer( + self._p4rt_route_obj.appl_db, APP_P4RT_CHANNEL_NAME + ) self.response_consumer = swsscommon.NotificationConsumer( self._p4rt_route_obj.appl_db, "APPL_DB_" + swsscommon.APP_P4RT_TABLE_NAME + "_RESPONSE_CHANNEL" @@ -59,19 +66,10 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), ), - ( - self._p4rt_route_obj.appl_state_db, - "%s:%s" - % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), - ), (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), ) self._p4rt_route_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Create router interface. ( router_interface_id, @@ -82,24 +80,12 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create neighbor. neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create nexthop. nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( @@ -109,24 +95,12 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create route entry. route_key, attr_list = self._p4rt_route_obj.create_route(nexthop_id) util.verify_response( self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -145,24 +119,6 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created route key. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME @@ -191,11 +147,6 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count did not change in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -227,24 +178,6 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): ] util.verify_attr(fvs, attr_list_appl_db) - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for the updated route key. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME @@ -274,11 +207,6 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count did not change in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -295,37 +223,6 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): route_key, ) assert status == True - attr_list_appl_db = [ - (self._p4rt_route_obj.ACTION_FIELD, "drop"), - ( - util.prepend_param_field( - self._p4rt_route_obj.NEXTHOP_ID_FIELD), - nexthop_id, - ), - ( - util.prepend_param_field( - self._p4rt_route_obj.ROUTE_METADATA_FIELD), - "2", - ), - ] - util.verify_attr(fvs, attr_list_appl_db) - - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for the updated route key. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for route entries. @@ -360,46 +257,23 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, route_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove nexthop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) util.verify_response(self.response_consumer, nexthop_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) util.verify_response( self.response_consumer, neighbor_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) util.verify_response( self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count is same as the original count. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -417,24 +291,6 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): ) assert status == False - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that the route_key no longer exists in application state - # database. - (status, fsv) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == False - # Query ASIC database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME @@ -468,11 +324,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), ), - ( - self._p4rt_route_obj.appl_state_db, - "%s:%s" - % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), - ), (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), ) self._p4rt_route_obj.get_original_redis_entries(db_list) @@ -490,14 +341,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self._p4rt_wcmp_group_obj.TBL_NAME, ), ), - ( - self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME, - ), - ), ( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, @@ -509,10 +352,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): ) self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Create router interface. ( router_interface_id, @@ -523,12 +362,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create neighbor. neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor( ipv4=False @@ -537,12 +370,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create nexthop. nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop( ipv4=False @@ -554,12 +381,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create wcmp group. ( wcmp_group_id, @@ -570,13 +391,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 2 in Redis DB - # (1 each for WCMP group and member). - count += 2 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, @@ -597,26 +411,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for wcmp group entries. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME, - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -684,12 +478,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -708,24 +496,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created route key. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME @@ -755,11 +525,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count did not change in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -776,32 +541,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): route_key, ) assert status == True - attr_list_appl_db = [ - (self._p4rt_route_obj.ACTION_FIELD, "drop"), - ( - util.prepend_param_field( - self._p4rt_route_obj.WCMP_GROUP_ID_FIELD), - wcmp_group_id, - ), - ] - util.verify_attr(fvs, attr_list_appl_db) - - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for the updated route key. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for route entries. @@ -838,11 +577,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count did not change in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -859,32 +593,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): route_key, ) assert status == True - attr_list_appl_db = [ - (self._p4rt_route_obj.ACTION_FIELD, "trap"), - ( - util.prepend_param_field( - self._p4rt_route_obj.WCMP_GROUP_ID_FIELD), - wcmp_group_id, - ), - ] - util.verify_attr(fvs, attr_list_appl_db) - - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for the updated route key. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for route entries. @@ -918,59 +626,29 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, route_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove wcmp group entry. self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) util.verify_response( self.response_consumer, wcmp_group_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count decremented by 2 in Redis DB - # (1 each for WCMP group and member). - count -= 2 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove nexthop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) util.verify_response(self.response_consumer, nexthop_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) util.verify_response( self.response_consumer, neighbor_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) util.verify_response( self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count is same as original count. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, @@ -988,24 +666,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): ) assert status == False - # Query application state database for route entries. - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that the route_key no longer exists in application state - # database. - (status, fsv) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == False - # Query ASIC database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME @@ -1041,26 +701,6 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): ) assert status == False - # Query application state database for wcmp group entries. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME, - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that the wcmp_group_key no longer exists in application state - # database. - (status, fsv) = util.get_key( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key, - ) - assert status == False - # Query ASIC database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -1114,14 +754,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): self._p4rt_nexthop_obj.TBL_NAME, ), ), - ( - self._p4rt_nexthop_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_nexthop_obj.APP_DB_TBL_NAME, - self._p4rt_nexthop_obj.TBL_NAME, - ), - ), (self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), ) @@ -1135,14 +767,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): self._p4rt_gre_tunnel_obj.TBL_NAME, ), ), - ( - self._p4rt_gre_tunnel_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, - self._p4rt_gre_tunnel_obj.TBL_NAME, - ), - ), (self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME), ) @@ -1153,10 +777,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): ) self._p4rt_router_intf_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Create router interface. ( router_interface_id, @@ -1171,12 +791,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): router_intf_oid = self._p4rt_router_intf_obj.get_newly_created_router_interface_oid() assert router_intf_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create tunnel. tunnel_id, tunnel_key, attr_list = self._p4rt_gre_tunnel_obj.create_gre_tunnel() util.verify_response( @@ -1185,16 +799,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): # get tunnel_oid of newly created tunnel tunnel_oid = self._p4rt_gre_tunnel_obj.get_newly_created_tunnel_oid() assert tunnel_oid is not None - # get overlay router_interface_oid of newly created router_intf - overlay_router_intf_oid = self._p4rt_router_intf_obj.get_newly_created_router_interface_oid( - set([router_intf_oid])) - assert overlay_router_intf_oid is not None - - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count # Query application database for tunnel entries. tunnel_entries = util.get_keys( @@ -1215,25 +819,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for tunnel entries. - state_tunnel_entries = util.get_keys( - self._p4rt_gre_tunnel_obj.appl_state_db, - self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + - ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, - ) - assert len(state_tunnel_entries) == ( - self._p4rt_gre_tunnel_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created tunnel key. - (status, fvs) = util.get_key( - self._p4rt_gre_tunnel_obj.appl_state_db, - self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, - tunnel_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for tunnel entries. tunnel_entries = util.get_keys( self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME @@ -1254,7 +839,7 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): attr_list = [ (self._p4rt_gre_tunnel_obj.SAI_ATTR_UNDERLAY_INTERFACE, router_intf_oid), (self._p4rt_gre_tunnel_obj.SAI_ATTR_OVERLAY_INTERFACE, - overlay_router_intf_oid), + self._p4rt_router_intf_obj.get_default_loopback_oid()), (self._p4rt_gre_tunnel_obj.SAI_ATTR_TYPE, "SAI_TUNNEL_TYPE_IPINIP_GRE"), (self._p4rt_gre_tunnel_obj.SAI_ATTR_PEER_MODE, "SAI_TUNNEL_PEER_MODE_P2P"), (self._p4rt_gre_tunnel_obj.SAI_ATTR_ENCAP_SRC_IP, @@ -1270,12 +855,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create tunnel nexthop. nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop( tunnel_id=tunnel_id @@ -1287,12 +866,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for nexthop entries. nexthop_entries = util.get_keys( self._p4rt_nexthop_obj.appl_db, @@ -1311,24 +884,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for nexthop entries. - state_nexthop_entries = util.get_keys( - self._p4rt_nexthop_obj.appl_state_db, - self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, - ) - assert len(state_nexthop_entries) == ( - self._p4rt_nexthop_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created nexthop key. - (status, fvs) = util.get_key( - self._p4rt_nexthop_obj.appl_state_db, - self._p4rt_nexthop_obj.APP_DB_TBL_NAME, - nexthop_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for nexthop entries. nexthop_entries = util.get_keys( self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME @@ -1360,47 +915,24 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, nexthop_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) util.verify_response( self.response_consumer, neighbor_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove tunnel. self._p4rt_gre_tunnel_obj.remove_app_db_entry(tunnel_key) util.verify_response( self.response_consumer, tunnel_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) util.verify_response( self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count is same as the original count. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - # Query application database for nexthop entries. nexthop_entries = util.get_keys( self._p4rt_nexthop_obj.appl_db, @@ -1418,27 +950,9 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): ) assert status == False - # Query application state database for nexthop entries. - state_nexthop_entries = util.get_keys( - self._p4rt_nexthop_obj.appl_state_db, - self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, - ) - assert len(state_nexthop_entries) == ( - self._p4rt_nexthop_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that the nexthop_key no longer exists in application state - # database. - (status, fsv) = util.get_key( - self._p4rt_nexthop_obj.appl_state_db, - self._p4rt_nexthop_obj.APP_DB_TBL_NAME, - nexthop_key, - ) - assert status == False - - # Query ASIC database for nexthop entries. - nexthop_entries = util.get_keys( - self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME + # Query ASIC database for nexthop entries. + nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME ) assert len(nexthop_entries) == ( self._p4rt_nexthop_obj.get_original_asic_db_entries_count() @@ -1470,25 +984,6 @@ def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): ) assert status == False - # Query application state database for tunnel entries. - state_tunnel_entries = util.get_keys( - self._p4rt_gre_tunnel_obj.appl_state_db, - self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + - ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, - ) - assert len(state_tunnel_entries) == ( - self._p4rt_gre_tunnel_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that the tunnel_key no longer exists in application state - # database. - (status, fsv) = util.get_key( - self._p4rt_gre_tunnel_obj.appl_state_db, - self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, - tunnel_key, - ) - assert status == False - # Query ASIC database for tunnel entries. runnel_entries = util.get_keys( self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME @@ -1524,11 +1019,6 @@ def test_IPv4RouteAddWithInvalidNexthopFail(self, dvs, testlog): "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), ), - ( - self._p4rt_route_obj.appl_state_db, - "%s:%s" - % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), - ), (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), ) self._p4rt_route_obj.get_original_redis_entries(db_list) @@ -1558,25 +1048,6 @@ def test_IPv4RouteAddWithInvalidNexthopFail(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application database for route entries (no new route entry - # expected). - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that the newly added route key does not exist in application - # state db. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == False - # Query ASIC database for route entries (no new ASIC DB entry should be # created for route entry). route_entries = util.get_keys( @@ -1612,11 +1083,6 @@ def test_IPv6RouteAddWithInvalidWcmpFail(self, dvs, testlog): "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), ), - ( - self._p4rt_route_obj.appl_state_db, - "%s:%s" - % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), - ), (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), ) self._p4rt_route_obj.get_original_redis_entries(db_list) @@ -1648,25 +1114,6 @@ def test_IPv6RouteAddWithInvalidWcmpFail(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for route entries (no new APPL STATE DB - # entry should be created for route entry). - state_route_entries = util.get_keys( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, - ) - assert len(state_route_entries) == ( - self._p4rt_route_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that newly created route key does not exist in application - # state db. - (status, fvs) = util.get_key( - self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key, - ) - assert status == False - # Query ASIC database for route entries (no new ASIC DB entry should be # created for route entry). route_entries = util.get_keys( @@ -1699,14 +1146,6 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): self._p4rt_wcmp_group_obj.TBL_NAME, ), ), - ( - self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME, - ), - ), ( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, @@ -1723,10 +1162,6 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Bring up port under test. port_name = "Ethernet0" if_name = "eth0" @@ -1743,24 +1178,12 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create neighbor. neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create nexthop. nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( @@ -1770,12 +1193,6 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create wcmp group with one member. ( wcmp_group_id, @@ -1786,13 +1203,6 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 2 in Redis DB - # (1 each for WCMP group and member). - count += 2 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, @@ -1813,26 +1223,6 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for wcmp group entries. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME, - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -1898,15 +1288,6 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() ) - # Check APPL STATE DB to verify no change. - (status, fvs) = util.get_key( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Force oper-up for associated port. util.set_interface_status(dvs, if_name, "up") @@ -1933,52 +1314,15 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): # Delete WCMP group member. self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) - # Verify that P4RT key to OID count decremented by 2 in Redis DB - # (1 each for WCMP group and member). - count -= 2 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - - # Verify that APPL STATE DB is now updated. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME - ), - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - ) - # Delete next hop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) - # Verify that P4RT key to OID count is same as the original count. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - def test_PruneNextHopOnWarmBoot(self, dvs, testlog): # Initialize L3 objects and database connectors. self._set_up(dvs) @@ -1994,14 +1338,6 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): self._p4rt_wcmp_group_obj.TBL_NAME, ), ), - ( - self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME, - ), - ), ( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, @@ -2018,10 +1354,6 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Bring up port under test. port_name = "Ethernet0" if_name = "eth0" @@ -2038,24 +1370,12 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create neighbor. neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create nexthop. nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( @@ -2065,12 +1385,6 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create wcmp group with one member. ( wcmp_group_id, @@ -2081,13 +1395,6 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 2 in Redis DB - # (1 each for WCMP group and member). - count += 2 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, @@ -2108,26 +1415,6 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for wcmp group entries. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME, - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -2212,52 +1499,15 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): # Delete WCMP group member. self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) - # Verify that P4RT key to OID count decremented by 2 in Redis DB - # (1 each for WCMP group and member). - count -= 2 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - - # Verify that APPL STATE DB is updated. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME - ), - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - ) - # Delete next hop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) - # Verify that P4RT key to OID count is same as the original count. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Initialize L3 objects and database connectors. self._set_up(dvs) @@ -2273,14 +1523,6 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): self._p4rt_wcmp_group_obj.TBL_NAME, ), ), - ( - self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME, - ), - ), ( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, @@ -2297,10 +1539,6 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Force oper-down on port under test. port_name = "Ethernet0" if_name = "eth0" @@ -2317,24 +1555,12 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create neighbor. neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create nexthop. nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( @@ -2344,12 +1570,6 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create wcmp group with one member. ( wcmp_group_id, @@ -2360,13 +1580,6 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB - # (WCMP group member is not created for operationally down watchport). - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, @@ -2387,26 +1600,6 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for wcmp group entries. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME, - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -2447,14 +1640,6 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Bring up the port. util.set_interface_status(dvs, if_name, "up") - # Verify that P4RT key to OID count incremented by 1 in Redis DB - # (WCMP group member is now expected to be created in SAI due to - # watchport now being operationally up) - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Verify that next hop member is now created in SAI. wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -2489,52 +1674,15 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Delete WCMP group member. self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) - # Verify that P4RT key to OID count decremented by 2 in Redis DB - # (1 each for WCMP group and member). - count -= 2 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - - # Verify that APPL STATE DB is updated. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME - ), - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - ) - # Delete next hop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) - # Verify that P4RT key to OID count is same as the original count. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # Initialize L3 objects and database connectors. self._set_up(dvs) @@ -2550,14 +1698,6 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): self._p4rt_wcmp_group_obj.TBL_NAME, ), ), - ( - self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME, - ), - ), ( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, @@ -2574,10 +1714,6 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Force oper-down on port under test. port_name = "Ethernet0" if_name = "eth0" @@ -2594,24 +1730,12 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create neighbor. neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create nexthop. nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( @@ -2621,12 +1745,6 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create wcmp group with one member. ( wcmp_group_id, @@ -2637,13 +1755,6 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB - # (WCMP group member is not created for operationally down watchport). - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, @@ -2664,26 +1775,6 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for wcmp group entries. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME, - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key( - self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -2735,41 +1826,9 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # group member is still referencing it. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) - # Verify that the P4RT key to OID count is same as before in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - - # Verify that the next hop still exists in app state db. - (status, fvs) = util.get_key( - self._p4rt_nexthop_obj.appl_state_db, - self._p4rt_nexthop_obj.APP_DB_TBL_NAME, - nexthop_key, - ) - assert status == True - # Delete the pruned wcmp group member and try again. self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - - # Verify that APPL STATE DB is updated. - state_wcmp_group_entries = util.get_keys( - self._p4rt_wcmp_group_obj.appl_state_db, - ( - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_wcmp_group_obj.TBL_NAME - ), - ) - assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - ) - # Verify that ASIC DB is updated. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, @@ -2789,29 +1848,12 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # Delete next hop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Delete router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) - # Verify that P4RT key to OID count is same as the original count. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, testlog): # Initialize L3 objects and database connectors. self._set_up(dvs) @@ -2828,14 +1870,6 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes self._p4rt_nexthop_obj.TBL_NAME, ), ), - ( - self._p4rt_nexthop_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_nexthop_obj.APP_DB_TBL_NAME, - self._p4rt_nexthop_obj.TBL_NAME, - ), - ), (self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), ) @@ -2849,14 +1883,6 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes self._p4rt_gre_tunnel_obj.TBL_NAME, ), ), - ( - self._p4rt_gre_tunnel_obj.appl_state_db, - "%s:%s" - % ( - self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, - self._p4rt_gre_tunnel_obj.TBL_NAME, - ), - ), (self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME), ) @@ -2867,10 +1893,6 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes ) self._p4rt_router_intf_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Create tunnel. tunnel_id, tunnel_key, attr_list = self._p4rt_gre_tunnel_obj.create_gre_tunnel() util.verify_response( @@ -2878,11 +1900,6 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes "[OrchAgent] Router intf '16' does not exist" ) - # Verify that P4RT key to OID count does not change in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - # Query application database for tunnel entries. tunnel_entries = util.get_keys( self._p4rt_gre_tunnel_obj.appl_db, @@ -2893,16 +1910,6 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes self._p4rt_gre_tunnel_obj.get_original_appl_db_entries_count() + 1 ) - # Query application state database for tunnel entries. - state_tunnel_entries = util.get_keys( - self._p4rt_gre_tunnel_obj.appl_state_db, - self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + - ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, - ) - assert len(state_tunnel_entries) == ( - self._p4rt_gre_tunnel_obj.get_original_appl_state_db_entries_count() - ) - # Query ASIC database for tunnel entries. tunnel_entries = util.get_keys( self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME @@ -2920,11 +1927,6 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes "[OrchAgent] GRE Tunnel 'tunnel-1' does not exist in GRE Tunnel Manager" ) - # Verify that P4RT key to OID count does not change in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - # Query application database for nexthop entries. nexthop_entries = util.get_keys( self._p4rt_nexthop_obj.appl_db, @@ -2934,15 +1936,6 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes self._p4rt_nexthop_obj.get_original_appl_db_entries_count() + 1 ) - # Query application state database for nexthop entries. - state_nexthop_entries = util.get_keys( - self._p4rt_nexthop_obj.appl_state_db, - self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, - ) - assert len(state_nexthop_entries) == ( - self._p4rt_nexthop_obj.get_original_appl_state_db_entries_count() - ) - # Query ASIC database for nexthop entries. nexthop_entries = util.get_keys( self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME @@ -2952,3 +1945,148 @@ def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, tes ) self._clean_vrf(dvs) + + def test_P4rtNotification(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + self._set_vrf(dvs) + + # Set IP type for route object. + self._p4rt_route_obj.set_ip_type("IPV4") + + # Maintain list of original Application and ASIC DB entries before + # adding new route. + db_list = ( + ( + self._p4rt_route_obj.appl_db, + "%s:%s" + % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), + ), + (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_route_obj.get_original_redis_entries(db_list) + + # Add router interface. + router_intf_key = self._p4rt_router_intf_obj.generate_app_db_key( + self._p4rt_router_intf_obj.DEFAULT_ROUTER_INTERFACE_ID) + ritf_attrs = [ + (util.prepend_param_field(self._p4rt_router_intf_obj.PORT_FIELD), + self._p4rt_router_intf_obj.DEFAULT_PORT_ID), + (util.prepend_param_field(self._p4rt_router_intf_obj.SRC_MAC_FIELD), + self._p4rt_router_intf_obj.DEFAULT_SRC_MAC), + (self._p4rt_router_intf_obj.ACTION_FIELD, + self._p4rt_router_intf_obj.DEFAULT_ACTION) + ] + attr_list = [ + util.prepend_param_field( + self._p4rt_router_intf_obj.PORT_FIELD), self._p4rt_router_intf_obj.DEFAULT_PORT_ID, + util.prepend_param_field( + self._p4rt_router_intf_obj.SRC_MAC_FIELD), self._p4rt_router_intf_obj.DEFAULT_SRC_MAC, + self._p4rt_router_intf_obj.ACTION_FIELD, self._p4rt_router_intf_obj.DEFAULT_ACTION + ] + sop = "SET" + sdata = self._p4rt_router_intf_obj.DEFAULT_PORT_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(router_intf_key, json.dumps(attr_list))])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, ritf_attrs, router_intf_key) + + # Add neighbor. + neighbor_key = self._p4rt_neighbor_obj.generate_app_db_key( + self._p4rt_neighbor_obj.DEFAULT_ROUTER_INTERFACE_ID, self._p4rt_neighbor_obj.DEFAULT_IPV4_NEIGHBOR_ID) + neighbor_attrs = [ + (util.prepend_param_field(self._p4rt_neighbor_obj.DST_MAC_FIELD), + self._p4rt_neighbor_obj.DEFAULT_DST_MAC), + (self._p4rt_neighbor_obj.ACTION_FIELD, + self._p4rt_neighbor_obj.DEFAULT_ACTION) + ] + attr_list = [ + util.prepend_param_field( + self._p4rt_neighbor_obj.DST_MAC_FIELD), self._p4rt_neighbor_obj.DEFAULT_DST_MAC, + self._p4rt_neighbor_obj.ACTION_FIELD, self._p4rt_neighbor_obj.DEFAULT_ACTION + ] + sop = "SET" + sdata = self._p4rt_neighbor_obj.DEFAULT_ROUTER_INTERFACE_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(neighbor_key, json.dumps(attr_list))])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, neighbor_attrs, neighbor_key) + + # Add nexthop. + nexthop_key = self._p4rt_nexthop_obj.generate_app_db_key( + self._p4rt_nexthop_obj.DEFAULT_NEXTHOP_ID) + nexthop_attrs = [ + (self._p4rt_nexthop_obj.ACTION_FIELD, + self._p4rt_nexthop_obj.DEFAULT_ACTION), + (util.prepend_param_field(self._p4rt_nexthop_obj.RIF_FIELD), + self._p4rt_nexthop_obj.DEFAULT_ROUTER_INTERFACE_ID), + (util.prepend_param_field(self._p4rt_nexthop_obj.NEIGHBOR_ID_FIELD), + self._p4rt_nexthop_obj.DEFAULT_IPV4_NEIGHBOR_ID) + ] + attr_list = [ + self._p4rt_nexthop_obj.ACTION_FIELD, self._p4rt_nexthop_obj.DEFAULT_ACTION, + util.prepend_param_field( + self._p4rt_nexthop_obj.RIF_FIELD), self._p4rt_nexthop_obj.DEFAULT_ROUTER_INTERFACE_ID, + util.prepend_param_field( + self._p4rt_nexthop_obj.NEIGHBOR_ID_FIELD), self._p4rt_nexthop_obj.DEFAULT_IPV4_NEIGHBOR_ID + ] + sop = "SET" + sdata = self._p4rt_nexthop_obj.DEFAULT_NEXTHOP_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(nexthop_key, json.dumps(attr_list))])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, nexthop_attrs, nexthop_key) + + # Add route. + route_key = self._p4rt_route_obj.generate_app_db_key( + self._p4rt_route_obj.DEFAULT_VRF_ID, self._p4rt_route_obj.DEFAULT_DST) + route_attrs = [ + (self._p4rt_route_obj.ACTION_FIELD, + self._p4rt_route_obj.DEFAULT_ACTION), + (util.prepend_param_field(self._p4rt_route_obj.NEXTHOP_ID_FIELD), + self._p4rt_route_obj.DEFAULT_NEXTHOP_ID), + ] + attr_list = [ + self._p4rt_route_obj.ACTION_FIELD, self._p4rt_route_obj.DEFAULT_ACTION, + util.prepend_param_field( + self._p4rt_route_obj.NEXTHOP_ID_FIELD), self._p4rt_route_obj.DEFAULT_NEXTHOP_ID + ] + sop = "SET" + sdata = self._p4rt_route_obj.DEFAULT_VRF_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(route_key, json.dumps(attr_list))])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, route_attrs, route_key) + + # Delete route. + sop = "DEL" + sdata = self._p4rt_route_obj.DEFAULT_VRF_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(route_key, "")])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, route_attrs, route_key) + + # Delete nexthop. + sop = "DEL" + sdata = self._p4rt_nexthop_obj.DEFAULT_NEXTHOP_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(nexthop_key, "")])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, nexthop_attrs, nexthop_key) + + # Delete neighbor. + sop = "DEL" + sdata = self._p4rt_neighbor_obj.DEFAULT_ROUTER_INTERFACE_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(neighbor_key, "")])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, neighbor_attrs, neighbor_key) + + # Delete router interface. + sop = "DEL" + sdata = self._p4rt_router_intf_obj.DEFAULT_PORT_ID + self.p4rt_notifier.send(sop, sdata, swsscommon.FieldValuePairs([(router_intf_key, "")])) + util.p4rt_verify_response(self.p4rt_response_consumer, sop, sdata, ritf_attrs, router_intf_key) + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + ) + # Query ASIC database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + ) + self._clean_vrf(dvs) diff --git a/tests/p4rt/test_l3_admit.py b/tests/p4rt/test_l3_admit.py index 81ffdf884ad..457023b7870 100644 --- a/tests/p4rt/test_l3_admit.py +++ b/tests/p4rt/test_l3_admit.py @@ -1,11 +1,13 @@ from swsscommon import swsscommon +import time import pytest import json import util import l3_admit +@pytest.mark.usefixtures("dvs_lag_manager") class TestP4RTL3Admit(object): def _set_up(self, dvs): self._p4rt_l3_admit_obj = l3_admit.P4RtL3AdmitWrapper() @@ -29,20 +31,11 @@ def test_DefaultL3AdmitAddDeletePass(self, dvs, testlog): "%s:%s" % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), ), - ( - self._p4rt_l3_admit_obj.appl_state_db, - "%s:%s" - % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), - ), (self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME), ) self._p4rt_l3_admit_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # l3 admit entry attributes # P4RT_TABLE:FIXED_L3_ADMIT_TABLE:{\"match/dst_mac\":\"00:02:03:04:00:00&ff:ff:ff:ff:00:00\",\"match/in_port\":\"Ethernet8\",\"priority\":2030} # "action": "admit_to_l3" @@ -61,12 +54,6 @@ def test_DefaultL3AdmitAddDeletePass(self, dvs, testlog): self.response_consumer, l3_admit_key, attr_list, "SWSS_RC_SUCCESS" ) - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for l3 admit entries. l3_admit_entries = util.get_keys( self._p4rt_l3_admit_obj.appl_db, @@ -86,25 +73,6 @@ def test_DefaultL3AdmitAddDeletePass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for l3 admit entries. - state_l3_admit_entries = util.get_keys( - self._p4rt_l3_admit_obj.appl_state_db, - self._p4rt_l3_admit_obj.APP_DB_TBL_NAME + - ":" + self._p4rt_l3_admit_obj.TBL_NAME, - ) - assert len(state_l3_admit_entries) == ( - self._p4rt_l3_admit_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created l3 admit key. - (status, fvs) = util.get_key( - self._p4rt_l3_admit_obj.appl_state_db, - self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, - l3_admit_key, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # Query ASIC database for my mac entries. asic_l3_admit_entries = util.get_keys( self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME @@ -137,21 +105,11 @@ def test_DefaultL3AdmitAddDeletePass(self, dvs, testlog): "L3 Admit entry with the same key received: 'match/dst_mac=00:02:03:04:00:00&ff:ff:ff:ff:00:00:match/in_port=Ethernet8:priority=2030'" ) - # Verify that P4RT key to OID count did not change in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Remove l3 admit entry. self._p4rt_l3_admit_obj.remove_app_db_entry(l3_admit_key) util.verify_response(self.response_consumer, l3_admit_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented to orig in Redis DB. - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - # Query application database for route entries. l3_admit_entries = util.get_keys( self._p4rt_l3_admit_obj.appl_db, @@ -170,24 +128,6 @@ def test_DefaultL3AdmitAddDeletePass(self, dvs, testlog): ) assert status == False - # Query application database for route entries. - state_l3_admit_entries = util.get_keys( - self._p4rt_l3_admit_obj.appl_state_db, - self._p4rt_l3_admit_obj.APP_DB_TBL_NAME + - ":" + self._p4rt_l3_admit_obj.TBL_NAME, - ) - assert len(state_l3_admit_entries) == ( - self._p4rt_l3_admit_obj.get_original_appl_state_db_entries_count() - ) - - # Verify that the route_key no longer exists in application database. - (status, fsv) = util.get_key( - self._p4rt_l3_admit_obj.appl_state_db, - self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, - l3_admit_key, - ) - assert status == False - # Query ASIC database for my mac entries. my_mac_entries = util.get_keys( self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME @@ -216,20 +156,11 @@ def test_InvalidL3AdmitKeyFailsToCreate(self, dvs, testlog): "%s:%s" % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), ), - ( - self._p4rt_l3_admit_obj.appl_state_db, - "%s:%s" - % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), - ), (self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME), ) self._p4rt_l3_admit_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Invalid l3 admit key # P4RT_TABLE:FIXED_L3_ADMIT_TABLE:{\"match/dst_mac\":\"1\",\"match/in_port\":\"Ethernet8\",\"priority\":2030} # "action": "admit_to_l3" @@ -249,11 +180,6 @@ def test_InvalidL3AdmitKeyFailsToCreate(self, dvs, testlog): "[OrchAgent] Failed to deserialize l3 admit key" ) - # Verify that P4RT key to OID count not changed in Redis DB - status, fvs = key_to_oid_helper.get_db_info() - assert status == False - assert len(fvs) == len(original_key_oid_info) - # Query ASIC database for my mac entries. Count remains the same asic_l3_admit_entries = util.get_keys( self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME @@ -261,3 +187,32 @@ def test_InvalidL3AdmitKeyFailsToCreate(self, dvs, testlog): assert len(asic_l3_admit_entries) == ( self._p4rt_l3_admit_obj.get_original_asic_db_entries_count() ) + + # Configure lag ports on the switch + lag_id = "LAG1" + self.dvs_lag.create_port_channel(lag_id) + time.sleep(1) + + # Verify that lag port is created. + self.dvs_lag.get_and_verify_port_channel(1) + + # Invalid l3 admit key - in_port type is not supported + # P4RT_TABLE:FIXED_L3_ADMIT_TABLE:{\"match/dst_mac\":\"00:02:03:04:00:00&FF:FF:FF:FF:00:00\",\"match/in_port\":\"PortChannelLAG1\",\"priority\":2030} + # "action": "admit_to_l3" + # "controller_metadata": "..." + dst_mac_data = "00:02:03:04:00:00" + dst_mac_mask = "FF:FF:FF:FF:00:00" + in_port = "PortChannel" + lag_id + priority = 2030 + + # Create l3 admit entry. + (l3_admit_key, attr_list,) = self._p4rt_l3_admit_obj.create_l3_admit( + dst_mac_data + "&" + dst_mac_mask, priority, in_port + ) + util.verify_response( + self.response_consumer, + l3_admit_key, + attr_list, + "SWSS_RC_UNIMPLEMENTED", + "[OrchAgent] Port \'PortChannelLAG1\'\'s type 5 is not physical and is not supported for L3 Admit entry.", + ) diff --git a/tests/p4rt/test_p4rt_acl.py b/tests/p4rt/test_p4rt_acl.py index cfa1c0fb454..515354e1013 100644 --- a/tests/p4rt/test_p4rt_acl.py +++ b/tests/p4rt/test_p4rt_acl.py @@ -81,12 +81,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): + ":" + self._p4rt_acl_table_definition_obj.TBL_NAME, ) - original_appl_state_acl_tables = util.get_keys( - self._p4rt_acl_table_definition_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_acl_table_definition_obj.TBL_NAME, - ) original_asic_acl_tables = util.get_keys( self._p4rt_acl_table_definition_obj.asic_db, self._p4rt_acl_table_definition_obj.ASIC_DB_TBL_NAME, @@ -241,26 +235,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # query application state database for ACL tables - state_acl_tables = util.get_keys( - self._p4rt_acl_table_definition_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_acl_table_definition_obj.TBL_NAME, - ) - assert len(state_acl_tables) == len(original_appl_state_acl_tables) + 1 - - # query application state database for newly created ACL table - (status, fvs) = util.get_key( - self._p4rt_acl_table_definition_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_acl_table_definition_obj.TBL_NAME, - table_name, - ) - assert status == True - util.verify_attr(fvs, attr_list) - asic_udf_matches = util.get_keys( self._p4rt_udf_match_obj.asic_db, self._p4rt_udf_match_obj.ASIC_DB_TBL_NAME ) @@ -422,10 +396,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): self._p4rt_acl_rule_obj.appl_db, self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, ) - original_appl_state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) original_asic_acl_rules = util.get_keys( self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME ) @@ -480,22 +450,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 1 - - # query application state database for newly created ACL rule - (status, fvs) = util.get_key( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, - table_name_with_rule_key1, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # query ASIC database for ACL counters acl_asic_counters = util.get_keys( self._p4rt_acl_counter_obj.asic_db, @@ -653,22 +607,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 1 - - # query application state database for updated ACL rule - (status, fvs) = util.get_key( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, - table_name_with_rule_key1, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # query ASIC database for ACL counters acl_asic_counters = util.get_keys( self._p4rt_acl_counter_obj.asic_db, @@ -827,22 +765,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 2 - - # query application state database for newly created ACL rule - (status, fvs) = util.get_key( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, - table_name_with_rule_key2, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # query ASIC database for ACL counters acl_asic_counters = util.get_keys( self._p4rt_acl_counter_obj.asic_db, @@ -1025,22 +947,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 3 - - # query application state database for newly created ACL rule - (status, fvs) = util.get_key( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, - table_name_with_rule_key3, - ) - assert status == True - util.verify_attr(fvs, attr_list) - # query ASIC database for ACL counters acl_asic_counters = util.get_keys( self._p4rt_acl_counter_obj.asic_db, @@ -1136,21 +1042,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): ) assert status == False - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 2 - - # verify that the ACL rule no longer exists in application state database - (status, fvs) = util.get_key( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, - table_name_with_rule_key3, - ) - assert status == False - # query ASIC database for ACL rules acl_rules = util.get_keys( self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME @@ -1194,21 +1085,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): ) assert status == False - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 1 - - # verify that the ACL rule no longer exists in application state database - (status, fvs) = util.get_key( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, - table_name_with_rule_key1, - ) - assert status == False - # query ASIC database for ACL rules acl_rules = util.get_keys( self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME @@ -1260,21 +1136,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): ) assert status == False - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) - - # verify that the ACL rule no longer exists in application state database - (status, fvs) = util.get_key( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, - table_name_with_rule_key2, - ) - assert status == False - # query ASIC database for ACL rules acl_rules = util.get_keys( self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME @@ -1333,23 +1194,6 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): ) assert status == False - # query application state database for ACL tables - state_acl_tables = util.get_keys( - self._p4rt_acl_table_definition_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME - + ":" - + self._p4rt_acl_table_definition_obj.TBL_NAME, - ) - assert len(state_acl_tables) == len(original_appl_state_acl_tables) - - # verify that the ACL table no longer exists in application state database - (status, fvs) = util.get_key( - self._p4rt_acl_table_definition_obj.appl_state_db, - self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, - self._p4rt_acl_table_definition_obj.TBL_NAME + ":" + table_name, - ) - assert status == False - # query ASIC database for ACL tables acl_tables = util.get_keys( self._p4rt_acl_table_definition_obj.asic_db, @@ -1376,10 +1220,6 @@ def test_AclRuleAddWithoutTableDefinitionFails(self, dvs, testlog): self._p4rt_acl_rule_obj.appl_db, self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, ) - original_appl_state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) original_asic_acl_rules = util.get_keys( self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME ) @@ -1428,13 +1268,6 @@ def test_AclRuleAddWithoutTableDefinitionFails(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # query application state database for ACL rules - state_acl_rules = util.get_keys( - self._p4rt_acl_rule_obj.appl_state_db, - self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, - ) - assert len(state_acl_rules) == len(original_appl_state_acl_rules) - # query ASIC database for ACL rules acl_asic_rules = util.get_keys( self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME diff --git a/tests/p4rt/test_p4rt_mirror.py b/tests/p4rt/test_p4rt_mirror.py index c1327370c30..1584d9961d5 100644 --- a/tests/p4rt/test_p4rt_mirror.py +++ b/tests/p4rt/test_p4rt_mirror.py @@ -56,9 +56,6 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): original_appl_mirror_entries = util.get_keys( self._p4rt_mirror_session_wrapper.appl_db, self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) - original_appl_state_mirror_entries = util.get_keys( - self._p4rt_mirror_session_wrapper.appl_state_db, - self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) original_asic_mirror_entries = util.get_keys( self._p4rt_mirror_session_wrapper.asic_db, self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) @@ -108,20 +105,6 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list_in_app_db) - # Query application state database for mirror entries - appl_state_mirror_entries = util.get_keys( - self._p4rt_mirror_session_wrapper.appl_state_db, - self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) - assert len(appl_state_mirror_entries) == len( - original_appl_state_mirror_entries) + 1 - - # Query application state database for newly created mirror key - (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, - self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, - mirror_session_key) - assert status == True - util.verify_attr(fvs, attr_list_in_app_db) - # Query ASIC database for mirror entries asic_mirror_entries = util.get_keys(self._p4rt_mirror_session_wrapper.asic_db, self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) @@ -180,13 +163,6 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list_in_app_db) - # Query application state database for the modified mirror key - (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, - self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, - mirror_session_key) - assert status == True - util.verify_attr(fvs, attr_list_in_app_db) - # Query ASIC DB about the modified mirror session. expected_attr_list_in_asic_db[9] = ( self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS, new_dst_mac) @@ -214,19 +190,6 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): mirror_session_key) assert status == False - # Query application state database for mirror entries - appl_state_mirror_entries = util.get_keys( - self._p4rt_mirror_session_wrapper.appl_state_db, - self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) - assert len(appl_state_mirror_entries) == len( - original_appl_state_mirror_entries) - - # Query application state database for the deleted mirror key - (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, - self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME, - mirror_session_key) - assert status == False - # Query ASIC database for mirror entries asic_mirror_entries = util.get_keys(self._p4rt_mirror_session_wrapper.asic_db, self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) diff --git a/tests/p4rt/test_viplb.py b/tests/p4rt/test_viplb.py index fbb51ea48dc..ac98fc36f70 100644 --- a/tests/p4rt/test_viplb.py +++ b/tests/p4rt/test_viplb.py @@ -69,17 +69,10 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): db_list = ((self._p4rt_viplb_obj.appl_db, "%s:%s" % (self._p4rt_viplb_obj.APP_DB_TBL_NAME, self._p4rt_viplb_obj.TBL_NAME)), - (self._p4rt_viplb_obj.appl_state_db, - "%s:%s" % (self._p4rt_viplb_obj.APP_DB_TBL_NAME, - self._p4rt_viplb_obj.TBL_NAME)), (self._p4rt_viplb_obj.asic_db, self._p4rt_viplb_obj.ASIC_DB_TBL_NAME)) self._p4rt_viplb_obj.get_original_redis_entries(db_list) - # Fetch the original key to oid information from Redis DB. - key_to_oid_helper = util.KeyToOidDBHelper(dvs) - _, original_key_oid_info = key_to_oid_helper.get_db_info() - # Create router interface. router_interface_id, router_intf_key, attr_list = ( self._p4rt_router_intf_obj.create_router_interface() @@ -87,12 +80,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count = 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create neighbor. neighbor_id, neighbor_key, attr_list = ( self._p4rt_neighbor_obj.create_neighbor() @@ -100,12 +87,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create nexthop. first_nexthop_id, first_nexthop_key, attr_list = ( self._p4rt_nexthop_obj.create_next_hop() @@ -116,12 +97,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): first_nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert first_nexthop_oid is not None - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create viplb. viplb_key, attr_list = ( self._p4rt_viplb_obj.create_viplb(first_nexthop_id) @@ -129,12 +104,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, viplb_key, attr_list, "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Query application database for viplb entries. viplb_entries = util.get_keys( self._p4rt_viplb_obj.appl_db, @@ -150,22 +119,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) - # Query application state database for viplb entries. - state_viplb_entries = util.get_keys( - self._p4rt_viplb_obj.appl_state_db, - self._p4rt_viplb_obj.APP_DB_TBL_NAME + ":" + self._p4rt_viplb_obj.TBL_NAME) - assert len(state_viplb_entries) == ( - self._p4rt_viplb_obj.get_original_appl_state_db_entries_count() + 1 - ) - - # Query application state database for newly created viplb key. - (status, fvs) = util.get_key(self._p4rt_viplb_obj.appl_state_db, - self._p4rt_viplb_obj.APP_DB_TBL_NAME, - viplb_key) - assert status == True - util.verify_attr(fvs, attr_list) - - # get programmable_object_oid of newly created viplb viplb_oid = self._p4rt_viplb_obj.get_newly_created_programmable_object_oid() assert viplb_oid is not None @@ -183,12 +136,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create another neighbor. neighbor_id, neighbor_key, attr_list = ( self._p4rt_neighbor_obj.create_neighbor(router_interface_id="20", neighbor_id="10.0.0.1") @@ -196,12 +143,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Create another nexthop. second_nexthop_id, second_nexthop_key, attr_list = ( self._p4rt_nexthop_obj.create_next_hop(router_interface_id="20", neighbor_id="10.0.0.1", nexthop_id="16") @@ -209,12 +150,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, second_nexthop_key, attr_list, "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count incremented by 1 in Redis DB. - count += 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # Update viplb. viplb_key, attr_list = ( self._p4rt_viplb_obj.create_viplb(second_nexthop_id) @@ -228,12 +163,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response(self.response_consumer, first_nexthop_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # get crm counters time.sleep(1) used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') @@ -245,12 +174,6 @@ def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): util.verify_response( self.response_consumer, viplb_key, [], "SWSS_RC_SUCCESS") - # Verify that P4RT key to OID count decremented by 1 in Redis DB. - count -= 1 - status, fvs = key_to_oid_helper.get_db_info() - assert status == True - assert len(fvs) == len(original_key_oid_info) + count - # get crm counters time.sleep(1) used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') diff --git a/tests/p4rt/util.py b/tests/p4rt/util.py index ac46a48587a..261df60212f 100644 --- a/tests/p4rt/util.py +++ b/tests/p4rt/util.py @@ -2,6 +2,7 @@ from swsscommon import swsscommon import time +import json def _set_up_appl_db(dvs): @@ -33,13 +34,15 @@ def get_key(db, tbl_name, key): def verify_attr(fvs, attr_list): """ Verifies attribute list for given key in a database table.""" - assert len(fvs) == len(attr_list) + assert len(fvs) == len(attr_list), "Unexpected size: '%d' received, expected '%d'" % \ + (len(fvs), len(attr_list)) d = dict(attr_list) for fv in fvs: if fv[0] in d: - assert fv[1] == d[fv[0]] + assert fv[1] == d[fv[0]], "Unexpected value of attribute '%s': '%s' received, expected '%s'" % \ + (fv[0], fv[1], d[fv[0]]) else: - assert False + assert False, "Unexpected attribute '%s' received" % (fv[0]) def prepend_match_field(match_field): return "match/" + match_field @@ -49,7 +52,8 @@ def prepend_param_field(param_field): def verify_response(consumer, key, attr_list, status, err_message = "SWSS_RC_SUCCESS"): """ Verifies a response.""" - consumer.readData() + if consumer.peek() <= 0: + consumer.readData() (op, data, values) = consumer.pop() assert data == key assert op == status @@ -62,6 +66,23 @@ def verify_response(consumer, key, attr_list, status, err_message = "SWSS_RC_SUC verify_attr(values, attr_list) +def p4rt_verify_response(consumer, send_op, send_data, send_attrs, key): + while consumer.peek() <= 0: + consumer.readData() + (rcvd_op, rcvd_data, rcvd_values) = consumer.pop() + assert rcvd_op == send_op, f"Expected rcvd_op={send_op}, got {rcvd_op}" + assert rcvd_data == send_data, f"Expected rcvd_data={send_data}, got {rcvd_data}" + if send_op == "SET": + rkey, rcvd_json = rcvd_values[0] + rcvd_attr_list = json.loads(rcvd_json) + rcvd_attrs_dict = dict(zip(rcvd_attr_list[0::2], rcvd_attr_list[1::2])) + expected_attrs_dict = dict(send_attrs) + assert rcvd_attrs_dict == expected_attrs_dict, f"Expected attributes={expected_attrs_dict}, but got={rcvd_attrs_dict}" + elif send_op == "DEL": + assert rcvd_values[0][0] == key, f"Expected key={key}, got {rcvd_values[0][0]}" + assert rcvd_values[0][1] == "", f"Expected empty value, got {rcvd_values[0][1]}" + + def check_syslog(dvs, marker, process, err_log, expected_cnt): """ Checks syslog on dvs docker. @@ -123,15 +144,3 @@ def get_original_redis_entries(self, db_list): table = i[1] self._original_entries["{}:{}".format(db, table)]= get_keys(db, table) -class KeyToOidDBHelper(object): - """Provides helper APIs for P4RT key to OID mapping in Redis DB.""" - - # Table name in Redis DB for the mapping. - TBL_NAME = "P4RT_KEY_TO_OID" - KEY = "" - - def __init__(self, dvs): - self.table = swsscommon.Table(_set_up_appl_state_db(dvs), self.TBL_NAME) - - def get_db_info(self): - return self.table.get(self.KEY) diff --git a/tests/request_parser_ut.cpp b/tests/request_parser_ut.cpp index 8409dc5b664..6bf7ad20304 100644 --- a/tests/request_parser_ut.cpp +++ b/tests/request_parser_ut.cpp @@ -30,6 +30,12 @@ class TestRequest1 : public Request TestRequest1() : Request(request_description1, '|') { } }; +class TestRequest1_Relaxed : public Request +{ +public: + TestRequest1_Relaxed() : Request(request_description1, '|', true) { } +}; + const request_description_t request_description2 = { { REQ_T_STRING, REQ_T_MAC_ADDRESS, REQ_T_STRING }, { @@ -110,6 +116,37 @@ TEST(request_parser, simpleKey) } } +TEST(request_parser, relaxedAttrParsing) +{ + KeyOpFieldsValuesTuple t {"key1", "SET", + { + { "v4", "true" }, + { "v6", "true" }, + { "src_mac", "02:03:04:05:06:07" }, + { "ttl_action", "copy" }, + { "ip_opt_action", "drop" }, + { "l3_mc_action", "log" }, + { "nlist", "name1" }, + { "random", "whatever" } + } + }; + + try + { + TestRequest1_Relaxed request; + + EXPECT_NO_THROW(request.parse(t)); + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Got unexpected exception"; + } +} + TEST(request_parser, simpleKeyEmptyAttrs) { KeyOpFieldsValuesTuple t {"key1", "SET", @@ -1598,6 +1635,7 @@ const request_description_t request_description_multi_value = { { "endpoint", REQ_T_IP_LIST }, { "vni", REQ_T_UINT_LIST }, { "mac_address", REQ_T_MAC_ADDRESS_LIST }, + {"local_endpoint", REQ_T_BOOL_LIST}, }, { } // no mandatory attributes }; @@ -1616,6 +1654,7 @@ TEST(request_parser, multipleValues) { "endpoint", "1.1.1.1,2.2.2.2,3.3.3.3" }, { "vni", "11111,11112,11113" }, { "mac_address", "02:03:04:05:06:07,12:13:14:15:16:17,22:23:24:25:26:27" }, + { "local_endpoint", "true,false,true" }, } }; @@ -1628,21 +1667,25 @@ TEST(request_parser, multipleValues) EXPECT_STREQ(request.getOperation().c_str(), "SET"); EXPECT_STREQ(request.getFullKey().c_str(), "key1"); EXPECT_STREQ(request.getKeyString(0).c_str(), "key1"); - EXPECT_TRUE(request.getAttrFieldNames() == (std::unordered_set{"endpoint", "vni", "mac_address"})); + EXPECT_TRUE(request.getAttrFieldNames() == (std::unordered_set{"endpoint", "vni", "mac_address", "local_endpoint"})); auto ep_list = request.getAttrIPList("endpoint"); auto vni_list = request.getAttrUintList("vni"); auto mac_list = request.getAttrMacAddressList("mac_address"); + auto local_ep_list = request.getAttrBoolList("local_endpoint"); std::vector expected_ep{ "1.1.1.1", "2.2.2.2", "3.3.3.3" }; std::vector expected_vni{ 11111, 11112, 11113 }; std::vector expected_mac{ "02:03:04:05:06:07", "12:13:14:15:16:17", "22:23:24:25:26:27" }; + std::vector expected_local_ep{ true, false, true }; EXPECT_EQ(ep_list.size(), value_size); EXPECT_EQ(vni_list.size(), value_size); EXPECT_EQ(mac_list.size(), value_size); + EXPECT_EQ(local_ep_list.size(), value_size); for (size_t idx = 0; idx < value_size; idx++) { EXPECT_STREQ(ep_list[idx].to_string().c_str(), expected_ep[idx].c_str()); EXPECT_EQ(vni_list[idx], expected_vni[idx]); EXPECT_STREQ(mac_list[idx].to_string().c_str(), expected_mac[idx].c_str()); + EXPECT_EQ(local_ep_list[idx], expected_local_ep[idx]); } } catch (const std::exception& e) @@ -1761,3 +1804,223 @@ TEST(request_parser, multipleValuesInvalidMac) FAIL() << "Expected std::invalid_argument, not other exception"; } } + +TEST(request_parser, multipleValuesInvalidBool) +{ + KeyOpFieldsValuesTuple t {"key1", "SET", + { + { "endpoint", "1.1.1.1,2.2.2.2,3.3.3.3" }, + { "vni", "11111,11112,11113" }, + { "mac_address", "02:03:04:05:06:07,12:13:14:15:16:17,22:23:24:25:26:27" }, + { "local_endpoint", "true,false,invalid_bool" }, + } + }; + + try + { + TestRequestMultiValue request; + request.parse(t); + FAIL() << "Expected std::invalid_argument"; + } + catch (const std::invalid_argument& e) + { + EXPECT_STREQ(e.what(), "Invalid boolean list: true,false,invalid_bool"); + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Expected std::invalid_argument, not other exception"; + } +} + +/* +Check MAC key +*/ +const request_description_t test_mac_key = { + { REQ_T_STRING, REQ_T_MAC_ADDRESS }, + { + { "f1", REQ_T_STRING } + }, + { } +}; + +class TestRequestMacKey: public Request +{ +public: + TestRequestMacKey() : Request(test_mac_key, ':') { } +}; + +TEST(request_parser, mac_key_parse_checker) +{ + KeyOpFieldsValuesTuple t {"Vnet_1000:ab:b1:ca:dd:e1:f2", "SET", + { } + }; + + try + { + TestRequestMacKey request; + + EXPECT_NO_THROW(request.parse(t)); + + EXPECT_STREQ(request.getOperation().c_str(), "SET"); + EXPECT_STREQ(request.getFullKey().c_str(), "Vnet_1000:ab:b1:ca:dd:e1:f2"); + EXPECT_EQ(request.getKeyString(0), "Vnet_1000"); + EXPECT_EQ(request.getKeyMacAddress(1), MacAddress("ab:b1:ca:dd:e1:f2")); + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Got unexpected exception"; + } +} + +/* +Check STRING_LIST attribute type +*/ +const request_description_t test_string_list = { + { REQ_T_STRING }, + { + { "main_dpu_ids", REQ_T_STRING_LIST }, + }, + { } +}; + +class TestRequestStringList: public Request +{ +public: + TestRequestStringList() : Request(test_string_list, '|') { } +}; + +TEST(request_parser, string_list_basic) +{ + KeyOpFieldsValuesTuple t {"key1", "SET", + { + { "main_dpu_ids", "dpu0,dpu1,dpu3" }, + } + }; + + try + { + TestRequestStringList request; + + EXPECT_NO_THROW(request.parse(t)); + EXPECT_STREQ(request.getOperation().c_str(), "SET"); + EXPECT_STREQ(request.getFullKey().c_str(), "key1"); + EXPECT_STREQ(request.getKeyString(0).c_str(), "key1"); + EXPECT_TRUE(request.getAttrFieldNames() == (std::unordered_set{"main_dpu_ids"})); + + auto main_dpu_list = request.getAttrStringList("main_dpu_ids"); + std::vector expected_main{"dpu0", "dpu1", "dpu3"}; + EXPECT_EQ(main_dpu_list.size(), 3); + for (size_t idx = 0; idx < main_dpu_list.size(); idx++) + { + EXPECT_STREQ(main_dpu_list[idx].c_str(), expected_main[idx].c_str()); + } + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Got unexpected exception"; + } +} + +TEST(request_parser, string_list_single_item) +{ + KeyOpFieldsValuesTuple t {"key1", "SET", + { + { "main_dpu_ids", "dpu0" }, + } + }; + + try + { + TestRequestStringList request; + EXPECT_NO_THROW(request.parse(t)); + auto main_dpu_list = request.getAttrStringList("main_dpu_ids"); + EXPECT_EQ(main_dpu_list.size(), 1); + EXPECT_STREQ(main_dpu_list[0].c_str(), "dpu0"); + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Got unexpected exception"; + } +} + +TEST(request_parser, string_list_empty) +{ + KeyOpFieldsValuesTuple t {"key1", "SET", + { + { "main_dpu_ids", "" }, + } + }; + + try + { + TestRequestStringList request; + EXPECT_NO_THROW(request.parse(t)); + auto main_dpu_list = request.getAttrStringList("main_dpu_ids"); + EXPECT_EQ(main_dpu_list.size(), 0); + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Got unexpected exception"; + } +} + +TEST(request_parser, string_list_clear_test) +{ + KeyOpFieldsValuesTuple t1 {"key1", "SET", + { + { "main_dpu_ids", "dpu0,dpu1,dpu3" }, + } + }; + + KeyOpFieldsValuesTuple t2 {"key2", "SET", + { + { "main_dpu_ids", "dpu4,dpu5" }, + } + }; + + try + { + TestRequestStringList request; + + // Parse first request + EXPECT_NO_THROW(request.parse(t1)); + auto main_dpu_list1 = request.getAttrStringList("main_dpu_ids"); + EXPECT_EQ(main_dpu_list1.size(), 3); + EXPECT_STREQ(main_dpu_list1[0].c_str(), "dpu0"); + + // Clear and parse second request + EXPECT_NO_THROW(request.clear()); + EXPECT_NO_THROW(request.parse(t2)); + auto main_dpu_list2 = request.getAttrStringList("main_dpu_ids"); + EXPECT_EQ(main_dpu_list2.size(), 2); + EXPECT_STREQ(main_dpu_list2[0].c_str(), "dpu4"); + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Got unexpected exception"; + } +} diff --git a/tests/run-tests.sh b/tests/run-tests.sh new file mode 100755 index 00000000000..b9cdadf783d --- /dev/null +++ b/tests/run-tests.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +IMAGE_NAME=$1 +PY_TEST_PARAMS="$2" +TESTS="$3" +RETRY=$4 +[ -z "$RETRY" ] && RETRY=1 +JUNITXML=$(echo "$TESTS" | cut -d "." -f1)_tr.xml + +set -x +for ((i=1; i<=$RETRY; i++)); do + echo "Running the py test for tests: $TESTS, $i/$RETRY..." + py.test -v --force-flaky --junitxml="$JUNITXML" $PY_TEST_PARAMS --imgname="$IMAGE_NAME" $TESTS && break +done diff --git a/tests/sai_attrs.py b/tests/sai_attrs.py new file mode 100644 index 00000000000..05ea8b211d1 --- /dev/null +++ b/tests/sai_attrs.py @@ -0,0 +1,36 @@ +SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION = "SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION" +SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION = "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION" + +SAI_ENI_ATTR_PL_SIP = 'SAI_ENI_ATTR_PL_SIP' +SAI_ENI_ATTR_PL_SIP_MASK = 'SAI_ENI_ATTR_PL_SIP_MASK' +SAI_ENI_ATTR_PL_UNDERLAY_SIP = 'SAI_ENI_ATTR_PL_UNDERLAY_SIP' +SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID = 'SAI_ENI_ATTR_OUTBOUND_ROUTING_GROUP_ID' + +SAI_OUTBOUND_CA_TO_PA_ENTRY_ACTION_SET_TUNNEL_MAPPING = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ACTION_SET_TUNNEL_MAPPING' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ACTION_SET_PRIVATE_LINK_MAPPING = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ACTION_SET_PRIVATE_LINK_MAPPING' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP_MASK = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP_MASK' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_ACTION = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_ACTION' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP_MASK = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_SIP_MASK' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP_MASK = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DIP_MASK' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_ENCAPSULATION = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_ENCAPSULATION' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_TUNNEL_KEY = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_TUNNEL_KEY' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP' +SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_TUNNEL_ID = 'SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_DASH_TUNNEL_ID' +SAI_DASH_ENCAPSULATION_NVGRE = 'SAI_DASH_ENCAPSULATION_NVGRE' +SAI_DASH_ENCAPSULATION_VXLAN = 'SAI_DASH_ENCAPSULATION_VXLAN' + +SAI_OUTBOUND_ROUTING_ENTRY_ATTR_UNDERLAY_SIP = 'SAI_OUTBOUND_ROUTING_ENTRY_ATTR_UNDERLAY_SIP' + +SAI_DASH_TUNNEL_ATTR_DIP = 'SAI_DASH_TUNNEL_ATTR_DIP' +SAI_DASH_TUNNEL_ATTR_DASH_ENCAPSULATION = 'SAI_DASH_TUNNEL_ATTR_DASH_ENCAPSULATION' +SAI_DASH_TUNNEL_ATTR_TUNNEL_KEY = 'SAI_DASH_TUNNEL_ATTR_TUNNEL_KEY' +SAI_DASH_TUNNEL_ATTR_MAX_MEMBER_SIZE = 'SAI_DASH_TUNNEL_ATTR_MAX_MEMBER_SIZE' +SAI_DASH_TUNNEL_ATTR_SIP = 'SAI_DASH_TUNNEL_ATTR_SIP' + +SAI_DASH_TUNNEL_NEXT_HOP_ATTR_DIP = 'SAI_DASH_TUNNEL_NEXT_HOP_ATTR_DIP' + +SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_ID = 'SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_ID' +SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_NEXT_HOP_ID = 'SAI_DASH_TUNNEL_MEMBER_ATTR_DASH_TUNNEL_NEXT_HOP_ID' diff --git a/tests/single_asic_voq_fs/default_config.json b/tests/single_asic_voq_fs/default_config.json new file mode 100644 index 00000000000..85292999eee --- /dev/null +++ b/tests/single_asic_voq_fs/default_config.json @@ -0,0 +1,276 @@ +{ + "DEVICE_METADATA": { + "localhost": { + "asic_name" : "Asic0", + "switch_type": "voq", + "switch_id": "0", + "max_cores": "8" + } + }, + "SYSTEM_PORT": { + "VS|Asic0|Cpu0": { + "speed": "10000", + "system_port_id": "0", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "0" + }, + "VS|Asic0|Ethernet0": { + "speed": "40000", + "system_port_id": "1", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "1" + }, + "VS|Asic0|Ethernet4": { + "speed": "40000", + "system_port_id": "2", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "2" + }, + "VS|Asic0|Ethernet8": { + "speed": "40000", + "system_port_id": "3", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "3" + }, + "VS|Asic0|Ethernet12": { + "speed": "40000", + "system_port_id": "4", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "4" + }, + "VS|Asic0|Ethernet16": { + "speed": "40000", + "system_port_id": "5", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "5" + }, + "VS|Asic0|Ethernet20": { + "speed": "40000", + "system_port_id": "6", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "6" + }, + "VS|Asic0|Ethernet24": { + "speed": "40000", + "system_port_id": "7", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "7" + }, + "VS|Asic0|Ethernet28": { + "speed": "40000", + "system_port_id": "8", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "8" + }, + "VS|Asic0|Ethernet32": { + "speed": "40000", + "system_port_id": "9", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "9" + }, + "VS|Asic0|Ethernet36": { + "speed": "40000", + "system_port_id": "10", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "10" + }, + "VS|Asic0|Ethernet40": { + "speed": "40000", + "system_port_id": "11", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "11" + }, + "VS|Asic0|Ethernet44": { + "speed": "40000", + "system_port_id": "12", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "12" + }, + "VS|Asic0|Ethernet48": { + "speed": "40000", + "system_port_id": "13", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "13" + }, + "VS|Asic0|Ethernet52": { + "speed": "40000", + "system_port_id": "14", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "14" + }, + "VS|Asic0|Ethernet56": { + "speed": "40000", + "system_port_id": "15", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "15" + }, + "VS|Asic0|Ethernet60": { + "speed": "40000", + "system_port_id": "16", + "switch_id": "0", + "core_index": "0", + "num_voq":8, + "core_port_index": "16" + }, + "VS|Asic0|Ethernet64": { + "speed": "40000", + "system_port_id": "17", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "1" + }, + "VS|Asic0|Ethernet68": { + "speed": "40000", + "system_port_id": "18", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "2" + }, + "VS|Asic0|Ethernet72": { + "speed": "40000", + "system_port_id": "19", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "3" + }, + "VS|Asic0|Ethernet76": { + "speed": "40000", + "system_port_id": "20", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "4" + }, + "VS|Asic0|Ethernet80": { + "speed": "40000", + "system_port_id": "21", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "5" + }, + "VS|Asic0|Ethernet84": { + "speed": "40000", + "system_port_id": "22", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "6" + }, + "VS|Asic0|Ethernet88": { + "speed": "40000", + "system_port_id": "23", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "7" + }, + "VS|Asic0|Ethernet92": { + "speed": "40000", + "system_port_id": "24", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "8" + }, + "VS|Asic0|Ethernet96": { + "speed": "40000", + "system_port_id": "25", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "9" + }, + "VS|Asic0|Ethernet100": { + "speed": "40000", + "system_port_id": "26", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "10" + }, + "VS|Asic0|Ethernet104": { + "speed": "40000", + "system_port_id": "27", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "11" + }, + "VS|Asic0|Ethernet108": { + "speed": "40000", + "system_port_id": "28", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "12" + }, + "VS|Asic0|Ethernet112": { + "speed": "40000", + "system_port_id": "29", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "13" + }, + "VS|Asic0|Ethernet116": { + "speed": "40000", + "system_port_id": "30", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "14" + }, + "VS|Asic0|Ethernet120": { + "speed": "40000", + "system_port_id": "31", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "15" + }, + "VS|Asic0|Ethernet124": { + "speed": "40000", + "system_port_id": "32", + "switch_id": "0", + "core_index": "1", + "num_voq":8, + "core_port_index": "16" + } + } +} diff --git a/tests/test_acl.py b/tests/test_acl.py index cf68d1516e9..ed5789b2b00 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -1,5 +1,6 @@ import pytest from requests import request +import time L3_TABLE_TYPE = "L3" L3_TABLE_NAME = "L3_TEST" @@ -131,6 +132,38 @@ def test_InvalidAclRuleCreation(self, dvs_acl, l3_acl_table): dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, "INVALID_RULE", None) dvs_acl.verify_no_acl_rules() + def test_AclRuleUpdate(self, dvs_acl, l3_acl_table): + """The test is to verify there is no duplicated flex counter when updating an ACL rule + """ + config_qualifiers = {"SRC_IP": "10.10.10.10/32"} + expected_sai_qualifiers = { + "SAI_ACL_ENTRY_ATTR_FIELD_SRC_IP": dvs_acl.get_simple_qualifier_comparator("10.10.10.10&mask:255.255.255.255") + } + + dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) + dvs_acl.verify_acl_rule(expected_sai_qualifiers) + + acl_rule_id = dvs_acl.get_acl_rule_id() + counter_id = dvs_acl.get_acl_counter_oid() + + new_config_qualifiers = {"SRC_IP": "10.10.10.11/32"} + new_expected_sai_qualifiers = { + "SAI_ACL_ENTRY_ATTR_FIELD_SRC_IP": dvs_acl.get_simple_qualifier_comparator("10.10.10.11&mask:255.255.255.255") + } + dvs_acl.update_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, new_config_qualifiers) + # Verify the rule has been updated + retry = 5 + while dvs_acl.get_acl_rule_id() == acl_rule_id and retry >= 0: + retry -= 1 + time.sleep(1) + assert retry > 0 + dvs_acl.verify_acl_rule(new_expected_sai_qualifiers) + # Verify the previous counter is removed + if counter_id: + dvs_acl.check_acl_counter_not_in_counters_map(counter_id) + dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + dvs_acl.verify_no_acl_rules() + def test_AclRuleL4SrcPort(self, dvs_acl, l3_acl_table): config_qualifiers = {"L4_SRC_PORT": "65000"} expected_sai_qualifiers = { diff --git a/tests/test_acl_inner_src_mac_rewrite.py b/tests/test_acl_inner_src_mac_rewrite.py new file mode 100644 index 00000000000..1bcbd51c70d --- /dev/null +++ b/tests/test_acl_inner_src_mac_rewrite.py @@ -0,0 +1,228 @@ +import pytest +from requests import request +import time +from swsscommon import swsscommon +from dvslib.dvs_common import PollingConfig, wait_for_result +import pdb +import json + +TABLE_TYPE = "INNER_SRC_MAC_REWRITE_TABLE_TYPE" +CUSTOM_TABLE_TYPE_MATCHES = [ + "TUNNEL_VNI", + "INNER_SRC_IP" +] +CUSTOM_TABLE_TYPE_BPOINT_TYPES = ["PORT","PORTCHANNEL"] +CUSTOM_TABLE_TYPE_ACTIONS = ["INNER_SRC_MAC_REWRITE_ACTION"] +EXPECTED_ACTION_LIST = ["SAI_ACL_ACTION_TYPE_SET_INNER_SRC_MAC"] +ASIC_STATE_ACL = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY" +TABLE_NAME = "INNER_SRC_MAC_REWRITE_TEST" +BIND_PORTS = ["Ethernet0", "Ethernet4"] +RULE_NAME = "INNER_SRC_MAC_REWRITE_TEST_RULE" + + +class TestInnerSrcMacRewriteAclTable: + + def setup_db(self, dvs): + self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.ctdb = swsscommon.DBConnector(2, dvs.redis_sock, 0) + self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + @pytest.fixture + def innersrcmacrewrite_acl_table(self, dvs_acl): + try: + dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, CUSTOM_TABLE_TYPE_ACTIONS) + dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress") + yield dvs_acl.get_acl_table_ids(1)[0] + finally: + dvs_acl.remove_acl_table(TABLE_NAME) + dvs_acl.remove_acl_table_type(TABLE_TYPE) + dvs_acl.verify_acl_table_count(0) + + def create_acl_rule(self, dvs, table_name, rule_name, qualifiers, priority:str="1000", action:str="AA:BB:CC:DD:EE:FF"): + tbl = swsscommon.Table(self.cdb, "ACL_RULE") + + fvs={ + "PRIORITY": priority, + "INNER_SRC_MAC_REWRITE_ACTION": action + } + for k, v in qualifiers.items(): + fvs[k] = v + + formatted_entry = swsscommon.FieldValuePairs(list(fvs.items())) + tbl.set(table_name + "|" + rule_name, formatted_entry) + + def remove_acl_rule(self, dvs, table_name, rule_name): + tbl = swsscommon.Table(self.cdb, "ACL_RULE") + tbl._del(table_name + "|" + rule_name) + + def validate_asic_acl_entries(self, dvs_acl, asic_db, expected_qualifier): + def _access_function(): + false_ret = (False, '') + + key = dvs_acl.get_acl_rule_id() + + fvs = asic_db.get_entry(ASIC_STATE_ACL, key) + if not fvs: + return false_ret + + for qualifer in expected_qualifier: + if qualifer not in fvs: + return false_ret + + if fvs[qualifer] != expected_qualifier[qualifer]: + return false_ret + + return (True, key) + val, result = wait_for_result(_access_function, failure_message="Inner-src-mac-rewrite ACL rule not updated") + + def update_acl_rule(self, dvs, table_name, rule_name, qualifier): + table = swsscommon.Table(self.cdb, "ACL_RULE") + status, fvs=table.get(table_name+"|"+rule_name) + fvs_pairs= dict(fvs) + for k, v in qualifier.items(): + fvs_pairs[k] = v + formatted_entry = swsscommon.FieldValuePairs(list(fvs_pairs.items())) + table.set(table_name + "|" + rule_name, formatted_entry) + + def test_InnerSrcMacRewriteAclTableCreationDeletion(self, dvs_acl): + + # This test checks for ACL table and table type creation deletion for inner src mac rewrite + try: + dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, CUSTOM_TABLE_TYPE_ACTIONS) + dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress") + acl_table_id = dvs_acl.get_acl_table_ids(1)[0] + assert acl_table_id is not None + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(BIND_PORTS)) + + dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) + dvs_acl.verify_acl_table_port_binding(acl_table_id, BIND_PORTS, 1, stage="egress") + dvs_acl.verify_acl_table_action_list(acl_table_id, EXPECTED_ACTION_LIST) + dvs_acl.verify_acl_table_status(TABLE_NAME, "Active") + finally: + dvs_acl.remove_acl_table(TABLE_NAME) + dvs_acl.remove_acl_table_type(TABLE_TYPE) + dvs_acl.verify_acl_table_count(0) + + def test_InnerSrcMacRewriteAclRuleCreationDeletion(self, dvs, dvs_acl, innersrcmacrewrite_acl_table): + + # This test checks for ACL rule creation(more than one) deletion for the table type inner src mac rewrite + self.setup_db(dvs) + + # Add the first rule and verify status in STATE_DB + config_qualifiers = {"INNER_SRC_IP": "10.10.10.10/32", "TUNNEL_VNI": "5000"} + self.create_acl_rule(dvs, TABLE_NAME, RULE_NAME, config_qualifiers, priority="1000", action="60:BB:AA:C3:3E:AB") + dvs_acl.verify_acl_rule_status(TABLE_NAME, RULE_NAME, "Active") + + # Add second rule and verify status in STATE_DB + config_qualifiers = {"INNER_SRC_IP": "9.9.9.9/30", "TUNNEL_VNI": "5000"} + self.create_acl_rule(dvs, TABLE_NAME, RULE_NAME+"2", config_qualifiers, priority="9990", action="AB:BB:AA:C3:3E:AB") + dvs_acl.verify_acl_rule_status(TABLE_NAME, RULE_NAME+"2", "Active") + + # Remove first rule and check status in STATE_DB + self.remove_acl_rule(dvs, TABLE_NAME, RULE_NAME) + dvs_acl.verify_acl_rule_status(TABLE_NAME, RULE_NAME, None) + dvs_acl.verify_acl_rule_status(TABLE_NAME, RULE_NAME+"2", "Active") + + # Remove second rule and check status in STATE_DB + self.remove_acl_rule(dvs, TABLE_NAME, RULE_NAME+"2") + dvs_acl.verify_acl_rule_status(TABLE_NAME, RULE_NAME+"2", None) + + # Verify no rules in ASIC_DB + dvs_acl.verify_no_acl_rules() + + def test_InnerSrcMacRewriteAclRuleUpdate(self, dvs, dvs_acl, innersrcmacrewrite_acl_table): + + # This test checks for ACL rule update for the table type inner src mac rewrite + + try : + self.setup_db(dvs) + + # Add the rule + config_qualifiers = {"INNER_SRC_IP": "10.10.10.10/32", "TUNNEL_VNI": "4000"} + self.create_acl_rule(dvs, TABLE_NAME, RULE_NAME, config_qualifiers, priority="1001", action="66:BB:AA:C3:3E:AB") + dvs_acl.verify_acl_rule_status(TABLE_NAME, RULE_NAME, "Active") + rule_id = dvs_acl.get_acl_rule_id() + + # SAI entries for the rule creation + new_expected_sai_qualifiers={"SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP": "10.10.10.10&mask:255.255.255.255", + "SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI": "4000&mask:0xffffffff", + "SAI_ACL_ENTRY_ATTR_PRIORITY": "1001", + "SAI_ACL_ENTRY_ATTR_ACTION_SET_INNER_SRC_MAC": "66:BB:AA:C3:3E:AB"} + + # Verify the rule with SAI entries + self.validate_asic_acl_entries(dvs_acl, dvs_acl.asic_db, new_expected_sai_qualifiers) + + # Verify the rule with counter id to be present in ASIC DB + counter_id = dvs_acl.get_acl_counter_oid() + assert counter_id in dvs_acl.get_acl_counter_ids(1) + + # Update the rule with inner src ip + self.update_acl_rule(dvs, TABLE_NAME, RULE_NAME, {"INNER_SRC_IP": "15.15.15.15/20"}) + + # Expected SAI entries for the rule update #1 + new_expected_sai_qualifiers={"SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP": "15.15.15.15&mask:255.255.240.0"} + + # Verify the rule id and SAI entries are updated in ASIC DB + self.validate_asic_acl_entries(dvs_acl, dvs_acl.asic_db, new_expected_sai_qualifiers) + + # Verify the rule with counter id to be present in ASIC DB + counter_id_2 = dvs_acl.get_acl_counter_oid() + rule_id_2 = dvs_acl.get_acl_rule_id() + + # Verify the rule id are different + assert rule_id != rule_id_2 + + # Verify the counter id is different and present in ASIC DB + assert counter_id not in dvs_acl.get_acl_counter_ids(1) + assert counter_id_2 in dvs_acl.get_acl_counter_ids(1) + + # Update the rule with tunnel vni and inner src ip + self.update_acl_rule(dvs, TABLE_NAME, RULE_NAME, {"TUNNEL_VNI": "111", "INNER_SRC_IP": "20.20.20.20/20"} ) + + # Expected SAI entries for the rule update #2 + new_expected_sai_qualifiers={"SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP": "20.20.20.20&mask:255.255.240.0", + "SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI": "111&mask:0xffffffff"} + + # Verify the rule id and SAI entries are updated in ASIC DB + self.validate_asic_acl_entries(dvs_acl, dvs_acl.asic_db, new_expected_sai_qualifiers) + + # Verify the rule with counter id to be present in ASIC DB + counter_id_3 = dvs_acl.get_acl_counter_oid() + rule_id_3 = dvs_acl.get_acl_rule_id() + + # Verify the rule id are different + assert rule_id_2 != rule_id_3 + + # Verify the counter id is different and present in ASIC DB + assert counter_id_2 not in dvs_acl.get_acl_counter_ids(1) + assert counter_id_3 in dvs_acl.get_acl_counter_ids(1) + + # Update the rule with action + self.update_acl_rule(dvs, TABLE_NAME, RULE_NAME, {"INNER_SRC_MAC_REWRITE_ACTION": "11:BB:AA:C3:3E:AB"} ) + + # Expected SAI entries for the rule update #3 + new_expected_sai_qualifiers={"SAI_ACL_ENTRY_ATTR_ACTION_SET_INNER_SRC_MAC": "11:BB:AA:C3:3E:AB"} + + # Verify the rule id and SAI entries are updated in ASIC DB + self.validate_asic_acl_entries(dvs_acl, dvs_acl.asic_db, new_expected_sai_qualifiers) + + # Verify the rule with counter id to be present in ASIC DB + counter_id_4= dvs_acl.get_acl_counter_oid() + assert counter_id_3 not in dvs_acl.get_acl_counter_ids(1) + assert counter_id_4 in dvs_acl.get_acl_counter_ids(1) + + finally: + # Remove the rule + self.remove_acl_rule(dvs, TABLE_NAME, RULE_NAME) + dvs_acl.verify_no_acl_rules() + dvs_acl.remove_acl_table(TABLE_NAME) + dvs_acl.remove_acl_table_type(TABLE_TYPE) + dvs_acl.verify_acl_table_count(0) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass \ No newline at end of file diff --git a/tests/test_acl_mark.py b/tests/test_acl_mark.py new file mode 100644 index 00000000000..aa5b5ec7ed6 --- /dev/null +++ b/tests/test_acl_mark.py @@ -0,0 +1,447 @@ +import pytest +from requests import request + +OVERLAY_TABLE_TYPE = "UNDERLAY_SET_DSCP" +OVERLAY_TABLE_NAME = "OVERLAY_MARK_META_TEST" +OVERLAY_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] +OVERLAY_RULE_NAME = "OVERLAY_TEST_RULE" + +OVERLAY_TABLE_TYPE6 = "UNDERLAY_SET_DSCPV6" +OVERLAY_TABLE_NAME6 = "OVERLAY_MARK_META_TEST6" +OVERLAY_BIND_PORTS6 = ["Ethernet20", "Ethernet24", "Ethernet28", "Ethernet32"] +OVERLAY_RULE_NAME6 = "OVERLAY_TEST_RULE6" + +# tests for UNDERLAY_SET_DSCP table + + +class TestAclMarkMeta: + @pytest.fixture + def overlay_acl_table(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, + OVERLAY_TABLE_TYPE, + OVERLAY_BIND_PORTS) + yield dvs_acl.get_acl_table_ids(2) + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + + @pytest.fixture + def overlay6_acl_table(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, + OVERLAY_TABLE_TYPE6, + OVERLAY_BIND_PORTS6) + yield dvs_acl.get_acl_table_ids(2) + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + + def verify_acl_table_group_members_multitable(self, dvs_acl, acl_table_id, acl_table_group_ids, member_count): + members = dvs_acl.asic_db.wait_for_n_keys(dvs_acl.ADB_ACL_GROUP_MEMBER_TABLE_NAME, + member_count) + + member_groups = [] + table_member_map = {} + for member in members: + fvs = dvs_acl.asic_db.wait_for_entry(dvs_acl.ADB_ACL_GROUP_MEMBER_TABLE_NAME, member) + group_id = fvs.get("SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID") + table_id = fvs.get("SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID") + + if group_id in acl_table_group_ids and table_id in acl_table_id: + member_groups.append(group_id) + if table_id not in table_member_map: + table_member_map[table_id] = [] + table_member_map[table_id].append(group_id) + + assert set(member_groups) == set(acl_table_group_ids) + return table_member_map + + def get_table_stage(self, dvs_acl, acl_table_id, v4_ports, v6_ports): + stages = [] + names = [] + ports = [] + for table in acl_table_id: + fvs = dvs_acl.asic_db.wait_for_entry(dvs_acl.ADB_ACL_TABLE_NAME, table) + stage = fvs.get("SAI_ACL_TABLE_ATTR_ACL_STAGE") + if stage == "SAI_ACL_STAGE_INGRESS": + stages.append("ingress") + elif stage == "SAI_ACL_STAGE_EGRESS": + stages.append("egress") + qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_ACL_USER_META") + if qual == "true": + names.append("EGR_SET_DSCP") + ports.append(v4_ports+v6_ports) + qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6") + if qual == "true": + names.append("MARK_META6") + ports.append(v6_ports) + qual = fvs.get("SAI_ACL_TABLE_ATTR_FIELD_DST_IP") + if qual == "true": + names.append("MARK_META") + ports.append(v4_ports) + return stages, names, ports + + def verify_acl_table_port_binding_multi(self, dvs_acl, table_member_map, bind_ports, stages, acl_table_id): + for i in range(0, len(stages)): + stage = stages[i] + table = acl_table_id[i] + port_groups = [] + for port in bind_ports[i]: + port_oid = dvs_acl.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "").get(port) + fvs = dvs_acl.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) + acl_table_group_id = fvs.pop(dvs_acl.ADB_PORT_ATTR_LOOKUP[stage], None) + assert acl_table_group_id in table_member_map[table] + port_groups.append(acl_table_group_id) + + assert len(port_groups) == len(bind_ports[i]) + assert set(port_groups) == set(table_member_map[table]) + + + def get_acl_rules_with_action(self, dvs_acl, total_rules): + """Verify that there are N rules in the ASIC DB.""" + members = dvs_acl.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", + total_rules) + + member_groups = [] + table_member_map = {} + for member in members: + fvs = dvs_acl.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", member) + table_id = fvs.get("SAI_ACL_ENTRY_ATTR_TABLE_ID") + entry = {} + entry['id'] = member + action = fvs.get("SAI_ACL_ENTRY_ATTR_ACTION_SET_DSCP") + if action: + entry['action_type'] = "dscp" + entry['action_value'] = action + meta = fvs.get("SAI_ACL_ENTRY_ATTR_FIELD_ACL_USER_META") + entry['match_meta'] = meta.split('&')[0] + action = fvs.get("SAI_ACL_ENTRY_ATTR_ACTION_SET_ACL_META_DATA") + if action: + entry['action_type'] = "meta" + entry['action_value'] = action + + if table_id not in table_member_map: + table_member_map[table_id] = [] + table_member_map[table_id].append(entry) + return table_member_map + + def verify_acl_rules_with_action(self, table_names, acl_table_id, table_rules, meta, dscp): + for i in range(0, len(table_names)): + if acl_table_id[i] in table_rules: + for j in range(0, len(table_rules[acl_table_id[i]])): + if table_names[i] == "MARK_META" or table_names[i] == "MARK_META6": + assert table_rules[acl_table_id[i]][j]['action_type'] == "meta" + assert table_rules[acl_table_id[i]][j]['action_value'] in meta + else: + assert table_rules[acl_table_id[i]][j]['action_type'] == "dscp" + assert table_rules[acl_table_id[i]][j]['action_value'] in dscp + assert table_rules[acl_table_id[i]][j]['match_meta'] in meta + + def test_OverlayTableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. + acl_table_id = dvs_acl.get_acl_table_ids(2) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, OVERLAY_BIND_PORTS, []) + + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) + + def test_Overlay6TableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS6) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. + acl_table_id = dvs_acl.get_acl_table_ids(2) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS6) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) + + def test_OverlayBothv4v6TableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS6) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. + acl_table_id = dvs_acl.get_acl_table_ids(3) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id,OVERLAY_BIND_PORTS, OVERLAY_BIND_PORTS6) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*4) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 16) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(2) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) + acl_table_id = dvs_acl.get_acl_table_ids(2) + + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS6) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS6)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) + + def test_OverlayBothv4v6TableSameintfCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, OVERLAY_TABLE_TYPE, OVERLAY_BIND_PORTS) + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, OVERLAY_TABLE_TYPE6, OVERLAY_BIND_PORTS) + # this should create 2 tables. MARK_META and EGR_SET_DSCP Verify the table count. + acl_table_id = dvs_acl.get_acl_table_ids(3) + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id,OVERLAY_BIND_PORTS, OVERLAY_BIND_PORTS) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 12) + + self.verify_acl_table_port_binding_multi(dvs_acl, table_member_map, ports, stages, acl_table_id) + + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, "Active") + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, "Active") + finally: + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.verify_acl_table_count(2) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME, None) + acl_table_id = dvs_acl.get_acl_table_ids(2) + + stages, names, ports = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(OVERLAY_BIND_PORTS)*2) + table_member_map = self.verify_acl_table_group_members_multitable(dvs_acl, acl_table_id, acl_table_group_ids, 8) + + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(OVERLAY_TABLE_NAME6, None) + + def test_OverlayEntryCreationDeletion(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32"} + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "VALID_RULE", config_qualifiers,action="12") + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "VALID_RULE", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "VALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "VALID_RULE", None) + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryMultiRuleRef(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 1st Rule + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") + #create 2nd Rule + config_qualifiers["DSCP"] = "2" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "2", config_qualifiers, action="12") + #create 3rd Rule + config_qualifiers["DSCP"] = "3" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "3", config_qualifiers, action="12") + + #This should create 4 rules 3 for MARK_META and 1 for EGR_SET_DSCP + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 4) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + + # remove first rule. We should still have 3 rules, 2 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 3) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + + # remove 2nd rule. We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "2") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + + # Verify the STATE_DB entry is removed + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "3") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", None) + + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryMultiTableRules(self, dvs_acl): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1"} + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME, + OVERLAY_TABLE_TYPE, + OVERLAY_BIND_PORTS) + dvs_acl.create_acl_table(OVERLAY_TABLE_NAME6, + OVERLAY_TABLE_TYPE6, + OVERLAY_BIND_PORTS6) + acl_table_id = dvs_acl.get_acl_table_ids(3) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 1st Rule + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + + #create 2nd Rule ipv6 + config_qualifiers6 = {"SRC_IPV6": "2777::0/64", + "DST_IPV6": "2788::0/64", + "DSCP" : "1"}; + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME6, "1", config_qualifiers6, action="12") + + # Verify status of both rules. + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME6, "1", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 3) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + + # remove first rule. We should still have 1 rule, 1 for MARK_META + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME6, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME6, "1", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1"], ["12"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + + # remove 2nd rule. We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + dvs_acl.verify_no_acl_rules() + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME) + dvs_acl.remove_acl_table(OVERLAY_TABLE_NAME6) + dvs_acl.verify_acl_table_count(0) + + def test_OverlayEntryMultiMetaRule(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 1st Rule + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "1", config_qualifiers, action="12") + #create 2nd Rule + config_qualifiers["DSCP"] = "2" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "2", config_qualifiers, action="13") + #create 3rd Rule + config_qualifiers["DSCP"] = "3" + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, "3", config_qualifiers, action="14") + + #This should create 4 rules 3 for MARK_META and 1 for EGR_SET_DSCP + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", "Active") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", "Active") + table_rules = self.get_acl_rules_with_action(dvs_acl, 6) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) + + # remove first rule. We should still have 3 rules, 2 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "1") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 4) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "1", None) + + # remove 2nd rule. We should still have 2 rules, 1 for MARK_META and 1 for EGR_SET_DSCP + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "2") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + table_rules = self.get_acl_rules_with_action(dvs_acl, 2) + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, ["1", "2", "3"], ["12", "13", "14"]) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "2", None) + + # Verify the STATE_DB entry is removed + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, "3") + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, "3", None) + + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryExhaustMeta(self, dvs_acl, overlay_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 8 rules. 8th one should fail. + for i in range(1, 9): + config_qualifiers["DSCP"] = str(i) + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, str(i), config_qualifiers, action=str(i+10)) + if i < 8: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), "Active") + else: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + + table_rules = self.get_acl_rules_with_action(dvs_acl, 14) + meta = [str(i) for i in range(1, 8)] + dscps = [str(i) for i in range(11, 18)] + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, meta, dscps) + + for i in range(1, 9): + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, str(i)) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + dvs_acl.verify_no_acl_rules() + + def test_OverlayEntryTestMetaDataMgr(self, dvs_acl, overlay_acl_table): + # allocate all 7 metadata values and free them multiple times. + # At the end there should be no rules allocated. + for i in range(1, 4): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32", + "DSCP": "1" + } + acl_table_id = dvs_acl.get_acl_table_ids(2) + _, names, _ = self.get_table_stage(dvs_acl, acl_table_id, [], OVERLAY_BIND_PORTS) + #create 8 rules. 8th one should fail. + for i in range(1, 9): + config_qualifiers["DSCP"] = str(i) + dvs_acl.create_dscp_acl_rule(OVERLAY_TABLE_NAME, str(i), config_qualifiers, action=str(i+10)) + if i < 8: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), "Active") + else: + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + + table_rules = self.get_acl_rules_with_action(dvs_acl, 14) + meta = [str(i) for i in range(1, 8)] + dscps = [str(i) for i in range(11, 18)] + self.verify_acl_rules_with_action(names, acl_table_id, table_rules, meta, dscps) + + for i in range(1, 9): + dvs_acl.remove_acl_rule(OVERLAY_TABLE_NAME, str(i)) + dvs_acl.verify_acl_rule_status(OVERLAY_TABLE_NAME, str(i), None) + dvs_acl.verify_no_acl_rules() + + # Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass \ No newline at end of file diff --git a/tests/test_admin_status.py b/tests/test_admin_status.py index 1b99bf37c72..6aac5cc691b 100644 --- a/tests/test_admin_status.py +++ b/tests/test_admin_status.py @@ -8,6 +8,7 @@ class TestAdminStatus(object): def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.countdb = swsscommon.DBConnector(2, dvs.redis_sock, 0) self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) @@ -43,6 +44,19 @@ def remove_port_channel_members(self, dvs, lag, members): tbl._del(lag + "|" + member) time.sleep(1) + def update_host_tx_ready_status(self, dvs, port_id, switch_id, admin_state): + host_tx_ready = "SAI_PORT_HOST_TX_READY_STATUS_READY" if admin_state == "up" else "SAI_PORT_HOST_TX_READY_STATUS_NOT_READY" + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"host_tx_ready_status\":\""+host_tx_ready+"\",\"port_id\":\""+port_id+"\",\"switch_id\":\""+switch_id+"\"}]" + ntf.send("port_host_tx_ready", ntf_data, fvp) + + def get_port_id(self, dvs, port_name): + port_name_map = swsscommon.Table(self.countdb, "COUNTERS_PORT_NAME_MAP") + status, returned_value = port_name_map.hget("", port_name) + assert status == True + return returned_value + def check_admin_status(self, dvs, port, admin_status): assert admin_status == "up" or admin_status == "down" tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") @@ -91,8 +105,12 @@ def test_PortChannelMemberAdminStatus(self, dvs, testlog): self.remove_port_channel(dvs, "PortChannel6") def test_PortHostTxReadiness(self, dvs, testlog): + dvs.setup_db() self.setup_db(dvs) + #Find switch_id + switch_id = dvs.getSwitchOid() + # configure admin status to interface self.set_admin_status("Ethernet0", "up") self.set_admin_status("Ethernet4", "down") @@ -103,6 +121,11 @@ def test_PortHostTxReadiness(self, dvs, testlog): self.check_admin_status(dvs, "Ethernet4", "down") self.check_admin_status(dvs, "Ethernet8", "up") + self.update_host_tx_ready_status(dvs, self.get_port_id(dvs, "Ethernet0") , switch_id, "up") + self.update_host_tx_ready_status(dvs, self.get_port_id(dvs, "Ethernet4") , switch_id, "down") + self.update_host_tx_ready_status(dvs, self.get_port_id(dvs, "Ethernet8") , switch_id, "up") + time.sleep(3) + # check host readiness status in PORT TABLE of STATE-DB self.check_host_tx_ready_status(dvs, "Ethernet0", "up") self.check_host_tx_ready_status(dvs, "Ethernet4", "down") diff --git a/tests/test_bfd.py b/tests/test_bfd.py index 5add329278d..5cd18bbe05c 100644 --- a/tests/test_bfd.py +++ b/tests/test_bfd.py @@ -9,6 +9,7 @@ def setup_db(self, dvs): self.pdb = dvs.get_app_db() self.adb = dvs.get_asic_db() self.sdb = dvs.get_state_db() + self.cdb = dvs.get_config_db() def get_exist_bfd_session(self): return set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION")) @@ -43,6 +44,22 @@ def update_bfd_session_state(self, dvs, session, state): ntf_data = "[{\"bfd_session_id\":\""+session+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" ntf.send("bfd_session_state_change", ntf_data, fvp) + def update_bgp_global_dev_state(self, state): + tbl = swsscommon.Table(self.cdb.db_connection, "BGP_DEVICE_GLOBAL") + fvs = swsscommon.FieldValuePairs(list(state.items())) + key = "STATE" + tbl.set(key, fvs) + time.sleep(1) + + def set_tsa(self): + state = {"tsa_enabled": "true"} + self.update_bgp_global_dev_state(state) + + def clear_tsa(self): + state = {"tsa_enabled": "false"} + self.update_bgp_global_dev_state(state) + + def test_addRemoveBfdSession(self, dvs): self.setup_db(dvs) @@ -476,6 +493,131 @@ def test_multipleBfdSessions(self, dvs): self.remove_bfd_session(key4) self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session4) + def test_addRemoveBfdSession_with_tsa_case1(self, dvs): + # This is a test for BFD caching mechanism. + # This test sets up a BFD session with shutdown_bfd_during_tsa=true and checks state DB for session creation. + # Then TSA is applied and removal of the session is verified in app db. This is followed by TSB and finally the + # reinstated session is verified. + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "type": "demand_active", "shutdown_bfd_during_tsa": "true"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "12"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + bfdSessions = self.get_exist_bfd_session() + # Confirm BFD session state in STATE_DB is updated as expected. + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + #set TSA + self.set_tsa() + time.sleep(2) + + #ensure the session is removed. + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + #set TSB + self.clear_tsa() + time.sleep(2) + createdSessions = self.get_exist_bfd_session() - bfdSessions + session = createdSessions.pop() + expected_sdb_values["local_discriminator"] = "13" + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + # bfd session should come back + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + + + def test_addRemoveBfdSession_with_tsa_case2(self, dvs): + # This is a test for BFD caching mechanism. + # This test sets up a BFD session with shutdown_bfd_during_tsa=true and checks state DB for session creation. + # Then TSA is applied and removal of the session is verified from app db. At this point the session is removed. + # This isfollowed by TSB. Since the session configuration has been removed during TSB, the BFD session should not + # start up. + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "type": "demand_active"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "14"} + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(2) + # Confirm BFD session state in STATE_DB is updated as expected. + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + #set TSA + self.set_tsa() + time.sleep(2) + + #ensure the session is still present. + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + #set TSB + self.clear_tsa() + time.sleep(2) + + self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.2") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + def test_bfd_state_db_clear(self, dvs): self.setup_db(dvs) diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 49d36b357ca..6f8851aa5ef 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -9,7 +9,7 @@ def dynamic_buffer(dvs): buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) yield - buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + buffer_model.disable_dynamic_buffer(dvs) @pytest.mark.usefixtures("dynamic_buffer") class TestBufferMgrDyn(object): @@ -140,7 +140,7 @@ def check_new_profile_in_asic_db(self, dvs, profile): 'SAI_BUFFER_PROFILE_ATTR_POOL_ID': self.ingress_lossless_pool_oid, 'SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE': sai_threshold_mode, sai_threshold_name: sai_threshold_value}, - self.DEFAULT_POLLING_CONFIG) + polling_config=self.DEFAULT_POLLING_CONFIG) def make_lossless_profile_name(self, speed, cable_length, mtu = None, dynamic_th = None): extra = "" @@ -865,3 +865,112 @@ def test_bufferPortMaxParameter(self, dvs, testlog): dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) + + + def test_bufferPoolInitWithSHP(self, dvs, testlog): + self.setup_db(dvs) + + try: + # 1. Enable the shared headroom pool + default_lossless_buffer_parameter = self.config_db.get_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE') + default_lossless_buffer_parameter['over_subscribe_ratio'] = '2' + self.config_db.update_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE', default_lossless_buffer_parameter) + + # 2. Stop the orchagent + _, oa_pid = dvs.runcmd("pgrep orchagent") + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + + # 3. Remove the size from CONFIG_DB|BUFFER_POOL.ingress_lossless_pool + original_ingress_lossless_pool = self.config_db.get_entry('BUFFER_POOL', 'ingress_lossless_pool') + try: + self.config_db.delete_field('BUFFER_POOL', 'ingress_lossless_pool', 'size') + self.config_db.delete_field('BUFFER_POOL', 'ingress_lossless_pool', 'xoff') + except Exception as e: + pass + + # 4. Remove the ingress_lossless_pool from the APPL_DB + dvs.delete_entry_tbl(self.app_db.db_connection, 'BUFFER_POOL_TABLE', 'ingress_lossless_pool') + + # 5. Mock it by adding a "TABLE_SET" entry to trigger the fallback logic + self.app_db.update_entry("BUFFER_PG_TABLE_SET", "", {"NULL": "NULL"}) + + # 6. Invoke the lua plugin + _, output = dvs.runcmd("redis-cli --eval /usr/share/swss/buffer_pool_vs.lua") + assert "ingress_lossless_pool:2048:1024" in output + + finally: + self.config_db.update_entry('BUFFER_POOL', 'ingress_lossless_pool', original_ingress_lossless_pool) + self.config_db.delete_entry('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE') + dvs.delete_entry_tbl(self.app_db.db_connection, 'BUFFER_PG_TABLE_SET', '') + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + + + def test_bufferPoolCalculation(self, dvs, testlog): + self.setup_db(dvs) + + try: + self.config_db.delete_field('BUFFER_POOL', 'ingress_lossless_pool', 'size') + except Exception as e: + pass + + try: + # Test 1: Test the buffer pool calculation with a percentage + percentage = 75 + margin = 1 + + re_pool_size = "ingress_lossless_pool:([0-9]+)" + _, original_output = dvs.runcmd("redis-cli --eval /usr/share/swss/buffer_pool_vs.lua") + original_size = int(re.match(re_pool_size, original_output).group(1)) + + original_ingress_lossless_pool = self.config_db.get_entry('BUFFER_POOL', 'ingress_lossless_pool') + ingress_lossless_pool = original_ingress_lossless_pool + ingress_lossless_pool['percentage'] = str(percentage) + self.config_db.update_entry('BUFFER_POOL', 'ingress_lossless_pool', ingress_lossless_pool) + + _, percentage_output = dvs.runcmd("redis-cli --eval /usr/share/swss/buffer_pool_vs.lua") + percentage_size = int(re.match(re_pool_size, percentage_output).group(1)) + + real_percentage = percentage_size * 100 / original_size + assert abs(percentage - real_percentage) < margin + + # Test 2: Test the buffer pool calculation with a port with multiple queues + # Store existing Ethernet0 entries for restoration + original_eth0_entries = {} + eth0_keys = self.config_db.get_keys('BUFFER_QUEUE') + for key in eth0_keys: + if key.startswith('Ethernet0|'): + original_eth0_entries[key] = self.config_db.get_entry('BUFFER_QUEUE', key) + self.config_db.delete_entry('BUFFER_QUEUE', key) + + # Startup port + dvs.port_admin_set('Ethernet0', 'up') + + # Create buffer profile + self.config_db.update_entry('BUFFER_PROFILE', 'egress_test_profile', + {'dynamic_th': '0', + 'pool': 'egress_lossy_pool', + 'size': '16384'}) + + # Create buffer queue entries + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|0-7', {'profile': 'egress_test_profile'}) + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|8-12', {'profile': 'egress_test_profile'}) + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|13-19', {'profile': 'egress_test_profile'}) + + # Run lua script and check output + _, output = dvs.runcmd("redis-cli --eval /usr/share/swss/buffer_pool_vs.lua") + assert re.search(r"debug:BUFFER_PROFILE_TABLE:egress_test_profile:16384:20", output), "Profile reference count not found in output" + + finally: + # Remove objects in reverse order + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|13-19') + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|8-12') + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|0-7') + self.config_db.delete_entry('BUFFER_PROFILE', 'egress_test_profile') + dvs.port_admin_set('Ethernet0', 'down') + + # Restore original Ethernet0 entries + for key, value in original_eth0_entries.items(): + self.config_db.update_entry('BUFFER_QUEUE', key, value) + + self.config_db.delete_entry('BUFFER_POOL', 'ingress_lossless_pool') + self.config_db.update_entry('BUFFER_POOL', 'ingress_lossless_pool', original_ingress_lossless_pool) \ No newline at end of file diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 21371cb05aa..c1346b4d620 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -3,14 +3,18 @@ class TestBuffer(object): + from conftest import DockerVirtualSwitch lossless_pgs = [] INTF = "Ethernet0" def setup_db(self, dvs): - self.app_db = dvs.get_app_db() - self.asic_db = dvs.get_asic_db() - self.config_db = dvs.get_config_db() - self.counter_db = dvs.get_counters_db() + from conftest import ApplDbValidator, AsicDbValidator + from dvslib.dvs_database import DVSDatabase + + self.app_db: ApplDbValidator = dvs.get_app_db() + self.asic_db: AsicDbValidator = dvs.get_asic_db() + self.config_db: DVSDatabase = dvs.get_config_db() + self.counter_db: DVSDatabase = dvs.get_counters_db() # enable PG watermark self.set_pg_wm_status('enable') @@ -74,6 +78,10 @@ def get_pg_name_map(self): pg_name = "{}:{}".format(self.INTF, pg) pg_name_map[pg_name] = self.get_pg_oid(pg_name) return pg_name_map + + def check_syslog(self, dvs, marker, err_log, expected_cnt=1): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() >= str(expected_cnt) @pytest.fixture def setup_teardown_test(self, dvs): @@ -246,3 +254,195 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): dvs.port_field_set(extra_port, "speed", orig_speed) dvs.port_admin_set(self.INTF, "down") dvs.port_admin_set(extra_port, "down") + + def test_no_pg_profile_for_speed_and_length(self, dvs: DockerVirtualSwitch, setup_teardown_test): + """ + Test to verify that buffermgrd correctly handles a scenario where no PG profile + is configured for a given speed (10000) and cable length (80m) for Ethernet0 (self.INTF). + """ + orig_cable_len = None + orig_port_speed = None + orig_port_status = None + orig_port_qos_map = None + + test_cable_len = "80m" # cable length must not exist for test_speed in + test_speed = "10000" + test_port_status ="down" # can be up or down, but it must exist in port configuration + test_port_pfc_enable = "3,4" # does not matter, but must exist + + try: + ################################## + ## Save original configurations ## + ################################## + + # Save original cable length + fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + orig_cable_len = fvs_cable_len.get(self.INTF) if fvs_cable_len else None + + # Save original port speed and admin status + fvs_port = self.config_db.get_entry("PORT", self.INTF) + orig_port_speed = fvs_port.get("speed") if fvs_port else None + orig_port_status = fvs_port.get("admin_status") if fvs_port else None + + # Save original port qos map + fvs_qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) + orig_cable_len = fvs_qos_map.get("pfc_enable") if fvs_qos_map else None + + ###################################### + ## Send configurations to CONFIG_DB ## + ###################################### + + # Configure cable length + self.change_cable_len(test_cable_len) + + # Configure port speed + dvs.port_field_set(self.INTF, "speed", test_speed) + + # Configure PFC enable + self.set_port_qos_table(self.INTF, test_port_pfc_enable) + + # Add marker to log to make syslog verification easier + # Set before setting admin status to not miss syslog + marker = dvs.add_log_marker() + + # Configure admin status + dvs.port_admin_set(self.INTF, test_port_status) + + # Wait for buffermgrd to process the changes + time.sleep(2) + + ################## + ## Verification ## + ################## + + + # Check syslog if this error is present. This is expected. + self.check_syslog(dvs, marker, "Failed to process invalid entry, drop it") + + finally: + ############################### + ## Revert to original values ## + ############################### + + # Revert values to original values + # If there are none, then assume entry/field never existed and should be deleted + + # Revert cable length + if orig_cable_len: + self.change_cable_len(orig_cable_len) + else: + self.config_db.delete_entry("CABLE_LENGTH", "AZURE") + + # Revert port speed + if orig_port_speed: + dvs.port_field_set(self.INTF, "speed", orig_port_speed) + else: + self.config_db.delete_field("PORT", self.INTF, "speed") + + # Revert admin status + if orig_port_status: + dvs.port_admin_set(self.INTF, orig_port_status) + else: + self.config_db.delete_field("PORT", self.INTF, "admin_status") + + # Revert port qos map + if orig_port_qos_map: + self.config_db.update_entry("PORT_QOS_MAP", self.INTF, orig_port_qos_map) + else: + self.config_db.delete_entry("PORT_QOS_MAP", self.INTF) + + def test_config_db_buffer_pg_update(self, dvs: DockerVirtualSwitch, setup_teardown_test): + """ + Test to verify BUFFER_PG table entry creation and its consistency when admin_status is updated. + """ + + orig_port_qos_map = None + orig_cable_len = None + orig_fvs_port = None + + # Test parameters + test_cable_len = "300m" + test_speed = "100000" + test_port_pfc_enable = "3,4" + + try: + ################################## + ## Save original configurations ## + ################################## + + # Save original cable length + fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + orig_cable_len = fvs_cable_len.get(self.INTF) if fvs_cable_len else None + + # Save original port speed and admin status + orig_fvs_port = self.config_db.get_entry("PORT", self.INTF) + + # Save original port qos map + fvs_qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) + orig_port_qos_map = fvs_qos_map if fvs_qos_map else None + + ###################################### + ## Send configurations to CONFIG_DB ## + ###################################### + + # Configure cable length + self.change_cable_len(test_cable_len) + + if orig_fvs_port: + self.config_db.delete_entry("PORT", self.INTF) + + fvs_port = orig_fvs_port.copy() + + # Delete admin_status field if it exists + fvs_port.pop("admin_status", None) + + fvs_port["speed"] = test_speed + + self.config_db.update_entry("PORT", self.INTF, fvs_port) + + # Configure PFC enable + self.set_port_qos_table(self.INTF, test_port_pfc_enable) + + # Wait for buffermgrd to process the changes + time.sleep(2) + + ################## + ## Verification ## + ################## + + # Verify BUFFER_PG table entry in CONFIG_DB without admin_status field + expected_profile = "pg_lossless_{}_{}_profile".format(test_speed, test_cable_len) + pg_field_key = "{}|{}".format(self.INTF, test_port_pfc_enable.replace(',', '-')) + self.config_db.wait_for_field_match("BUFFER_PG", "{}".format(pg_field_key), + {"profile": expected_profile}) + + # Set admin_status to up + dvs.port_admin_set(self.INTF, "up") + + fvs_buffer_pg = self.config_db.get_entry("BUFFER_PG", pg_field_key) + assert fvs_buffer_pg.get("profile") == expected_profile, \ + "BUFFER_PG entry for {} does not match expected profile {}".format(pg_field_key, expected_profile) + + finally: + ############################### + ## Revert to original values ## + ############################### + + # Revert cable length + if orig_cable_len: + self.change_cable_len(orig_cable_len) + else: + self.config_db.delete_entry("CABLE_LENGTH", "AZURE") + + # Revert to original PORT configuration + if orig_fvs_port: + self.config_db.update_entry("PORT", self.INTF, orig_fvs_port) + else: + self.config_db.delete_entry("PORT", self.INTF) + + # Revert port qos map + if orig_port_qos_map: + self.config_db.update_entry("PORT_QOS_MAP", self.INTF, orig_port_qos_map) + else: + self.config_db.delete_entry("PORT_QOS_MAP", self.INTF) + diff --git a/tests/test_copp.py b/tests/test_copp.py index 5885a489b52..c9dd514013f 100644 --- a/tests/test_copp.py +++ b/tests/test_copp.py @@ -71,7 +71,8 @@ "dest_nat_miss": "SAI_HOSTIF_TRAP_TYPE_DNAT_MISS", "ldp": "SAI_HOSTIF_TRAP_TYPE_LDP", "bfd_micro": "SAI_HOSTIF_TRAP_TYPE_BFD_MICRO", - "bfdv6_micro": "SAI_HOSTIF_TRAP_TYPE_BFDV6_MICRO" + "bfdv6_micro": "SAI_HOSTIF_TRAP_TYPE_BFDV6_MICRO", + "neighbor_miss": "SAI_HOSTIF_TRAP_TYPE_NEIGHBOR_MISS" } copp_group_default = { @@ -128,6 +129,17 @@ "red_action":"drop" } +copp_group_queue1_group3 = { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"200", + "cbs":"200", + "red_action":"drop" +} + copp_group_queue2_group1 = { "cbs": "1000", "cir": "1000", @@ -194,6 +206,7 @@ class TestCopp(object): def setup_copp(self, dvs): self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) self.trap_atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF_TRAP") self.trap_group_atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF_TRAP_GROUP") self.policer_atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_POLICER") @@ -202,10 +215,24 @@ def setup_copp(self, dvs): self.trap_ctbl = swsscommon.Table(self.cdb, "COPP_TRAP") self.trap_group_ctbl = swsscommon.Table(self.cdb, "COPP_GROUP") self.feature_tbl = swsscommon.Table(self.cdb, "FEATURE") + self.state_stbl = swsscommon.Table(self.sdb, "COPP_TRAP_TABLE") + self.capability_stbl = swsscommon.Table(self.sdb, "COPP_TRAP_CAPABILITY_TABLE") fvs = swsscommon.FieldValuePairs([("state", "disabled")]) self.feature_tbl.set("sflow", fvs) time.sleep(2) + def validate_trap_hw_status(self, trap_id, trap_status): + (status, fvs) = self.state_stbl.get(trap_id) + if trap_status == True: + assert status == True + + if status: + for fv in fvs: + if fv[0] == "hw_status": + if trap_status == True: + assert fv[1] == "installed" + else: + assert fv[1] == "not-installed" def validate_policer(self, policer_oid, field, value): (status, fvs) = self.policer_atbl.get(policer_oid) @@ -232,6 +259,8 @@ def validate_trap_group(self, trap_oid, trap_group): queue = "" trap_action = "" trap_priority = "" + default_trap_queue = "0" + default_trap_prio = "1" for fv in trap_fvs: if fv[0] == "SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION": @@ -262,6 +291,11 @@ def validate_trap_group(self, trap_oid, trap_group): assert trap_group_oid != "oid:0x0" if keys == "queue": assert queue == trap_group[keys] + # default trap in copp config doesn't specify a trap priority + # this is instead set internally in swss + # confirm that default trap uses a priority 1 + if queue == default_trap_queue: + assert trap_priority == default_trap_prio else: assert 0 @@ -330,6 +364,15 @@ def test_defaults(self, dvs, testlog): if trap_id not in disabled_traps: assert trap_found == True + (status, fvs) = self.capability_stbl.get("traps") + assert status == True + trap_list = [] + for fv in fvs: + if fv[0] == "trap_ids": + trap_list = fv[1].split(",") + break + + assert len(trap_list) != 0 def test_restricted_trap_sflow(self, dvs, testlog): self.setup_copp(dvs) @@ -470,6 +513,7 @@ def test_trap_ids_set(self, dvs, testlog): assert trap_found == True elif trap_id == "bgpv6": assert trap_found == False + self.validate_trap_hw_status(trap_id, trap_found) traps = "bgp,bgpv6" fvs = swsscommon.FieldValuePairs([("trap_ids", traps)]) @@ -494,6 +538,8 @@ def test_trap_ids_set(self, dvs, testlog): self.validate_trap_group(key,trap_group) break assert trap_found == True + self.validate_trap_hw_status(trap_id, trap_found) + def test_trap_action_set(self, dvs, testlog): self.setup_copp(dvs) @@ -558,6 +604,7 @@ def test_new_trap_add(self, dvs, testlog): break if trap_id not in disabled_traps: assert trap_found == True + self.validate_trap_hw_status(trap_id, trap_found) def test_new_trap_del(self, dvs, testlog): self.setup_copp(dvs) @@ -595,6 +642,7 @@ def test_new_trap_del(self, dvs, testlog): break if trap_id not in disabled_traps: assert trap_found == False + self.validate_trap_hw_status(trap_id, trap_found) def test_new_trap_group_add(self, dvs, testlog): self.setup_copp(dvs) @@ -634,6 +682,7 @@ def test_new_trap_group_add(self, dvs, testlog): break if trap_id not in disabled_traps: assert trap_found == True + self.validate_trap_hw_status(trap_id, trap_found) def test_new_trap_group_del(self, dvs, testlog): self.setup_copp(dvs) @@ -675,6 +724,7 @@ def test_new_trap_group_del(self, dvs, testlog): break if trap_id not in disabled_traps: assert trap_found != True + self.validate_trap_hw_status(trap_id, trap_found) def test_override_trap_grp_cfg_del (self, dvs, testlog): self.setup_copp(dvs) @@ -744,6 +794,7 @@ def test_override_trap_cfg_del(self, dvs, testlog): assert trap_found == True elif trap_id == "ssh": assert trap_found == False + self.validate_trap_hw_status(trap_id, trap_found) def test_empty_trap_cfg(self, dvs, testlog): self.setup_copp(dvs) @@ -769,6 +820,7 @@ def test_empty_trap_cfg(self, dvs, testlog): self.validate_trap_group(key,trap_group) break assert trap_found == False + self.validate_trap_hw_status(trap_id, trap_found) self.trap_ctbl._del("ip2me") time.sleep(2) @@ -788,6 +840,7 @@ def test_empty_trap_cfg(self, dvs, testlog): self.validate_trap_group(key,trap_group) break assert trap_found == True + self.validate_trap_hw_status(trap_id, trap_found) def test_disabled_feature_always_enabled_trap(self, dvs, testlog): @@ -841,3 +894,38 @@ def test_disabled_feature_always_enabled_trap(self, dvs, testlog): self.feature_tbl.set("lldp", fvs) assert table_found == False + + def test_multi_feature_trap_add(self, dvs, testlog): + self.setup_copp(dvs) + global copp_trap + traps = "eapol" + fvs = swsscommon.FieldValuePairs([("state", "disbled")]) + self.feature_tbl.set("macsec", fvs) + fvs = swsscommon.FieldValuePairs([("state", "enabled")]) + self.feature_tbl.set("pac", fvs) + fvs = swsscommon.FieldValuePairs([("trap_group", "queue4_group1"),("trap_ids", traps)]) + self.trap_ctbl.set("pac", fvs) + + + copp_trap["eapol"] = [traps, copp_group_queue4_group1] + time.sleep(2) + + trap_keys = self.trap_atbl.getKeys() + trap_ids = traps.split(",") + trap_group = copp_group_queue4_group1 + for trap_id in trap_ids: + trap_type = traps_to_trap_type[trap_id] + trap_found = False + trap_group_oid = "" + for key in trap_keys: + (status, fvs) = self.trap_atbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_HOSTIF_TRAP_ATTR_TRAP_TYPE": + if fv[1] == trap_type: + trap_found = True + if trap_found: + self.validate_trap_group(key,trap_group) + break + if trap_id not in disabled_traps: + assert trap_found == True diff --git a/tests/test_dash_vnet.py b/tests/test_dash_vnet.py deleted file mode 100644 index 031fd7a0ef6..00000000000 --- a/tests/test_dash_vnet.py +++ /dev/null @@ -1,334 +0,0 @@ -from swsscommon import swsscommon - -from dash_api.appliance_pb2 import * -from dash_api.vnet_pb2 import * -from dash_api.eni_pb2 import * -from dash_api.route_pb2 import * -from dash_api.route_rule_pb2 import * -from dash_api.vnet_mapping_pb2 import * -from dash_api.route_type_pb2 import * -from dash_api.types_pb2 import * - -import typing -import time -import binascii -import uuid -import ipaddress -import sys -import socket - - -DVS_ENV = ["HWSKU=DPU-2P"] -NUM_PORTS = 2 - -def to_string(value): - if isinstance(value, bool): - return "true" if value else "false" - elif isinstance(value, bytes): - return value - return str(value) - - -class ProduceStateTable(object): - def __init__(self, database, table_name: str): - self.table = swsscommon.ProducerStateTable( - database.db_connection, - table_name) - - def __setitem__(self, key: str, pairs: typing.Union[dict, list, tuple]): - pairs_str = [] - if isinstance(pairs, dict): - pairs = pairs.items() - for k, v in pairs: - pairs_str.append((to_string(k), to_string(v))) - self.table.set(key, pairs_str) - - def __delitem__(self, key: str): - self.table.delete(str(key)) - - -class Table(object): - def __init__(self, database, table_name: str): - self.table_name = table_name - self.table = swsscommon.Table(database.db_connection, self.table_name) - - def __getitem__(self, key: str): - exists, result = self.table.get(str(key)) - if not exists: - return None - else: - return dict(result) - - def get_keys(self): - return self.table.getKeys() - - def get_newly_created_oid(self, old_oids): - new_oids = self.asic_db.wait_for_n_keys(table, len(old_oids) + 1) - oid = [ids for ids in new_oids if ids not in old_oids] - return oid[0] - - -class Dash(object): - def __init__(self, dvs): - self.dvs = dvs - self.app_dash_appliance_table = ProduceStateTable( - self.dvs.get_app_db(), "DASH_APPLIANCE_TABLE") - self.asic_direction_lookup_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY") - self.asic_vip_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY") - self.app_dash_vnet_table = ProduceStateTable( - self.dvs.get_app_db(), "DASH_VNET_TABLE") - self.asic_dash_vnet_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VNET") - self.app_dash_eni_table = ProduceStateTable( - self.dvs.get_app_db(), "DASH_ENI_TABLE") - self.asic_eni_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_ENI") - self.asic_eni_ether_addr_map_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_ENI_ETHER_ADDRESS_MAP_ENTRY") - self.app_dash_vnet_map_table = ProduceStateTable( - self.dvs.get_app_db(), "DASH_VNET_MAPPING_TABLE") - self.asic_dash_outbound_ca_to_pa_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_CA_TO_PA_ENTRY") - self.asic_pa_validation_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_PA_VALIDATION_ENTRY") - self.app_dash_route_table = ProduceStateTable( - self.dvs.get_app_db(), "DASH_ROUTE_TABLE") - self.app_dash_route_rule_table = ProduceStateTable( - self.dvs.get_app_db(), "DASH_ROUTE_RULE_TABLE") - self.asic_outbound_routing_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY") - self.asic_inbound_routing_rule_table = Table( - self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY") - - def create_appliance(self, appliance_id, attr_maps: dict): - self.app_dash_appliance_table[str(appliance_id)] = attr_maps - - def remove_appliance(self, appliance_id): - del self.app_dash_appliance_table[str(appliance_id)] - - def create_vnet(self, vnet, attr_maps: dict): - self.app_dash_vnet_table[str(vnet)] = attr_maps - - def remove_vnet(self, vnet): - del self.app_dash_vnet_table[str(vnet)] - - def create_eni(self, eni, attr_maps: dict): - self.app_dash_eni_table[str(eni)] = attr_maps - - def remove_eni(self, eni): - del self.app_dash_eni_table[str(eni)] - - def create_vnet_map(self, vnet, ip, attr_maps: dict): - self.app_dash_vnet_map_table[str(vnet) + ":" + str(ip)] = attr_maps - - def remove_vnet_map(self, vnet, ip): - del self.app_dash_vnet_map_table[str(vnet) + ":" + str(ip)] - - def create_outbound_routing(self, mac_string, ip, attr_maps: dict): - self.app_dash_route_table[str(mac_string) + ":" + str(ip)] = attr_maps - - def remove_outbound_routing(self, mac_string, ip): - del self.app_dash_route_table[str(mac_string) + ":" + str(ip)] - - def create_inbound_routing(self, mac_string, vni, ip, attr_maps: dict): - self.app_dash_route_rule_table[str(mac_string) + ":" + str(vni) + ":" + str(ip)] = attr_maps - - def remove_inbound_routing(self, mac_string, vni, ip): - del self.app_dash_route_rule_table[str(mac_string) + ":" + str(vni) + ":" + str(ip)] - -class TestDash(object): - def test_appliance(self, dvs): - dashobj = Dash(dvs) - self.appliance_id = "100" - self.sip = "10.0.0.1" - self.vm_vni = "4321" - pb = Appliance() - pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.sip))) - pb.vm_vni = int(self.vm_vni) - dashobj.create_appliance(self.appliance_id, {"pb": pb.SerializeToString()}) - time.sleep(3) - - direction_entries = dashobj.asic_direction_lookup_table.get_keys() - assert direction_entries - fvs = dashobj.asic_direction_lookup_table[direction_entries[0]] - for fv in fvs.items(): - if fv[0] == "SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION": - assert fv[1] == "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION" - vip_entries = dashobj.asic_vip_table.get_keys() - assert vip_entries - fvs = dashobj.asic_vip_table[vip_entries[0]] - for fv in fvs.items(): - if fv[0] == "SAI_VIP_ENTRY_ATTR_ACTION": - assert fv[1] == "SAI_VIP_ENTRY_ACTION_ACCEPT" - return dashobj - - def test_vnet(self, dvs): - dashobj = Dash(dvs) - self.vnet = "Vnet1" - self.vni = "45654" - self.guid = "559c6ce8-26ab-4193-b946-ccc6e8f930b2" - pb = Vnet() - pb.vni = int(self.vni) - pb.guid.value = bytes.fromhex(uuid.UUID(self.guid).hex) - dashobj.create_vnet(self.vnet, {"pb": pb.SerializeToString()}) - time.sleep(3) - vnets = dashobj.asic_dash_vnet_table.get_keys() - assert vnets - self.vnet_oid = vnets[0] - vnet_attr = dashobj.asic_dash_vnet_table[self.vnet_oid] - assert vnet_attr["SAI_VNET_ATTR_VNI"] == "45654" - return dashobj - - def test_eni(self, dvs): - dashobj = Dash(dvs) - self.vnet = "Vnet1" - self.mac_string = "F4939FEFC47E" - self.mac_address = "F4:93:9F:EF:C4:7E" - self.eni_id = "497f23d7-f0ac-4c99-a98f-59b470e8c7bd" - self.underlay_ip = "25.1.1.1" - self.admin_state = "enabled" - pb = Eni() - pb.eni_id = self.eni_id - pb.mac_address = bytes.fromhex(self.mac_address.replace(":", "")) - pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.underlay_ip))) - pb.admin_state = State.STATE_ENABLED - pb.vnet = self.vnet - dashobj.create_eni(self.mac_string, {"pb": pb.SerializeToString()}) - time.sleep(3) - vnets = dashobj.asic_dash_vnet_table.get_keys() - assert vnets - self.vnet_oid = vnets[0] - enis = dashobj.asic_eni_table.get_keys() - assert enis - self.eni_oid = enis[0]; - fvs = dashobj.asic_eni_table[enis[0]] - for fv in fvs.items(): - if fv[0] == "SAI_ENI_ATTR_VNET_ID": - assert fv[1] == str(self.vnet_oid) - if fv[0] == "SAI_ENI_ATTR_PPS": - assert fv[1] == 0 - if fv[0] == "SAI_ENI_ATTR_CPS": - assert fv[1] == 0 - if fv[0] == "SAI_ENI_ATTR_FLOWS": - assert fv[1] == 0 - if fv[0] == "SAI_ENI_ATTR_ADMIN_STATE": - assert fv[1] == "true" - - time.sleep(3) - eni_addr_maps = dashobj.asic_eni_ether_addr_map_table.get_keys() - assert eni_addr_maps - fvs = dashobj.asic_eni_ether_addr_map_table[eni_addr_maps[0]] - for fv in fvs.items(): - if fv[0] == "SAI_ENI_ETHER_ADDRESS_MAP_ENTRY_ATTR_ENI_ID": - assert fv[1] == str(self.eni_oid) - return dashobj - - def test_vnet_map(self, dvs): - dashobj = Dash(dvs) - self.vnet = "Vnet1" - self.ip1 = "10.1.1.1" - self.ip2 = "10.1.1.2" - self.mac_address = "F4:93:9F:EF:C4:7E" - self.routing_type = "vnet_encap" - self.underlay_ip = "101.1.2.3" - pb = VnetMapping() - pb.mac_address = bytes.fromhex(self.mac_address.replace(":", "")) - pb.action_type = RoutingType.ROUTING_TYPE_VNET_ENCAP - pb.underlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.underlay_ip))) - - dashobj.create_vnet_map(self.vnet, self.ip1, {"pb": pb.SerializeToString()}) - dashobj.create_vnet_map(self.vnet, self.ip2, {"pb": pb.SerializeToString()}) - time.sleep(3) - - vnet_ca_to_pa_maps = dashobj.asic_dash_outbound_ca_to_pa_table.get_keys() - assert len(vnet_ca_to_pa_maps) >= 2 - fvs = dashobj.asic_dash_outbound_ca_to_pa_table[vnet_ca_to_pa_maps[0]] - for fv in fvs.items(): - if fv[0] == "SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_UNDERLAY_DIP": - assert fv[1] == "101.1.2.3" - if fv[0] == "SAI_OUTBOUND_CA_TO_PA_ENTRY_ATTR_OVERLAY_DMAC": - assert fv[1] == "F4:93:9F:EF:C4:7E" - - vnet_pa_validation_maps = dashobj.asic_pa_validation_table.get_keys() - assert vnet_pa_validation_maps - fvs = dashobj.asic_pa_validation_table[vnet_pa_validation_maps[0]] - for fv in fvs.items(): - if fv[0] == "SAI_PA_VALIDATION_ENTRY_ATTR_ACTION": - assert fv[1] == "SAI_PA_VALIDATION_ENTRY_ACTION_PERMIT" - return dashobj - - def test_outbound_routing(self, dvs): - dashobj = Dash(dvs) - self.vnet = "Vnet1" - self.mac_string = "F4939FEFC47E" - self.ip = "10.1.0.0/24" - self.action_type = "vnet_direct" - self.overlay_ip= "10.0.0.6" - pb = Route() - pb.action_type = RoutingType.ROUTING_TYPE_VNET_DIRECT - pb.vnet_direct.vnet = self.vnet - pb.vnet_direct.overlay_ip.ipv4 = socket.htonl(int(ipaddress.ip_address(self.overlay_ip))) - dashobj.create_outbound_routing(self.mac_string, self.ip, {"pb": pb.SerializeToString()}) - time.sleep(3) - - outbound_routing_entries = dashobj.asic_outbound_routing_table.get_keys() - assert outbound_routing_entries - fvs = dashobj.asic_outbound_routing_table[outbound_routing_entries[0]] - for fv in fvs.items(): - if fv[0] == "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_ACTION": - assert fv[1] == "SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_VNET_DIRECT" - if fv[0] == "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_OVERLAY_IP": - assert fv[1] == "10.0.0.6" - assert "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DST_VNET_ID" in fvs - return dashobj - - def test_inbound_routing(self, dvs): - dashobj = Dash(dvs) - self.mac_string = "F4939FEFC47E" - self.vnet = "Vnet1" - self.vni = "3251" - self.ip = "10.1.1.1" - self.action_type = "decap" - self.pa_validation = "true" - self.priority = "1" - self.protocol = "0" - pb = RouteRule() -# pb.action_type = RoutingType.ROUTING_TYPE_DECAP - pb.pa_validation = True - pb.priority = int(self.priority) - pb.protocol = int(self.protocol) - pb.vnet = self.vnet - - dashobj.create_inbound_routing(self.mac_string, self.vni, self.ip, {"pb": pb.SerializeToString()}) - time.sleep(3) - - inbound_routing_entries = dashobj.asic_inbound_routing_rule_table.get_keys() - assert inbound_routing_entries - fvs = dashobj.asic_inbound_routing_rule_table[inbound_routing_entries[0]] - for fv in fvs.items(): - if fv[0] == "SAI_INBOUND_ROUTING_ENTRY_ATTR_ACTION": - assert fv[1] == "SAI_INBOUND_ROUTING_ENTRY_ACTION_VXLAN_DECAP_PA_VALIDATE" - return dashobj - - def test_cleanup(self, dvs): - dashobj = Dash(dvs) - self.vnet = "Vnet1" - self.mac_string = "F4939FEFC47E" - self.vni = "3251" - self.sip = "10.1.1.1" - self.dip = "10.1.0.0/24" - self.appliance_id = "100" - dashobj.remove_inbound_routing(self.mac_string, self.vni, self.sip) - dashobj.remove_outbound_routing(self.mac_string, self.dip) - dashobj.remove_eni(self.mac_string) - dashobj.remove_vnet_map(self.vnet, self.sip) - dashobj.remove_vnet(self.vnet) - dashobj.remove_appliance(self.appliance_id) - -# Add Dummy always-pass test at end as workaroud -# for issue when Flaky fail on final test it invokes module tear-down -# before retrying -def test_nonflaky_dummy(): - pass diff --git a/tests/test_evpn_l3_vxlan.py b/tests/test_evpn_l3_vxlan.py index 3f424f38303..8820f484e67 100644 --- a/tests/test_evpn_l3_vxlan.py +++ b/tests/test_evpn_l3_vxlan.py @@ -17,6 +17,7 @@ def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) # Test 1 - Create and Delete SIP Tunnel and VRF VNI Map entries # @pytest.mark.skip(reason="Starting Route Orch, VRF Orch to be merged") @@ -597,6 +598,34 @@ def test_tunnel_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vlan(dvs, "100") + def test_vrf_state_db_update(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + helper = self.get_vxlan_helper() + + self.setup_db(dvs) + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + vrf_map_name = 'evpn_map_1000_Vrf-RED' + + vxlan_obj.fetch_exist_entries(dvs) + + + vxlan_obj.create_vrf(dvs, "Vrf-RED") + vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + + time.sleep(2) + #adding nvo after + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + exp_attr = [ + ("state", "ok"), + ] + helper.check_object(self.sdb, "VRF_OBJECT_TABLE", 'Vrf-RED', exp_attr) + vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') + vxlan_obj.remove_vrf(dvs, "Vrf-RED") + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_fabric.py b/tests/test_fabric.py index 2d1ea8c2930..72ad8287905 100644 --- a/tests/test_fabric.py +++ b/tests/test_fabric.py @@ -73,6 +73,14 @@ def test_voq_switch(self, vst): port_counters_stat_keys = flex_db.get_keys("FLEX_COUNTER_TABLE:" + meta_data['group_name']) for port_stat in port_counters_stat_keys: assert port_stat in dict(port_counters_keys.items()).values(), "Non port created on PORT_STAT_COUNTER group: {}".format(port_stat) + + # update some config_db entries + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + tb = swsscommon.Table(cfg_db, "FABRIC_PORT") + fvs = swsscommon.FieldValuePairs([("isolateStatus","True")]) + tb.set("FABRIC_PORT|Fabric0", fvs ) + fvs = swsscommon.FieldValuePairs([("forceUnisolateStatus", "1")]) + tb.set("FABRIC_PORT|Fabric0", fvs ) else: print( "We do not check switch type:", cfg_switch_type ) diff --git a/tests/test_fabric_capacity.py b/tests/test_fabric_capacity.py new file mode 100644 index 00000000000..cb10e09af2c --- /dev/null +++ b/tests/test_fabric_capacity.py @@ -0,0 +1,97 @@ +import random +from dvslib.dvs_database import DVSDatabase +from dvslib.dvs_common import PollingConfig + + +class TestVirtualChassis(object): + def test_voq_switch_fabric_capacity(self, vst): + """Test basic fabric capacity infrastructure in VOQ switchs. + + This test validates that when fabric links get isolated, the fabric capacity + get updated in the state_db. + When the link get unisolated, the fabric capacity get set back as well. + """ + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config information and choose a linecard or fabric card to test. + config_db = dvs.get_config_db() + adb = dvs.get_app_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll) + # enable monitoring + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) + + # get state_db infor + sdb = dvs.get_state_db() + # There are 16 fabric ports in the test environment. + # Choose one link to test. + portNum = random.randint(1, 16) + cdb_port = "Fabric"+str(portNum) + sdb_port = "PORT"+str(portNum) + + # setup test environment + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) + + # get current fabric capacity + fvs = sdb.wait_for_fields("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA",['operating_links'], polling_config=max_poll) + capacity = fvs['operating_links'] + + fvs = sdb.wait_for_fields("FABRIC_PORT_TABLE", sdb_port, ['STATUS'], polling_config=max_poll) + link_status = fvs['STATUS'] + if link_status == 'up': + try: + # clean up the testing port. + # set TEST_CRC_ERRORS to 0 + # set TEST_CODE_ERRORS to 0 + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST_CRC_ERRORS":"0"}) + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST_CODE_ERRORS": "0"}) + + # isolate the link from config_db + config_db.update_entry("FABRIC_PORT", cdb_port, {"isolateStatus": "True"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", sdb_port, {"ISOLATED": "1"}, polling_config=max_poll) + # check if capacity reduced + sdb.wait_for_field_negative_match("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA", {'operating_links': capacity}, polling_config=max_poll) + # unisolate the link from config_db + config_db.update_entry("FABRIC_PORT", cdb_port, {"isolateStatus": "False"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", sdb_port, {"ISOLATED": "0"}, polling_config=max_poll) + sdb.wait_for_field_match("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA", {'operating_links': capacity}, polling_config=max_poll) + + # now disable fabric link monitor + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll) + # isolate the link from config_db + config_db.update_entry("FABRIC_PORT", cdb_port, {"isolateStatus": "True"}) + try: + max_poll = PollingConfig(polling_interval=30, timeout=90, strict=True) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", sdb_port, {"ISOLATED": "1"}, polling_config=max_poll) + # check if capacity reduced + sdb.wait_for_field_negative_match("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA", {'operating_links': capacity}, polling_config=max_poll) + assert False, "Expecting no change here" + except Exception as e: + # Expect field not change here + pass + finally: + # cleanup + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST_CRC_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST_CODE_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "product"}) + else: + print("The link ", port, " is down") + else: + print("We do not check switch type:", cfg_switch_type) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/test_fabric_port.py b/tests/test_fabric_port.py index a7ad9958b01..dbdd235605f 100644 --- a/tests/test_fabric_port.py +++ b/tests/test_fabric_port.py @@ -21,15 +21,22 @@ def test_voq_switch_fabric_link(self, vst): cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": - # get config_db information + # get app_db/config_db information cdb = dvs.get_config_db() + adb = dvs.get_app_db() + + # check if the fabric montior toggle working + cdb.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}) + + cdb.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}) # set config_db to isolateStatus: True cdb.update_entry("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) cdb.wait_for_field_match("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) # check if appl_db value changes to isolateStatus: True - adb = dvs.get_app_db() adb.wait_for_field_match("FABRIC_PORT_TABLE", "Fabric1", {"isolateStatus": "True"}) # cleanup diff --git a/tests/test_fabric_port_isolation.py b/tests/test_fabric_port_isolation.py new file mode 100644 index 00000000000..5649c652f38 --- /dev/null +++ b/tests/test_fabric_port_isolation.py @@ -0,0 +1,108 @@ +import random +from dvslib.dvs_database import DVSDatabase +from dvslib.dvs_common import PollingConfig + + +class TestVirtualChassis(object): + def test_voq_switch_fabric_link(self, vst): + """Test basic fabric link monitoring infrastructure in VOQ switchs. + + This test validates that fabric links get isolated if they experienced some errors. + And the link get unisolated if it clears the error for several consecutive polls. + """ + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config information and choose a linecard or fabric card to test. + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + + max_poll = PollingConfig(polling_interval=15, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) + + # get state_db infor + sdb = dvs.get_state_db() + # key + port = "PORT1" + # There are 16 fabric ports in the test environment. + portNum = random.randint(1, 16) + port = "PORT"+str(portNum) + # wait for link monitoring algorithm skips init pollings + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "TEST"}) + if sdb.get_entry("FABRIC_PORT_TABLE", port)['STATUS'] == 'up': + try: + # clean up the system for the testing port. + # set TEST_CRC_ERRORS to 0 + # set TEST_CODE_ERRORS to 0 + # set TEST to "TEST" + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS":"0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CODE_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {'PORT_DOWN_COUNT': "0"}) + # ==== + # force unisolate this link to clear all the status + configKey = "Fabric"+str(portNum) + curForceStatus = int( config_db.get_entry( "FABRIC_PORT", configKey)['forceUnisolateStatus'] ) + curForceStatus += 1 + config_db.update_entry("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}) + config_db.wait_for_field_match("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}, + polling_config=max_poll) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + + # ====== + # inject testing errors and wait for link get isolated. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "2"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "1"}, polling_config=max_poll) + + # clear the testing errors and wait for link get unisolated. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + + # inject testing errors and wait for link get isolated. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "2"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "1"}, polling_config=max_poll) + + lnkDownCnt = 2 + sdb.update_entry("FABRIC_PORT_TABLE", port, {'PORT_DOWN_COUNT': str(lnkDownCnt)}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"PORT_DOWN_COUNT_handled": str(lnkDownCnt)}, polling_config=max_poll) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + + + # inject testing errors and wait for link get isolated again. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "2"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "1"}, polling_config=max_poll) + + # check if the link get permanently isolated as the link get isolate/unisolated more than 3 times + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"PRM_ISOLATED": "1"}, polling_config=max_poll) + + + # now test force unisolate this link + configKey = "Fabric"+str(portNum) + curForceStatus = int( config_db.get_entry( "FABRIC_PORT", configKey)['forceUnisolateStatus'] ) + curForceStatus += 1 + config_db.update_entry("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}) + config_db.wait_for_field_match("FABRIC_PORT", configKey, {'forceUnisolateStatus': str(curForceStatus)}, + polling_config=max_poll) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + + finally: + # cleanup + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CODE_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "product"}) + else: + print("The link ", port, " is down") + else: + print("We do not check switch type:", cfg_switch_type) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/test_fabric_rate.py b/tests/test_fabric_rate.py new file mode 100644 index 00000000000..4a135b6dd6b --- /dev/null +++ b/tests/test_fabric_rate.py @@ -0,0 +1,55 @@ +from dvslib.dvs_common import PollingConfig +from dvslib.dvs_database import DVSDatabase +from swsscommon import swsscommon +import random + +class TestVirtualChassis(object): + def test_voq_switch_fabric_rate(self, vst): + """Test fabric counters rate mpbs commands. + + Choose a fabric link, get the tx_rate. + Set the test field in the state_db, so the testing value can be read. + Now wait for the tx_rate increase in the state_db. + """ + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config info + config_db = dvs.get_config_db() + adb = dvs.get_app_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + + max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) + # get state_db infor + sdb = dvs.get_state_db() + + try: + # There are 16 fabric ports in the test environment. + # Choose one link to test. + portNum = random.randint(1, 16) + sdb_port = "PORT"+str(portNum) + + fvs = sdb.wait_for_fields("FABRIC_PORT_TABLE", sdb_port, ['OLD_TX_DATA'], polling_config=max_poll) + tx_rate = fvs['OLD_TX_DATA'] + + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) + sdb.wait_for_field_negative_match("FABRIC_PORT_TABLE", sdb_port, {'OLD_TX_DATA': tx_rate}, polling_config=max_poll) + finally: + # cleanup + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "product"}) + else: + print( "We do not check switch type:", cfg_switch_type ) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_fabric_switch_id.py b/tests/test_fabric_switch_id.py new file mode 100644 index 00000000000..f6f76011d50 --- /dev/null +++ b/tests/test_fabric_switch_id.py @@ -0,0 +1,48 @@ +from dvslib.dvs_common import wait_for_result, PollingConfig +import pytest + +class TestFabricSwitchId(object): + def check_syslog(self, dvs, marker, log): + def do_check_syslog(): + (ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" %(marker, log)]) + return (int(out.strip()) >= 1, None) + max_poll = PollingConfig(polling_interval=5, timeout=600, strict=True) + wait_for_result(do_check_syslog, polling_config=max_poll) + + def test_invalid_fabric_switch_id(self, vst): + # Find supervisor dvs. + dvs = None + config_db = None + for name in vst.dvss.keys(): + dvs = vst.dvss[name] + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + break + assert dvs and config_db + + # Verify orchagent's handling of invalid fabric switch_id in following cases: + # - Invalid fabric switch_id, e.g, -1, is set. + # - fabric switch_id is missing in ConfigDb. + for invalid_switch_id in (-1, None): + print(f"Test invalid switch id {invalid_switch_id}") + if invalid_switch_id is None: + config_db.delete_field("DEVICE_METADATA", "localhost", "switch_id") + expected_log = "Fabric switch id is not configured" + else: + config_db.set_field("DEVICE_METADATA", "localhost", "switch_id", str(invalid_switch_id)) + expected_log = f"Invalid fabric switch id {invalid_switch_id} configured" + + # Restart orchagent and verify orchagent behavior by checking syslog. + dvs.stop_swss() + marker = dvs.add_log_marker() + dvs.start_swss() + self.check_syslog(dvs, marker, expected_log) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/test_fgnhg.py b/tests/test_fgnhg.py index 645853e24cf..3cb7e8f1fca 100644 --- a/tests/test_fgnhg.py +++ b/tests/test_fgnhg.py @@ -3,6 +3,7 @@ import time import json import pytest +import random from dvslib.dvs_common import wait_for_result from swsscommon import swsscommon @@ -148,7 +149,7 @@ def get_nh_oid_map(asic_db): assert nh_oid_map != {} return nh_oid_map -def verify_programmed_fg_asic_db_entry(asic_db,nh_memb_exp_count,nh_oid_map,nhgid,bucket_size): +def verify_programmed_fg_asic_db_entry(asic_db,prev_memb_dict,num_exp_changes,nh_memb_exp_count,nh_oid_map,nhgid,bucket_size): def _access_function(): false_ret = (False, None) ret = True @@ -182,7 +183,8 @@ def _access_function(): nh_oid == "0" or nh_oid_map.get(nh_oid,"NULL") == "NULL" or nh_oid_map.get(nh_oid) not in nh_memb_exp_count): - print("Invalid nh: nh_oid " + nh_oid + " index " + str(index)) + print("Invalid nh: nh_oid " + nh_oid + " index " + str(index) + + " member: " + member) if nh_oid_map.get(nh_oid,"NULL") == "NULL": print("nh_oid is null") if nh_oid_map.get(nh_oid) not in nh_memb_exp_count: @@ -190,22 +192,24 @@ def _access_function(): return false_ret memb_dict[index] = nh_oid_map.get(nh_oid) idxs = [0]*bucket_size + num_changes = 0 for idx,memb in memb_dict.items(): nh_memb_count[memb] = 1 + nh_memb_count[memb] idxs[idx] = idxs[idx] + 1 - + if memb != prev_memb_dict.get(idx, "NULL"): + num_changes = num_changes + 1 + #print("Change detected at index " + str(idx) + " old nh " + prev_memb_dict.get(idx, "NULL") + " new nh " + memb) for key in nh_memb_exp_count: ret = ret and (nh_memb_count[key] == nh_memb_exp_count[key]) for idx in idxs: ret = ret and (idx == 1) - if ret != True: - print("Expected member count was " + str(nh_memb_exp_count) + " Received was " + str(nh_memb_count)) - print("Indexes arr was " + str(idxs)) - return (ret, nh_memb_count) + if num_changes != num_exp_changes: + ret = False + return ret, memb_dict - status, result = wait_for_result(_access_function) - assert status, f"Exact match not found: expected={nh_memb_exp_count}, received={result}" - return result + status, new_memb_dict = wait_for_result(_access_function) + assert status, f"Exact match not found: expected={nh_memb_exp_count}, received={nh_memb_count}" + return new_memb_dict def shutdown_link(dvs, db, port): dvs.servers[port].runcmd("ip link set down dev eth0") == 0 @@ -242,17 +246,52 @@ def verify_programmed_fg_state_db_entry(state_db, fg_nhg_prefix, nh_memb_exp_cou for idx,memb in memb_dict.items(): assert memb == 0 -def validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, +def verify_fg_state_db_for_even_distribution(state_db, fg_nhg_prefix, bucket_size, nh_ip_count): + def _access_function(): + false_ret = (False, '') + ret = True + keys = state_db.get_keys("FG_ROUTE_TABLE") + if not keys: + return false_ret + for key in keys: + if key != fg_nhg_prefix: + continue + fvs = state_db.get_entry("FG_ROUTE_TABLE", key) + if not fvs: + return false_ret + member_count = {} + for key, value in fvs.items(): + if value not in member_count: + member_count[value] = 0 + member_count[value] = member_count[value] + 1 + + # Verify that values in the member_count dictionary don't differ by more than 1 + if member_count: + min_count = min(member_count.values()) + max_count = max(member_count.values()) + if (min_count == bucket_size//nh_ip_count) and (max_count - min_count <= 1) and (sum(member_count.values()) == bucket_size): + ret = True + else: + ret = False + + else: + ret = False + return ret, member_count + + status, member_count = wait_for_result(_access_function) + assert status, f"Member count distribution is uneven" + +def validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, prev_memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size): state_db_entry_memb_exp_count = {} for ip, cnt in nh_memb_exp_count.items(): state_db_entry_memb_exp_count[ip + '@' + ip_to_if_map[ip]] = cnt - - verify_programmed_fg_asic_db_entry(asic_db,nh_memb_exp_count,nh_oid_map,nhgid,bucket_size) + next_memb_dict = verify_programmed_fg_asic_db_entry(asic_db,prev_memb_dict,num_exp_changes,nh_memb_exp_count,nh_oid_map,nhgid,bucket_size) verify_programmed_fg_state_db_entry(state_db, fg_nhg_prefix, state_db_entry_memb_exp_count) + return next_memb_dict -def program_route_and_validate_fine_grained_ecmp(app_db, asic_db, state_db, ip_to_if_map, +def program_route_and_validate_fine_grained_ecmp(app_db, asic_db, state_db, ip_to_if_map, prev_memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size): ips = "" ifs = "" @@ -267,8 +306,25 @@ def program_route_and_validate_fine_grained_ecmp(app_db, asic_db, state_db, ip_t ps = swsscommon.ProducerStateTable(app_db, ROUTE_TB) fvs = swsscommon.FieldValuePairs([("nexthop", ips), ("ifname", ifs)]) ps.set(fg_nhg_prefix, fvs) - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + new_memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + prev_memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + return new_memb_dict + +def program_route_and_validate_distribtution(app_db, state_db, ip_to_if_map, fg_nhg_prefix, nh_ips, bucket_size): + ips = "" + ifs = "" + for ip in nh_ips: + if ips == "": + ips = ip + ifs = ip_to_if_map[ip] + else: + ips = ips + "," + ip + ifs = ifs + "," + ip_to_if_map[ip] + + ps = swsscommon.ProducerStateTable(app_db, ROUTE_TB) + fvs = swsscommon.FieldValuePairs([("nexthop", ips), ("ifname", ifs)]) + ps.set(fg_nhg_prefix, fvs) + verify_fg_state_db_for_even_distribution(state_db, fg_nhg_prefix, bucket_size, len(nh_ips)) def create_interface_n_fg_ecmp_config(dvs, nh_range_start, nh_range_end, fg_nhg_name): ip_to_if_map = {} @@ -317,6 +373,12 @@ def fine_grained_ecmp_base_test(dvs, match_mode): bucket_size = 60 ip_to_if_map = {} + # Update log level so that we can analyze the log in case the test failed + logfvs = config_db.wait_for_entry("LOGGER", "orchagent") + old_log_level = logfvs.get("LOGLEVEL") + logfvs["LOGLEVEL"] = "INFO" + config_db.update_entry("LOGGER", "orchagent", logfvs) + fvs = {"bucket_size": str(bucket_size), "match_mode": match_mode} create_entry(config_db, FG_NHG, fg_nhg_name, fvs) @@ -382,12 +444,14 @@ def fine_grained_ecmp_base_test(dvs, match_mode): nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) nh_oid_map = get_nh_oid_map(asic_db) + memb_dict = {} ### Test scenarios with bank 0 having 0 members up and only bank 1 having members # ARP is not resolved for 10.0.0.7, so fg nhg should be created without 10.0.0.7 nh_memb_exp_count = {"10.0.0.9":30,"10.0.0.11":30} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 60 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Resolve ARP for 10.0.0.7 asic_nh_count = len(asic_db.get_keys(ASIC_NH_TB)) @@ -397,8 +461,9 @@ def fine_grained_ecmp_base_test(dvs, match_mode): # Now that ARP was resolved, 10.0.0.7 should be added as a valid fg nhg member nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 20 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Test warm reboot run_warm_reboot(dvs) @@ -406,104 +471,123 @@ def fine_grained_ecmp_base_test(dvs, match_mode): nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) nh_oid_map = get_nh_oid_map(asic_db) nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 0 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring down 1 next hop in bank 1 nh_memb_exp_count = {"10.0.0.7":30,"10.0.0.11":30} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 20 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring down 2 next hop and bring up 1 next hop in bank 1 nh_memb_exp_count = {"10.0.0.9":60} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 60 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring up 1 next hop in bank 1 nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 40 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring up some next-hops in bank 0 for the 1st time nh_memb_exp_count = {"10.0.0.1":10,"10.0.0.3":10,"10.0.0.5":10,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Test warm reboot run_warm_reboot(dvs) asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) nh_oid_map = get_nh_oid_map(asic_db) + num_exp_changes = 0 nh_memb_exp_count = {"10.0.0.1":10,"10.0.0.3":10,"10.0.0.5":10,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring down 1 next-hop from bank 0, and 2 next-hops from bank 1 nh_memb_exp_count = {"10.0.0.1":15,"10.0.0.5":15,"10.0.0.11":30} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring down 1 member and bring up 1 member in bank 0 at the same time nh_memb_exp_count = {"10.0.0.1":15,"10.0.0.3":15,"10.0.0.11":30} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 15 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring down 2 members and bring up 1 member in bank 0 at the same time nh_memb_exp_count = {"10.0.0.5":30,"10.0.0.11":30} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring up 2 members and bring down 1 member in bank 0 at the same time nh_memb_exp_count = {"10.0.0.1":15,"10.0.0.3":15,"10.0.0.11":30} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bringup arbitrary # of next-hops from both banks at the same time nh_memb_exp_count = {"10.0.0.1":10,"10.0.0.3":10,"10.0.0.5":10,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring all next-hops in bank 1 down nh_memb_exp_count = {"10.0.0.1":20,"10.0.0.3":20,"10.0.0.5":20} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Make next-hop changes to bank 0 members, given bank 1 is still down nh_memb_exp_count = {"10.0.0.1":30,"10.0.0.5":30} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 20 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bringup 1 member in bank 1 again nh_memb_exp_count = {"10.0.0.1":15,"10.0.0.5":15,"10.0.0.11":30} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Test 2nd,3rd memb up in bank nh_memb_exp_count = {"10.0.0.1":15,"10.0.0.5":15,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 20 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # bring all links down one by one shutdown_link(dvs, app_db, 0) shutdown_link(dvs, app_db, 1) nh_memb_exp_count = {"10.0.0.5":30,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 15 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) shutdown_link(dvs, app_db, 2) + num_exp_changes = 30 nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) shutdown_link(dvs, app_db, 3) nh_memb_exp_count = {"10.0.0.9":30,"10.0.0.11":30} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 20 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) shutdown_link(dvs, app_db, 4) + num_exp_changes = 30 nh_memb_exp_count = {"10.0.0.11":60} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Bring down last link, there shouldn't be a crash or other bad orchagent state because of this shutdown_link(dvs, app_db, 5) @@ -516,9 +600,10 @@ def fine_grained_ecmp_base_test(dvs, match_mode): asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) nh_oid_map = get_nh_oid_map(asic_db) + num_exp_changes = 60 nh_memb_exp_count = {"10.0.0.7":30,"10.0.0.9":30} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) startup_link(dvs, app_db, 5) # Perform a route table update, Update the route to contain 10.0.0.3 as well, since Ethernet4 associated with it @@ -530,31 +615,36 @@ def fine_grained_ecmp_base_test(dvs, match_mode): # 10.0.0.11 associated with newly brought up link 5 should be updated in FG ecmp # 10.0.0.3 addition per above route table change should have no effect nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 20 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) startup_link(dvs, app_db, 2) nh_memb_exp_count = {"10.0.0.5":30,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 30 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) startup_link(dvs, app_db, 0) nh_memb_exp_count = {"10.0.0.1":15,"10.0.0.5":15,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 15 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # remove fgnhg member remove_entry(config_db, "FG_NHG_MEMBER", "10.0.0.1") nh_memb_exp_count = {"10.0.0.5":30,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 15 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # add fgnhg member fvs = {"FG_NHG": fg_nhg_name, "bank": "0"} create_entry(config_db, FG_NHG_MEMBER, "10.0.0.1", fvs) nh_memb_exp_count = {"10.0.0.1":15,"10.0.0.5":15,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 15 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Remove route asic_rt_key = get_asic_route_key(asic_db, fg_nhg_prefix) @@ -589,10 +679,12 @@ def fine_grained_ecmp_base_test(dvs, match_mode): nh_oid_map = {} nh_oid_map = get_nh_oid_map(asic_db) + memb_dict = {} nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 60 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # remove fgnhg prefix: The fine grained route should transition to regular ECMP/route remove_entry(config_db, "FG_NHG_PREFIX", fg_nhg_prefix) @@ -624,6 +716,624 @@ def fine_grained_ecmp_base_test(dvs, match_mode): remove_entry(config_db, "FG_NHG_MEMBER", "10.0.0." + str(1 + i*2)) +def fine_grained_ecmp_match_mode_prefix_test(dvs): + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + config_db = dvs.get_config_db() + state_db = dvs.get_state_db() + fvs_nul = {"NULL": "NULL"} + NUM_NHs = 6 + fg_nhg_name = "fgnhg_v4" + fg_nhg_prefix = "2.2.2.0/24" + bucket_size = 60 + ip_to_if_map = {} + match_mode = 'prefix-based' + + # Update log level so that we can analyze the log in case the test failed + logfvs = config_db.wait_for_entry("LOGGER", "orchagent") + old_log_level = logfvs.get("LOGLEVEL") + logfvs["LOGLEVEL"] = "INFO" + config_db.update_entry("LOGGER", "orchagent", logfvs) + + fvs = {"bucket_size": str(bucket_size), "match_mode": match_mode, + "max_next_hops": str(NUM_NHs)} + create_entry(config_db, FG_NHG, fg_nhg_name, fvs) + + fvs = {"FG_NHG": fg_nhg_name} + create_entry(config_db, FG_NHG_PREFIX, fg_nhg_prefix, fvs) + + for i in range(0,NUM_NHs): + if_name_key = "Ethernet" + str(i*4) + ip_pref_key = if_name_key + "|10.0.0." + str(i*2) + "/31" + create_entry(config_db, IF_TB, if_name_key, fvs_nul) + create_entry(config_db, IF_TB, ip_pref_key, fvs_nul) + dvs.port_admin_set(if_name_key, "up") + dvs.servers[i].runcmd("ip link set down dev eth0") == 0 + dvs.servers[i].runcmd("ip link set up dev eth0") == 0 + ip_to_if_map["10.0.0." + str(1 + i*2)] = if_name_key + + # Wait for the software to receive the entries + time.sleep(1) + + # Resolve ARP for 3 next-hops + asic_nh_count = len(asic_db.get_keys(ASIC_NH_TB)) + dvs.runcmd("arp -s 10.0.0.7 00:00:00:00:00:04") + dvs.runcmd("arp -s 10.0.0.9 00:00:00:00:00:05") + dvs.runcmd("arp -s 10.0.0.11 00:00:00:00:00:06") + + asic_db.wait_for_n_keys(ASIC_NH_TB, asic_nh_count + 3) + + # Add route with 3 next-hops + print("Add route with 3 next-hops") + ps = swsscommon.ProducerStateTable(app_db.db_connection, ROUTE_TB) + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.7,10.0.0.9,10.0.0.11"), + ("ifname", "Ethernet12,Ethernet16,Ethernet20")]) + ps.set(fg_nhg_prefix, fvs) + + # We just use sleep so that the sw receives this entry + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + rtbl = swsscommon.Table(adb, ASIC_ROUTE_TB) + keys = rtbl.getKeys() + found_route = False + for k in keys: + rt_key = json.loads(k) + + if rt_key['dest'] == fg_nhg_prefix: + found_route = True + break + + assert (found_route == True) + + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) + nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) + + nh_oid_map = get_nh_oid_map(asic_db) + + ### Test scenarios with 3 members + print("Test scenarios with 3 members") + nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} + memb_dict = {} + num_exp_changes = 60 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + print("Test warm reboot") + run_warm_reboot(dvs) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) + + nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) + nh_oid_map = get_nh_oid_map(asic_db) + nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} + num_exp_changes = 0 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bring down 1 next hop + print("FGNHG Bring down 1 next hop") + nh_memb_exp_count = {"10.0.0.7":30,"10.0.0.11":30} + num_exp_changes = 20 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # (Bring down 2 next hop and bring up 1 next hop) + print("Bring down 2 next hop and bring up 1 next hop") + nh_memb_exp_count = {"10.0.0.9":60} + num_exp_changes = 60 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bring up 2 next hops + print("Bring up 2 next hops") + nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} + num_exp_changes = 40 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bring up 3 more next-hops + print("Bring up 3 more next-hops") + # First Resolve ARP for 3 more next-hops + asic_nh_count = len(asic_db.get_keys(ASIC_NH_TB)) + dvs.runcmd("arp -s 10.0.0.1 00:00:00:00:00:07") + dvs.runcmd("arp -s 10.0.0.3 00:00:00:00:00:08") + dvs.runcmd("arp -s 10.0.0.5 00:00:00:00:00:09") + + asic_db.wait_for_n_keys(ASIC_NH_TB, asic_nh_count + 3) + + nh_oid_map = get_nh_oid_map(asic_db) + nh_memb_exp_count = {"10.0.0.1":10,"10.0.0.3":10,"10.0.0.5":10,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + # Test warm reboot + print("Test warm reboot") + run_warm_reboot(dvs) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) + nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) + nh_oid_map = get_nh_oid_map(asic_db) + nh_memb_exp_count = {"10.0.0.1":10,"10.0.0.3":10,"10.0.0.5":10,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} + num_exp_changes = 0 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bring down 3 next-hops + print("Bring down 3 next-hops") + nh_memb_exp_count = {"10.0.0.1":20,"10.0.0.5":20,"10.0.0.11":20} + num_exp_changes = 30 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bring down 1 member and bring up 1 member at the same time + print("Bring down 1 member and bring up 1 member at the same time") + nh_memb_exp_count = {"10.0.0.1":20,"10.0.0.3":20,"10.0.0.11":20} + num_exp_changes = 20 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bring down 2 members and bring up 1 member at the same time + print("Bring down 2 members and bring up 1 member at the same time") + nh_memb_exp_count = {"10.0.0.5":30,"10.0.0.11":30} + num_exp_changes = 40 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bring up 2 members and bring down 1 member at the same time + print("Bring up 2 members and bring down 1 member at the same time") + nh_memb_exp_count = {"10.0.0.1":20,"10.0.0.3":20,"10.0.0.11":20} + num_exp_changes = 40 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Bringup all the inactive nexthops at the same time + print("Bringup all the inactive nexthops at the same time") + num_exp_changes = 30 + nh_memb_exp_count = {"10.0.0.1":10,"10.0.0.3":10,"10.0.0.5":10,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # Remove route + print("Remove route") + asic_rt_key = get_asic_route_key(asic_db, fg_nhg_prefix) + ps._del(fg_nhg_prefix) + + # validate routes and nhg member in asic db, route entry in state db are removed + print("validate routes and nhg member in asic db, route entry in state db are removed") + asic_db.wait_for_deleted_entry(ASIC_ROUTE_TB, asic_rt_key) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, 0) + state_db.wait_for_n_keys("FG_ROUTE_TABLE", 0) + + # Remove fgnhg prefix and group + print("Remove fgnhg prefix") + remove_entry(config_db, "FG_NHG_PREFIX", fg_nhg_prefix) + remove_entry(config_db, "FG_NHG", fg_nhg_name) + # Nothing we can wait for in terms of db entries, we sleep here + # to give the sw enough time to delete the entry + time.sleep(1) + + # Add an ECMP route, since we deleted the FG_NHG_PREFIX it should see + # standard(non-Fine grained) ECMP behavior + print("Add an ECMP route, since we deleted the FG_NHG_PREFIX it should see") + print("standard(non-Fine grained) ECMP behavior") + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.7,10.0.0.9,10.0.0.11"), + ("ifname", "Ethernet12,Ethernet16,Ethernet20")]) + ps.set(fg_nhg_prefix, fvs) + validate_asic_nhg_regular_ecmp(asic_db, fg_nhg_prefix) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, 3) + + # add back fgnhg group and prefix: The regular route should transition to fine grained ECMP + print("add fgnhg group and prefix: The regular route should transition to fine grained ECMP") + fvs = {"bucket_size": str(bucket_size), "match_mode": match_mode, + "max_next_hops": str(NUM_NHs)} + create_entry(config_db, FG_NHG, fg_nhg_name, fvs) + fvs = {"FG_NHG": fg_nhg_name} + create_entry(config_db, FG_NHG_PREFIX, fg_nhg_prefix, fvs) + + # Validate the transistion to Fine Grained ECMP + print("Validate the transistion to Fine Grained ECMP") + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) + nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) + + nh_oid_map = {} + nh_oid_map = get_nh_oid_map(asic_db) + + nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} + memb_dict = {} + num_exp_changes = 60 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + + # remove prefix entry + print("remove prefix entry") + asic_rt_key = get_asic_route_key(asic_db, fg_nhg_prefix) + ps._del(fg_nhg_prefix) + asic_db.wait_for_deleted_entry(ASIC_ROUTE_TB, asic_rt_key) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, 0) + + # Cleanup all FG, arp and interface + print("Cleanup all FG, arp and interface") + remove_entry(config_db, "FG_NHG_PREFIX", fg_nhg_prefix) + remove_entry(config_db, "FG_NHG", fg_nhg_name) + + for i in range(0,NUM_NHs): + if_name_key = "Ethernet" + str(i*4) + ip_pref_key = if_name_key + "|10.0.0." + str(i*2) + "/31" + remove_entry(config_db, IF_TB, ip_pref_key) + remove_entry(config_db, IF_TB, if_name_key) + dvs.port_admin_set(if_name_key, "down") + dvs.servers[i].runcmd("ip link set down dev eth0") == 0 + +def fine_grained_ecmp_match_mode_prefix_multi_route_test(dvs): + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + config_db = dvs.get_config_db() + state_db = dvs.get_state_db() + fvs_nul = {"NULL": "NULL"} + NUM0_NHs = 6 + NUM1_NHs = 4 + fg_nhg_name0 = "fgnhg0_v4" + fg_nhg_name1 = "fgnhg1_v4" + fg_nhg_prefix0 = "2.2.2.0/24" + fg_nhg_prefix1 = "3.3.3.0/24" + bucket0_size = 60 + bucket1_size = 24 + ip_to_if_map = {} + match_mode = 'prefix-based' + + # Update log level so that we can analyze the log in case the test failed + logfvs = config_db.wait_for_entry("LOGGER", "orchagent") + old_log_level = logfvs.get("LOGLEVEL") + logfvs["LOGLEVEL"] = "INFO" + config_db.update_entry("LOGGER", "orchagent", logfvs) + + ### Create first fine grained next hop group and prefix + fvs = {"bucket_size": str(bucket0_size), "match_mode": match_mode, + "max_next_hops": str(NUM0_NHs)} + create_entry(config_db, FG_NHG, fg_nhg_name0, fvs) + + fvs = {"FG_NHG": fg_nhg_name0} + create_entry(config_db, FG_NHG_PREFIX, fg_nhg_prefix0, fvs) + + ### Create shared interfaces (Last two Nexthops for the 1st prefix are shared with the 2nd prefix) + for i in range(0,NUM0_NHs+NUM1_NHs-2): + if_name_key = "Ethernet" + str(i*4) + vlan_name_key = "Vlan" + str((i+1)*4) + ip_pref_key = vlan_name_key + "|10.0.0." + str(i*2) + "/31" + fvs = {"vlanid": str((i+1)*4)} + create_entry(config_db, VLAN_TB, vlan_name_key, fvs) + fvs = {"tagging_mode": "untagged"} + create_entry(config_db, VLAN_MEMB_TB, vlan_name_key + "|" + if_name_key, fvs) + create_entry(config_db, VLAN_IF_TB, vlan_name_key, fvs_nul) + create_entry(config_db, VLAN_IF_TB, ip_pref_key, fvs_nul) + dvs.port_admin_set(if_name_key, "up") + dvs.servers[i].runcmd("ip link set down dev eth0") == 0 + dvs.servers[i].runcmd("ip link set up dev eth0") == 0 + ip_to_if_map["10.0.0." + str(1 + i*2)] = vlan_name_key + + # Wait for the software to receive the entries + time.sleep(1) + + ### Create Route for the first Prefix + ps = swsscommon.ProducerStateTable(app_db.db_connection, ROUTE_TB) + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.7,10.0.0.9,10.0.0.11"), + ("ifname", "Vlan16,Vlan20,Vlan24")]) + ps.set(fg_nhg_prefix0, fvs) + # No ASIC_DB entry we can wait for since ARP is not resolved yet, + # We just use sleep so that the sw receives this entry + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + rtbl = swsscommon.Table(adb, ASIC_ROUTE_TB) + keys = rtbl.getKeys() + found_route = False + for k in keys: + rt_key = json.loads(k) + + if rt_key['dest'] == fg_nhg_prefix0: + found_route = True + break + + # Since we didn't populate ARP yet, route should point to RIF for kernel arp resolution to occur + assert (found_route == True) + validate_asic_nhg_router_interface(asic_db, fg_nhg_prefix0) + + # Add ARP entries for both prefixes + asic_nh_count = len(asic_db.get_keys(ASIC_NH_TB)) + dvs.runcmd("arp -s 10.0.0.1 00:00:00:00:00:01") + dvs.runcmd("arp -s 10.0.0.3 00:00:00:00:00:02") + dvs.runcmd("arp -s 10.0.0.5 00:00:00:00:00:03") + dvs.runcmd("arp -s 10.0.0.7 00:00:00:00:00:04") + dvs.runcmd("arp -s 10.0.0.9 00:00:00:00:00:05") + dvs.runcmd("arp -s 10.0.0.11 00:00:00:00:00:06") + dvs.runcmd("arp -s 10.0.0.13 00:00:00:00:00:07") + dvs.runcmd("arp -s 10.0.0.15 00:00:00:00:00:08") + + asic_db.wait_for_n_keys(ASIC_NH_TB, asic_nh_count + 8) + + # verify correct number of nhg members were created for the first prefix + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket0_size) + nhgid0 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix0, bucket0_size) + + nh_oid_map = get_nh_oid_map(asic_db) + + ### Create 2nd next hop group and prefix + fvs = {"bucket_size": str(bucket1_size), "match_mode": match_mode, + "max_next_hops": str(NUM1_NHs)} + create_entry(config_db, FG_NHG, fg_nhg_name1, fvs) + + fvs = {"FG_NHG": fg_nhg_name1} + create_entry(config_db, FG_NHG_PREFIX, fg_nhg_prefix1, fvs) + + # Create Route for the 2nd Prefix + ps = swsscommon.ProducerStateTable(app_db.db_connection, ROUTE_TB) + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.13,10.0.0.15,10.0.0.9,10.0.0.11"), + ("ifname", "Vlan28,Vlan32,Vlan20,Vlan24")]) + ps.set(fg_nhg_prefix1, fvs) + # We just use sleep so that the sw receives this entry + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + rtbl = swsscommon.Table(adb, ASIC_ROUTE_TB) + keys = rtbl.getKeys() + found_route = False + for k in keys: + rt_key = json.loads(k) + + if rt_key['dest'] == fg_nhg_prefix1: + found_route = True + break + + assert (found_route == True) + + # verify correct number of nhg members were created for the 2nd prefix + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket0_size+bucket1_size) + nhgid1 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix1, bucket1_size) + + nh_oid_map = get_nh_oid_map(asic_db) + + ### Test scenarios with 3 members for the first prefix and 4 members for the 2nd prefix + + nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} + memb_dict0 = {} + num_exp_changes = 60 + memb_dict0 = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict0, num_exp_changes, fg_nhg_prefix0, nh_memb_exp_count, nh_oid_map, nhgid0, bucket0_size) + + memb_dict1 = {} + num_exp_changes = 24 + nh_memb_exp_count = {"10.0.0.13":6,"10.0.0.15":6,"10.0.0.9":6,"10.0.0.11":6} + memb_dict1 = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict1, num_exp_changes, fg_nhg_prefix1, nh_memb_exp_count, nh_oid_map, nhgid1, bucket1_size) + + # Test warm reboot + run_warm_reboot(dvs) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket0_size+bucket1_size) + + nh_oid_map = get_nh_oid_map(asic_db) + + nhgid0 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix0, bucket0_size) + nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} + num_exp_changes = 0 + memb_dict0 = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict0, num_exp_changes, fg_nhg_prefix0, nh_memb_exp_count, nh_oid_map, nhgid0, bucket0_size) + + nhgid1 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix1, bucket1_size) + num_exp_changes = 0 + nh_memb_exp_count = {"10.0.0.13":6,"10.0.0.15":6,"10.0.0.9":6,"10.0.0.11":6} + memb_dict1 = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict1, num_exp_changes, fg_nhg_prefix1, nh_memb_exp_count, nh_oid_map, nhgid1, bucket1_size) + + # Bring down 1 common next hop + nhgid0 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix0, bucket0_size) + nh_memb_exp_count = {"10.0.0.7":30,"10.0.0.11":30} + num_exp_changes = 20 + memb_dict0 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict0, num_exp_changes, fg_nhg_prefix0, nh_memb_exp_count, nh_oid_map, nhgid0, bucket0_size) + + nhgid1 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix1, bucket1_size) + nh_memb_exp_count = {"10.0.0.13":8,"10.0.0.15":8,"10.0.0.11":8} + num_exp_changes = 6 + memb_dict1 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict1, num_exp_changes, fg_nhg_prefix1, nh_memb_exp_count, nh_oid_map, nhgid1, bucket1_size) + + # Bring it back up + nhgid0 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix0, bucket0_size) + nh_memb_exp_count = {"10.0.0.7":20,"10.0.0.9":20,"10.0.0.11":20} + num_exp_changes = 20 + memb_dict0 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict0, num_exp_changes, fg_nhg_prefix0, nh_memb_exp_count, nh_oid_map, nhgid0, bucket0_size) + + nhgid1 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix1, bucket1_size) + nh_memb_exp_count = {"10.0.0.13":6,"10.0.0.15":6,"10.0.0.9":6,"10.0.0.11":6} + num_exp_changes = 6 + memb_dict1 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict1, num_exp_changes, fg_nhg_prefix1, nh_memb_exp_count, nh_oid_map, nhgid1, bucket1_size) + + # Bring down 3 next-hops + nhgid0 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix0, bucket0_size) + nh_memb_exp_count = {"10.0.0.9":60} + num_exp_changes = 40 + memb_dict0 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict0, num_exp_changes, fg_nhg_prefix0, nh_memb_exp_count, nh_oid_map, nhgid0, bucket0_size) + + nhgid1 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix1, bucket1_size) + nh_memb_exp_count = {"10.0.0.13":12,"10.0.0.11":12} + num_exp_changes = 12 + memb_dict1 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict1, num_exp_changes, fg_nhg_prefix1, nh_memb_exp_count, nh_oid_map, nhgid1, bucket1_size) + + # Bringup all the inactive nexthops at the same time + nhgid0 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix0, bucket0_size) + nh_memb_exp_count = {"10.0.0.1":10,"10.0.0.3":10,"10.0.0.5":10,"10.0.0.7":10,"10.0.0.9":10,"10.0.0.11":10} + num_exp_changes = 50 + memb_dict0 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict0, num_exp_changes, fg_nhg_prefix0, nh_memb_exp_count, nh_oid_map, nhgid0, bucket0_size) + + nhgid1 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix1, bucket1_size) + nh_memb_exp_count = {"10.0.0.13":6,"10.0.0.15":6,"10.0.0.9":6,"10.0.0.11":6} + num_exp_changes = 12 + memb_dict1 = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict1, num_exp_changes, fg_nhg_prefix1, nh_memb_exp_count, nh_oid_map, nhgid1, bucket1_size) + + # remove prefix entries + asic_rt_key = get_asic_route_key(asic_db, fg_nhg_prefix0) + ps._del(fg_nhg_prefix0) + asic_rt_key = get_asic_route_key(asic_db, fg_nhg_prefix1) + ps._del(fg_nhg_prefix1) + + asic_db.wait_for_deleted_entry(ASIC_ROUTE_TB, asic_rt_key) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, 0) + state_db.wait_for_n_keys("FG_ROUTE_TABLE", 0) + + remove_entry(config_db, "FG_NHG_PREFIX", fg_nhg_prefix0) + remove_entry(config_db, "FG_NHG_PREFIX", fg_nhg_prefix1) + # Cleanup all FG, arp and interface + remove_entry(config_db, "FG_NHG", fg_nhg_name0) + remove_entry(config_db, "FG_NHG", fg_nhg_name1) + + for i in range(0,NUM0_NHs+NUM1_NHs): + if_name_key = "Ethernet" + str(i*4) + vlan_name_key = "Vlan" + str((i+1)*4) + ip_pref_key = vlan_name_key + "|10.0.0." + str(i*2) + "/31" + remove_entry(config_db, VLAN_IF_TB, ip_pref_key) + remove_entry(config_db, VLAN_IF_TB, vlan_name_key) + remove_entry(config_db, VLAN_MEMB_TB, vlan_name_key + "|" + if_name_key) + remove_entry(config_db, VLAN_TB, vlan_name_key) + dvs.port_admin_set(if_name_key, "down") + dvs.servers[i].runcmd("ip link set down dev eth0") == 0 + +def fine_grained_ecmp_match_mode_prefix_even_distribution_test(dvs): + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + config_db = dvs.get_config_db() + state_db = dvs.get_state_db() + fvs_nul = {"NULL": "NULL"} + NUM_NHs = 16 + fg_nhg_name = "fgnhg_v4" + fg_nhg_prefix = "2.2.2.0/24" + bucket_size = 256 + ip_to_if_map = {} + match_mode = 'prefix-based' + + # Update log level so that we can analyze the log in case the test failed + logfvs = config_db.wait_for_entry("LOGGER", "orchagent") + old_log_level = logfvs.get("LOGLEVEL") + logfvs["LOGLEVEL"] = "INFO" + config_db.update_entry("LOGGER", "orchagent", logfvs) + + fvs = {"bucket_size": str(bucket_size), "match_mode": match_mode, + "max_next_hops": str(NUM_NHs)} + create_entry(config_db, FG_NHG, fg_nhg_name, fvs) + + fvs = {"FG_NHG": fg_nhg_name} + create_entry(config_db, FG_NHG_PREFIX, fg_nhg_prefix, fvs) + + for i in range(0,NUM_NHs): + if_name_key = "Ethernet" + str(i*4) + ip_pref_key = if_name_key + "|10.0.0." + str(i*2) + "/31" + create_entry(config_db, IF_TB, if_name_key, fvs_nul) + create_entry(config_db, IF_TB, ip_pref_key, fvs_nul) + dvs.port_admin_set(if_name_key, "up") + dvs.servers[i].runcmd("ip link set down dev eth0") == 0 + dvs.servers[i].runcmd("ip link set up dev eth0") == 0 + ip_to_if_map["10.0.0." + str(1 + i*2)] = if_name_key + + # Wait for the software to receive the entries + time.sleep(1) + + # Resolve ARP for all NUM_NHs next-hops + asic_nh_count = len(asic_db.get_keys(ASIC_NH_TB)) + for i in range(NUM_NHs): + dvs.runcmd(f"arp -s 10.0.0.{1 + i * 2} 00:00:00:00:00:{1 + i * 2:02x}") + + asic_db.wait_for_n_keys(ASIC_NH_TB, asic_nh_count + NUM_NHs) + + # Add route with NUM_NHs next-hops + print(f"Add route with {NUM_NHs} next-hops") + ps = swsscommon.ProducerStateTable(app_db.db_connection, ROUTE_TB) + fvs = swsscommon.FieldValuePairs([("nexthop", ",".join([f"10.0.0.{1 + i * 2}" for i in range(NUM_NHs)])), + ("ifname", ",".join([f"Ethernet{i * 4}" for i in range(NUM_NHs)]))]) + ps.set(fg_nhg_prefix, fvs) + + # We just use sleep so that the sw receives this entry + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + rtbl = swsscommon.Table(adb, ASIC_ROUTE_TB) + keys = rtbl.getKeys() + found_route = False + for k in keys: + rt_key = json.loads(k) + + if rt_key['dest'] == fg_nhg_prefix: + found_route = True + break + + assert (found_route == True) + + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) + nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) + + ### Start with NUM_NHs members and verify that distribution is even after a series of add/remove operations + for _ in range(50): + print() + print(f"Iteration: {_}, started test with {NUM_NHs} members, bucket_size: {bucket_size}") + nh_ips = [f"10.0.0.{1 + i * 2}" for i in range(NUM_NHs)] + program_route_and_validate_distribtution(app_db.db_connection, state_db, ip_to_if_map, + fg_nhg_prefix, nh_ips, bucket_size) + + ### remove 1 to 3 nexthops in each step and verify that distribution is even + removed_ips = [] + while len(nh_ips) > 1: + num_to_remove = random.randint(1, min(3, len(nh_ips) - 1)) + removed_in_iteration = [] + for _ in range(num_to_remove): + removed_ip = nh_ips.pop() + removed_ips.append(removed_ip) + removed_in_iteration.append(removed_ip) + print(f"Removed IPs: {removed_in_iteration}, No. of remaining IPs: {len(nh_ips)}") + program_route_and_validate_distribtution(app_db.db_connection, state_db, ip_to_if_map, + fg_nhg_prefix, nh_ips, bucket_size) + + ### add 1-3 nexthops in each step and verify that distribution stays even + while len(nh_ips) < NUM_NHs: + num_to_add = random.randint(1, min(3, len(removed_ips))) + added_in_iteration = [] + for _ in range(num_to_add): + if removed_ips: + added_ip = removed_ips.pop(0) + nh_ips.append(added_ip) + added_in_iteration.append(added_ip) + print(f"Added IPs: {added_in_iteration}, Total IPs: {len(nh_ips)}") + program_route_and_validate_distribtution(app_db.db_connection, state_db, ip_to_if_map, + fg_nhg_prefix, nh_ips, bucket_size) + + # Remove route + print("Remove route") + asic_rt_key = get_asic_route_key(asic_db, fg_nhg_prefix) + ps._del(fg_nhg_prefix) + + # validate routes and nhg member in asic db, route entry in state db are removed + print("validate routes and nhg member in asic db, route entry in state db are removed") + asic_db.wait_for_deleted_entry(ASIC_ROUTE_TB, asic_rt_key) + asic_db.wait_for_n_keys(ASIC_NHG_MEMB, 0) + state_db.wait_for_n_keys("FG_ROUTE_TABLE", 0) + + + # Cleanup all FG, arp and interface + print("Cleanup all FG, arp and interface") + remove_entry(config_db, "FG_NHG_PREFIX", fg_nhg_prefix) + remove_entry(config_db, "FG_NHG", fg_nhg_name) + + for i in range(0,NUM_NHs): + if_name_key = "Ethernet" + str(i*4) + ip_pref_key = if_name_key + "|10.0.0." + str(i*2) + "/31" + remove_entry(config_db, IF_TB, ip_pref_key) + remove_entry(config_db, IF_TB, if_name_key) + dvs.port_admin_set(if_name_key, "down") + dvs.servers[i].runcmd("ip link set down dev eth0") == 0 + + class TestFineGrainedNextHopGroup(object): def test_fgnhg_matchmode_route(self, dvs, testlog): ''' @@ -637,6 +1347,24 @@ def test_fgnhg_matchmode_nexthop(self, dvs, testlog): ''' fine_grained_ecmp_base_test(dvs, 'nexthop-based') + def test_fgnhg_matchmode_prefix(self, dvs, testlog): + ''' + Test for match_mode prefix-based + ''' + fine_grained_ecmp_match_mode_prefix_test(dvs) + + def test_fgnhg_matchmode_prefix_multi_route(self, dvs, testlog): + ''' + Test for match_mode prefix-based with multiple routes + ''' + fine_grained_ecmp_match_mode_prefix_multi_route_test(dvs); + + def test_fgnhg_matchmode_prefix_even_distribution(self, dvs, testlog): + ''' + Test for match_mode prefix-based with up to 16 nexthops and even distribution + ''' + fine_grained_ecmp_match_mode_prefix_even_distribution_test(dvs); + def test_fgnhg_more_nhs_nondiv_bucket_size(self, dvs, testlog): ''' Test Fine Grained ECMP with a greater number of FG members and @@ -680,49 +1408,58 @@ def test_fgnhg_more_nhs_nondiv_bucket_size(self, dvs, testlog): nh_oid_map = get_nh_oid_map(asic_db) # The route had been created with 0 members in bank + memb_dict = {} nh_memb_exp_count = {"10.0.0.1":64,"10.0.0.11":64} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Add 2 nhs to both bank 0 and bank 1 nh_memb_exp_count = {"10.0.0.1":22,"10.0.0.3":21,"10.0.0.5":21,"10.0.0.11":22, "10.0.0.13":21,"10.0.0.15":21} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128-22-22 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Add 2 more nhs to both bank 0 and bank 1 nh_memb_exp_count = {"10.0.0.1":13,"10.0.0.3":13,"10.0.0.5":13,"10.0.0.7":12, "10.0.0.9":13,"10.0.0.11":13,"10.0.0.13":13,"10.0.0.15":13,"10.0.0.17":12,"10.0.0.19":13} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128-6*13 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Remove 1 nh from bank 0 and remove 2 nhs from bank 1 nh_memb_exp_count = {"10.0.0.3":16,"10.0.0.5":16,"10.0.0.7":16,"10.0.0.9":16, "10.0.0.11":22,"10.0.0.13":21,"10.0.0.19":21} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128-13-13-12-13-13-13-13 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Remove 1 nh from bank 0 and add 1 nh to bank 1 nh_memb_exp_count = {"10.0.0.3":22,"10.0.0.7":21,"10.0.0.9":21,"10.0.0.13":16, "10.0.0.15":16,"10.0.0.17":16,"10.0.0.19":16} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128-5*16 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Remove 2 nh from bank 0 and remove 3 nh from bank 1 nh_memb_exp_count = {"10.0.0.7":64,"10.0.0.11":64} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128-21 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Add 2 nhs to bank 0 and remove all nh from bank 1 nh_memb_exp_count = {"10.0.0.5":42,"10.0.0.7":44,"10.0.0.9":42} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128-22 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Add 2 nhs to bank 0 and add 1 nh to bank 1 nh_memb_exp_count = {"10.0.0.1":12,"10.0.0.3":13,"10.0.0.5":13,"10.0.0.7":13, "10.0.0.9":13,"10.0.0.11":64} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128-13-13-13 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Remove route # remove prefix entry @@ -739,7 +1476,7 @@ def test_fgnhg_more_nhs_nondiv_bucket_size(self, dvs, testlog): def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): ''' Test route/nh transitions to/from Fine Grained ECMP and Regular ECMP. - Create multiple prefixes pointing to the Fine Grained nhs and ensure + Create multiple prefixes pointing to the Fine Grained nhs and ensure fine grained ECMP ASIC objects were created for this scenario as expected. ''' app_db = dvs.get_app_db() @@ -788,11 +1525,13 @@ def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): nhgid = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix, bucket_size) nh_oid_map = get_nh_oid_map(asic_db) + memb_dict = {} # The route had been created with 0 members in bank nh_memb_exp_count = {"10.0.0.1":64,"10.0.0.5":64} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 128 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) # Add a 2nd prefix associated with the same set of next-hops fg_nhg_prefix_2 = "5.5.5.0/16" @@ -802,8 +1541,9 @@ def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size*2) nhgid_2 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix_2, bucket_size) nh_memb_exp_count = {"10.0.0.1":64,"10.0.0.5":64} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix_2, nh_memb_exp_count, nh_oid_map, nhgid_2, bucket_size) + num_exp_changes = 0 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix_2, nh_memb_exp_count, nh_oid_map, nhgid_2, bucket_size) # Add a 3rd prefix with a next-hop(10.0.0.9) not defined for FG ECMP # Should end up as regular ECMP @@ -820,8 +1560,9 @@ def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): nhgid_3 = validate_asic_nhg_fine_grained_ecmp(asic_db, fg_nhg_prefix_3, bucket_size) asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size*3) nh_memb_exp_count = {"10.0.0.1":64,"10.0.0.5":64} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix_3, nh_memb_exp_count, nh_oid_map, nhgid_3, bucket_size) + num_exp_changes = 0 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix_3, nh_memb_exp_count, nh_oid_map, nhgid_3, bucket_size) # Add the 10.0.0.9 next-hop again, it should transition back to regular ECMP fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1,10.0.0.5,10.0.0.9"), ("ifname", "Ethernet0,Ethernet8,Ethernet16")]) @@ -836,11 +1577,13 @@ def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): # Change FG nhs for one route, ensure that the other route nh is unaffected nh_memb_exp_count = {"10.0.0.1":32,"10.0.0.3":32,"10.0.0.5":32,"10.0.0.7":32} - program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, - fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) + num_exp_changes = 64 + memb_dict = program_route_and_validate_fine_grained_ecmp(app_db.db_connection, asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix, nh_memb_exp_count, nh_oid_map, nhgid, bucket_size) nh_memb_exp_count = {"10.0.0.1":64,"10.0.0.5":64} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix_2, nh_memb_exp_count, nh_oid_map, nhgid_2, bucket_size) + num_exp_changes = 64 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix_2, nh_memb_exp_count, nh_oid_map, nhgid_2, bucket_size) # Remove route # remove prefix entry @@ -850,8 +1593,9 @@ def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): asic_db.wait_for_n_keys(ASIC_NHG_MEMB, bucket_size) # Ensure that 2nd route is still here and then delete it nh_memb_exp_count = {"10.0.0.1":64,"10.0.0.5":64} - validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, - fg_nhg_prefix_2, nh_memb_exp_count, nh_oid_map, nhgid_2, bucket_size) + num_exp_changes = 0 + memb_dict = validate_fine_grained_asic_n_state_db_entries(asic_db, state_db, ip_to_if_map, + memb_dict, num_exp_changes, fg_nhg_prefix_2, nh_memb_exp_count, nh_oid_map, nhgid_2, bucket_size) # Delete the 2nd route as well asic_rt_key = get_asic_route_key(asic_db, fg_nhg_prefix_2) ps._del(fg_nhg_prefix_2) diff --git a/tests/test_fips_macsec_post.py b/tests/test_fips_macsec_post.py new file mode 100644 index 00000000000..4d0c192e1a8 --- /dev/null +++ b/tests/test_fips_macsec_post.py @@ -0,0 +1,200 @@ +from dvslib.dvs_common import wait_for_result, PollingConfig + +# State DB POST state +STATE_DB_MACSEC_POST_TABLE = "FIPS_MACSEC_POST_TABLE" +STATE_DB_MACSEC_POST_STATE_DISABLED = "disabled" +STATE_DB_MACSEC_POST_STATE_SWITCH_LEVEL_POST_IN_PROGRESS = "switch-level-post-in-progress" +STATE_DB_MACSEC_POST_STATE_MACSEC_LEVEL_POST_IN_PROGRESS = "macsec-level-post-in-progress" +STATE_DB_MACSEC_POST_STATE_PASS = "pass" +STATE_DB_MACSEC_POST_STATE_FAIL = "fail" + +# SAI POST capability +SAI_MACSEC_POST_CAPABILITY = "macsec-post-capability" +SAI_MACSEC_POST_CAPABILITY_NOT_SUPPORTED = "not-supported" +SAI_MACSEC_POST_CAPABILITY_SWITCH = "switch" +SAI_MACSEC_POST_CAPABILITY_MACSEC = "macsec" + +# VS SAI POST config +VS_SAI_POST_CONFIG_FILE = "/tmp/vs_fips_post_config" +VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_NOTIFY = "switch-macsec-post-status-notify" +VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_QUERY = "switch-macsec-post-status-query" +VS_SAI_POST_CONFIG_INGRESS_MACSEC_POST_STATUS_NOTIFY = "ingress-macsec-post-status-notify" +VS_SAI_POST_CONFIG_EGRESS_MACSEC_POST_STATUS_NOTIFY = "egress-macsec-post-status-notify" +SAI_SWITCH_MACSEC_POST_STATUS_PASS = "SAI_SWITCH_MACSEC_POST_STATUS_PASS" +SAI_SWITCH_MACSEC_POST_STATUS_FAIL = "SAI_SWITCH_MACSEC_POST_STATUS_FAIL" +SAI_SWITCH_MACSEC_POST_STATUS_IN_PROGRESS = "SAI_SWITCH_MACSEC_POST_STATUS_IN_PROGRESS" +SAI_MACSEC_POST_STATUS_PASS = "SAI_MACSEC_POST_STATUS_PASS" +SAI_MACSEC_POST_STATUS_FAIL = "SAI_MACSEC_POST_STATUS_FAIL" +SAI_MACSEC_POST_STATUS_IN_PROGRESS = "SAI_MACSEC_POST_STATUS_IN_PROGRESS" + +# POST syslogs +SWITCH_MACSEC_POST_PASS_SYSYLOG = "Switch MACSec POST passed" +SWITCH_MACSEC_POST_FAIL_SYSYLOG = "Switch MACSec POST failed" +SWITCH_MACSEC_POST_FAIL_SYSYLOG_SAI_NOT_SUPPORTED = "MACSec POST is not supported by SAI" +MACSEC_POST_ENABLED_SYSLOG = "Init MACSec objects and enable POST" +INGRESS_MACSEC_POST_PASS_SYSLOG = "Ingress MACSec POST passed" +INGRESS_MACSEC_POST_FAIL_SYSLOG = "Ingress MACSec POST failed" +EGRESS_MACSEC_POST_PASS_SYSLOG = "Egress MACSec POST passed" +EGRESS_MACSEC_POST_FAIL_SYSLOG = "Egress MACSec POST failed" +MACSEC_POST_PASS_SYSLOG = "Ingress and egress MACSec POST passed" +MACSEC_POST_FAIL_SYSLOG = "MACSec POST failed" + +ORCHAGENT_SH_BACKUP = "/usr/bin/orchagent_sh_macsec_post_ut_backup" + +class TestMacsecPost(object): + def check_state_db_post_state(self, dvs, expected_state): + dvs.get_state_db().wait_for_field_match(STATE_DB_MACSEC_POST_TABLE, "sai", + {'post_state': expected_state}) + + def restart_dvs_with_post_config(self, dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_SWITCH, + sai_post_notification_status_config=None, sai_macsec_post_enabled=True): + + sai_post_config = {} + if sai_post_capability != SAI_MACSEC_POST_CAPABILITY_NOT_SUPPORTED: + sai_post_config[SAI_MACSEC_POST_CAPABILITY] = sai_post_capability + if sai_post_notification_status_config: + sai_post_config.update(sai_post_notification_status_config) + dvs.runcmd(["sh", "-c", f"rm -f {VS_SAI_POST_CONFIG_FILE}"]) + dvs.runcmd(["sh", "-c", f"touch {VS_SAI_POST_CONFIG_FILE}"]) + for k, v in sai_post_config.items(): + dvs.runcmd(["sh", "-c", f"echo '{k} {v}' >> {VS_SAI_POST_CONFIG_FILE}"]) + + if sai_macsec_post_enabled: + rc, _ = dvs.runcmd(["sh", "-c", f"ls {ORCHAGENT_SH_BACKUP}"]) + if rc == 0: + dvs.runcmd(f"cp {ORCHAGENT_SH_BACKUP} /usr/bin/orchagent.sh") + else: + dvs.runcmd(f"cp /usr/bin/orchagent.sh {ORCHAGENT_SH_BACKUP}") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -M /g' /usr/bin/orchagent.sh") + + marker = dvs.add_log_marker() + + dvs.runcmd('killall5 -15') + dvs.net_cleanup() + dvs.destroy_servers() + dvs.create_servers() + dvs.restart() + + return marker + + def check_syslog(self, dvs, marker, log): + def do_check_syslog(): + (ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" %(marker, log)]) + return (int(out.strip()) >= 1, None) + max_poll = PollingConfig(polling_interval=5, timeout=600, strict=True) + wait_for_result(do_check_syslog, polling_config=max_poll) + + def check_asic_db_post_state(self, dvs, sai_macsec_post_enabled=True, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_SWITCH): + switch_oids = dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_SWITCH") + assert len(switch_oids) == 1 + entry = dvs.get_asic_db().get_entry("ASIC_STATE", f"SAI_OBJECT_TYPE_SWITCH:{switch_oids[0]}") + if sai_macsec_post_enabled: + assert entry["SAI_SWITCH_ATTR_MACSEC_ENABLE_POST"] and entry["SAI_SWITCH_ATTR_SWITCH_MACSEC_POST_STATUS_NOTIFY"] + else: + assert "SAI_SWITCH_ATTR_MACSEC_ENABLE_POST" not in entry and "SAI_SWITCH_ATTR_SWITCH_MACSEC_POST_STATUS_NOTIFY" not in entry + + macsec_oids = dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_MACSEC") + if sai_post_capability == SAI_MACSEC_POST_CAPABILITY_SWITCH: + # No MACSec object should be created since POST is supported in switch init. + assert not macsec_oids + elif sai_post_capability == SAI_MACSEC_POST_CAPABILITY_MACSEC: + # POST is only supported in MACSec init. Two MACSec objects - ingress and egress - must be created to enable POST. + assert len(macsec_oids) == 2 + for oid in macsec_oids: + entry = dvs.get_asic_db().get_entry("ASIC_STATE", f"SAI_OBJECT_TYPE_MACSEC:{oid}") + assert entry["SAI_MACSEC_ATTR_ENABLE_POST"] + + def test_PostDisabled(self, dvs): + self.restart_dvs_with_post_config(dvs, sai_macsec_post_enabled=False) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_DISABLED) + self.check_asic_db_post_state(dvs, sai_macsec_post_enabled=False) + + def test_PostEnabled_InitialState(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_QUERY : SAI_SWITCH_MACSEC_POST_STATUS_IN_PROGRESS} + self.restart_dvs_with_post_config(dvs, sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_SWITCH_LEVEL_POST_IN_PROGRESS) + self.check_asic_db_post_state(dvs) + + def test_PostEnabled_SaiPostNotSupported(self, dvs): + marker = self.restart_dvs_with_post_config(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_NOT_SUPPORTED) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_DISABLED) + self.check_syslog(dvs, marker, SWITCH_MACSEC_POST_FAIL_SYSYLOG_SAI_NOT_SUPPORTED) + self.check_asic_db_post_state(dvs) + + def test_PostEnabled_SwitchLevelPost_NotificationPass(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_NOTIFY : SAI_SWITCH_MACSEC_POST_STATUS_PASS, + VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_QUERY : SAI_SWITCH_MACSEC_POST_STATUS_IN_PROGRESS} + marker = self.restart_dvs_with_post_config(dvs, sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_PASS) + self.check_syslog(dvs, marker, SWITCH_MACSEC_POST_PASS_SYSYLOG) + self.check_asic_db_post_state(dvs) + + def test_PostEnabled_SwitchLevelPost_NotificationFail(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_NOTIFY : SAI_SWITCH_MACSEC_POST_STATUS_FAIL, + VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_QUERY : SAI_SWITCH_MACSEC_POST_STATUS_IN_PROGRESS} + marker = self.restart_dvs_with_post_config(dvs, sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_FAIL) + self.check_syslog(dvs, marker, SWITCH_MACSEC_POST_FAIL_SYSYLOG) + self.check_asic_db_post_state(dvs) + + def test_PostEnabled_SwitchLevelPost_QueryPass(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_QUERY : SAI_SWITCH_MACSEC_POST_STATUS_PASS} + marker =self.restart_dvs_with_post_config(dvs, sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_PASS) + self.check_syslog(dvs, marker, SWITCH_MACSEC_POST_PASS_SYSYLOG) + self.check_asic_db_post_state(dvs) + + def test_PostEnabled_SwitchLevelPost_QueryFail(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_SWITCH_POST_STATUS_QUERY : SAI_SWITCH_MACSEC_POST_STATUS_FAIL} + marker =self.restart_dvs_with_post_config(dvs, sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_FAIL) + self.check_syslog(dvs, marker, SWITCH_MACSEC_POST_FAIL_SYSYLOG) + self.check_asic_db_post_state(dvs) + + def test_PostEnabled_MacsecLevelPost_StateBeforeNotification(self, dvs): + marker = self.restart_dvs_with_post_config(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_MACSEC_LEVEL_POST_IN_PROGRESS) + self.check_syslog(dvs, marker, MACSEC_POST_ENABLED_SYSLOG) + self.check_asic_db_post_state(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC) + + def test_PostEnabled_MacsecLevelPost_NotificationPass(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_INGRESS_MACSEC_POST_STATUS_NOTIFY : SAI_MACSEC_POST_STATUS_PASS, + VS_SAI_POST_CONFIG_EGRESS_MACSEC_POST_STATUS_NOTIFY : SAI_MACSEC_POST_STATUS_PASS} + marker = self.restart_dvs_with_post_config(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC, + sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_PASS) + for syslog in [INGRESS_MACSEC_POST_PASS_SYSLOG, EGRESS_MACSEC_POST_PASS_SYSLOG, MACSEC_POST_PASS_SYSLOG]: + self.check_syslog(dvs, marker, syslog) + self.check_asic_db_post_state(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC) + + def test_PostEnabled_MacsecLevelPost_NotificationIngressPostFail(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_INGRESS_MACSEC_POST_STATUS_NOTIFY : SAI_MACSEC_POST_STATUS_FAIL, + VS_SAI_POST_CONFIG_EGRESS_MACSEC_POST_STATUS_NOTIFY : SAI_MACSEC_POST_STATUS_PASS} + marker = self.restart_dvs_with_post_config(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC, + sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_FAIL) + for syslog in [INGRESS_MACSEC_POST_FAIL_SYSLOG, EGRESS_MACSEC_POST_PASS_SYSLOG, MACSEC_POST_FAIL_SYSLOG]: + self.check_syslog(dvs, marker, syslog) + self.check_asic_db_post_state(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC) + + def test_PostEnabled_MacsecLevelPost_NotificationEgressPostFail(self, dvs): + sai_post_notification_status_config = {VS_SAI_POST_CONFIG_INGRESS_MACSEC_POST_STATUS_NOTIFY : SAI_MACSEC_POST_STATUS_PASS, + VS_SAI_POST_CONFIG_EGRESS_MACSEC_POST_STATUS_NOTIFY : SAI_MACSEC_POST_STATUS_FAIL} + marker = self.restart_dvs_with_post_config(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC, + sai_post_notification_status_config=sai_post_notification_status_config) + self.check_state_db_post_state(dvs, STATE_DB_MACSEC_POST_STATE_FAIL) + for syslog in [INGRESS_MACSEC_POST_PASS_SYSLOG, EGRESS_MACSEC_POST_FAIL_SYSLOG, MACSEC_POST_FAIL_SYSLOG]: + self.check_syslog(dvs, marker, syslog) + self.check_asic_db_post_state(dvs, sai_post_capability=SAI_MACSEC_POST_CAPABILITY_MACSEC) + + def test_CleanUp(self,dvs): + rc, _ = dvs.runcmd(["sh", "-c", f"ls {ORCHAGENT_SH_BACKUP}"]) + if rc == 0: + dvs.runcmd(f"cp {ORCHAGENT_SH_BACKUP} /usr/bin/orchagent.sh") + dvs.runcmd(["sh", "-c", f"rm -f {VS_SAI_POST_CONFIG_FILE}"]) + dvs.restart() + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index f590b7748c3..f64073424a6 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -1,11 +1,11 @@ import time import pytest +from dvslib.dvs_flex_counter import TestFlexCountersBase, NUMBER_OF_RETRIES from swsscommon import swsscommon TUNNEL_TYPE_MAP = "COUNTERS_TUNNEL_TYPE_MAP" ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" -NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" counter_group_meta = { @@ -78,67 +78,28 @@ 'name_map': 'COUNTERS_ROUTE_NAME_MAP', 'pre_test': 'pre_route_flow_counter_test', 'post_test': 'post_route_flow_counter_test', + }, + 'wred_queue_counter': { + 'key': 'WRED_ECN_QUEUE', + 'group_name': 'WRED_ECN_QUEUE_STAT_COUNTER', + 'name_map': 'COUNTERS_QUEUE_NAME_MAP', + }, + 'wred_port_counter': { + 'key': 'WRED_ECN_PORT', + 'group_name': 'WRED_ECN_PORT_STAT_COUNTER', + 'name_map': 'COUNTERS_PORT_NAME_MAP', + }, + 'srv6_counter': { + 'key': 'SRV6', + 'group_name': 'SRV6_STAT_COUNTER', + 'name_map': 'COUNTERS_SRV6_NAME_MAP', + 'pre_test': 'pre_srv6_counter_test', + 'post_test': 'post_srv6_counter_test', } } -class TestFlexCounters(object): - - def setup_dbs(self, dvs): - self.config_db = dvs.get_config_db() - self.flex_db = dvs.get_flex_db() - self.counters_db = dvs.get_counters_db() - self.app_db = dvs.get_app_db() - - def wait_for_table(self, table): - for retry in range(NUMBER_OF_RETRIES): - counters_keys = self.counters_db.db_connection.hgetall(table) - if len(counters_keys) > 0: - return - else: - time.sleep(1) - - assert False, str(table) + " not created in Counters DB" - - def wait_for_table_empty(self, table): - for retry in range(NUMBER_OF_RETRIES): - counters_keys = self.counters_db.db_connection.hgetall(table) - if len(counters_keys) == 0: - return - else: - time.sleep(1) - - assert False, str(table) + " is still in Counters DB" - - def wait_for_id_list(self, stat, name, oid): - for retry in range(NUMBER_OF_RETRIES): - id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() - if len(id_list) > 0: - return - else: - time.sleep(1) - - assert False, "No ID list for counter " + str(name) - - def wait_for_id_list_remove(self, stat, name, oid): - for retry in range(NUMBER_OF_RETRIES): - id_list = self.flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() - if len(id_list) == 0: - return - else: - time.sleep(1) - - assert False, "ID list for counter " + str(name) + " is still there" - - def wait_for_interval_set(self, group, interval): - interval_value = None - for retry in range(NUMBER_OF_RETRIES): - interval_value = self.flex_db.db_connection.hget("FLEX_COUNTER_GROUP_TABLE:" + group, 'POLL_INTERVAL') - if interval_value == interval: - return - else: - time.sleep(1) - assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) +class TestFlexCounters(TestFlexCountersBase): def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): for retry in range(NUMBER_OF_RETRIES): @@ -152,10 +113,6 @@ def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) - def verify_no_flex_counters_tables(self, counter_stat): - counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) - assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" - def verify_no_flex_counters_tables_after_delete(self, counter_stat): for retry in range(NUMBER_OF_RETRIES): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat + ":") @@ -165,13 +122,6 @@ def verify_no_flex_counters_tables_after_delete(self, counter_stat): time.sleep(1) assert False, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist after removing the entries" - def verify_flex_counters_populated(self, map, stat): - counters_keys = self.counters_db.db_connection.hgetall(map) - for counter_entry in counters_keys.items(): - name = counter_entry[0] - oid = counter_entry[1] - self.wait_for_id_list(stat, name, oid) - def verify_tunnel_type_vxlan(self, meta_data, type_map): counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) for counter_entry in counters_keys.items(): @@ -186,53 +136,13 @@ def verify_only_phy_ports_created(self, meta_data): for port_stat in port_counters_stat_keys: assert port_stat in dict(port_counters_keys.items()).values(), "Non PHY port created on PORT_STAT_COUNTER group: {}".format(port_stat) - def set_flex_counter_group_status(self, group, map, status='enable', check_name_map=True): - group_stats_entry = {"FLEX_COUNTER_STATUS": status} - self.config_db.create_entry("FLEX_COUNTER_TABLE", group, group_stats_entry) - if check_name_map: - if status == 'enable': - self.wait_for_table(map) - else: - self.wait_for_table_empty(map) - - def set_flex_counter_group_interval(self, key, group, interval): - group_stats_entry = {"POLL_INTERVAL": interval} - self.config_db.create_entry("FLEX_COUNTER_TABLE", key, group_stats_entry) - self.wait_for_interval_set(group, interval) - def set_only_config_db_buffers_field(self, value): fvs = {'create_only_config_db_buffers' : value} self.config_db.update_entry("DEVICE_METADATA", "localhost", fvs) @pytest.mark.parametrize("counter_type", counter_group_meta.keys()) def test_flex_counters(self, dvs, counter_type): - """ - The test will check there are no flex counters tables on FlexCounter DB when the counters are disabled. - After enabling each counter group, the test will check the flow of creating flex counters tables on FlexCounter DB. - For some counter types the MAPS on COUNTERS DB will be created as well after enabling the counter group, this will be also verified on this test. - """ - self.setup_dbs(dvs) - meta_data = counter_group_meta[counter_type] - counter_key = meta_data['key'] - counter_stat = meta_data['group_name'] - counter_map = meta_data['name_map'] - pre_test = meta_data.get('pre_test') - post_test = meta_data.get('post_test') - meta_data['dvs'] = dvs - - self.verify_no_flex_counters_tables(counter_stat) - - if pre_test: - cb = getattr(self, pre_test) - cb(meta_data) - - self.set_flex_counter_group_status(counter_key, counter_map) - self.verify_flex_counters_populated(counter_map, counter_stat) - self.set_flex_counter_group_interval(counter_key, counter_stat, '2500') - - if post_test: - cb = getattr(self, post_test) - cb(meta_data) + self.verify_flex_counter_flow(dvs, counter_group_meta[counter_type]) def pre_rif_counter_test(self, meta_data): self.config_db.db_connection.hset('INTERFACE|Ethernet0', "NULL", "NULL") @@ -291,6 +201,19 @@ def pre_route_flow_counter_test(self, meta_data): dvs.servers[1].runcmd("ping -6 -c 1 2001::1") dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 2000::/64 2001::2\"") + def pre_srv6_counter_test(self, meta_data): + dvs = meta_data['dvs'] + dvs.runcmd("ip link add sr0 type dummy") + dvs.runcmd("ip link set sr0 up") + + self.config_db.create_entry("SRV6_MY_LOCATORS", "loc1", {"prefix": "1000:0:1::", "block_len": "32", "node_len": "16", "func_len": "0", "arg_len": "0"}) + self.config_db.create_entry("SRV6_MY_SIDS", f'loc1|1000:0:1::/48', {"decap_dscp_mode": "pipe"}) + + loc_cmd = 'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "locator loc1" -c "prefix 1000:0:1::/48 block-len 32 node-len 16 func-bits 0" -c "behavior usid"' + sid_cmd = 'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "sid 1000:0:1::/48 locator loc1 behavior uN"' + dvs.runcmd(loc_cmd) + dvs.runcmd(sid_cmd) + def post_rif_counter_test(self, meta_data): self.config_db.db_connection.hdel('INTERFACE|Ethernet0|192.168.0.1/24', "NULL") @@ -364,6 +287,18 @@ def post_route_flow_counter_test(self, meta_data): dvs.servers[1].runcmd("ip -6 address del 2001::2/64 dev eth0") self.config_db.delete_entry('FLOW_COUNTER_ROUTE_PATTERN', '2000::/64') + def post_srv6_counter_test(self, meta_data): + dvs = meta_data['dvs'] + sid_cmd = 'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "no sid 1000:0:1::/48 locator loc1 behavior uN"' + loc_cmd = 'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "no locator loc1"' + dvs.runcmd(sid_cmd) + dvs.runcmd(loc_cmd) + + self.config_db.delete_entry("SRV6_MY_SIDS", f"loc1|1000:0:1::/48") + self.config_db.delete_entry("SRV6_MY_LOCATORS", "loc1") + + dvs.runcmd("ip link del sr0 type dummy") + def test_add_remove_trap(self, dvs): """Test steps: 1. Enable trap_flow_counter @@ -716,12 +651,12 @@ def remove_ip_address(self, interface, ip): def set_admin_status(self, interface, status): self.config_db.update_entry("PORT", interface, {"admin_status": status}) - @pytest.mark.parametrize('counter_type', [('queue_counter'), ('pg_drop_counter')]) - def test_create_only_config_db_buffers_false(self, dvs, counter_type): + @pytest.mark.parametrize('counter_type_id', [('queue_counter', '8'), ('pg_drop_counter', '7'), ('wred_queue_counter', '6')]) + def test_create_only_config_db_buffers_false(self, dvs, counter_type_id): """ Test steps: 1. By default the configuration knob 'create_only_config_db_value' is missing. - 2. Get the counter OID for the interface 'Ethernet0:7' from the counters database. + 2. Get the counter OID for the interface 'Ethernet0', queue 8 or PG 7, from the counters database. 3. Perform assertions based on the 'create_only_config_db_value': - If 'create_only_config_db_value' is 'false' or does not exist, assert that the counter OID has a valid OID value. @@ -730,10 +665,11 @@ def test_create_only_config_db_buffers_false(self, dvs, counter_type): counter_type (str): The type of counter being tested """ self.setup_dbs(dvs) + counter_type, index = counter_type_id meta_data = counter_group_meta[counter_type] self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) - counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], 'Ethernet0:7') + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], 'Ethernet0:' + index) assert counter_oid is not None, "Counter OID should have a valid OID value when create_only_config_db_value is 'false' or does not exist" def test_create_remove_buffer_pg_watermark_counter(self, dvs): @@ -765,12 +701,12 @@ def test_create_remove_buffer_pg_watermark_counter(self, dvs): self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) - @pytest.mark.parametrize('counter_type', [('queue_counter'), ('pg_drop_counter')]) - def test_create_only_config_db_buffers_true(self, dvs, counter_type): + @pytest.mark.parametrize('counter_type_id', [('queue_counter', '8'), ('pg_drop_counter', '7'), ('wred_queue_counter', '6')]) + def test_create_only_config_db_buffers_true(self, dvs, counter_type_id): """ Test steps: 1. The 'create_only_config_db_buffers' was set to 'true' by previous test. - 2. Get the counter OID for the interface 'Ethernet0:7' from the counters database. + 2. Get the counter OID for the interface 'Ethernet0', queue 8 or PG 7, from the counters database. 3. Perform assertions based on the 'create_only_config_db_value': - If 'create_only_config_db_value' is 'true', assert that the counter OID is None. @@ -778,13 +714,33 @@ def test_create_only_config_db_buffers_true(self, dvs, counter_type): dvs (object): virtual switch object counter_type (str): The type of counter being tested """ + counter_type, index = counter_type_id self.setup_dbs(dvs) meta_data = counter_group_meta[counter_type] self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) - counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], 'Ethernet0:7') + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], 'Ethernet0:' + index) assert counter_oid is None, "Counter OID should be None when create_only_config_db_value is 'true'" + def test_wred_port_stats_status(self, dvs): + """ + Test steps: + 1. This test tests the counter status for the wred port stats. + + Args: + dvs (object): virtual switch object + """ + counter_type = 'wred_port_counter' + self.setup_dbs(dvs) + meta_data = counter_group_meta[counter_type] + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], 'Ethernet0') + stats_entry_disable = {"FLEX_COUNTER_STATUS": "disable"} + self.config_db.set_entry("FLEX_COUNTER_TABLE", meta_data['key'], stats_entry_disable) + stats_entry_enable = {"FLEX_COUNTER_STATUS": "enable"} + self.config_db.set_entry("FLEX_COUNTER_TABLE", meta_data['key'], stats_entry_enable) + assert(counter_oid) + def test_create_remove_buffer_queue_counter(self, dvs): """ Test steps: @@ -802,12 +758,12 @@ def test_create_remove_buffer_queue_counter(self, dvs): self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) - self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) - counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|8', {'profile': 'egress_lossless_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '8', True) self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) - self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') - self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|8') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '8', False) self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) def test_create_remove_buffer_watermark_queue_pg_counter(self, dvs): @@ -830,16 +786,18 @@ def test_create_remove_buffer_watermark_queue_pg_counter(self, dvs): self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) self.config_db.update_entry('BUFFER_PG', 'Ethernet0|7', {'profile': 'ingress_lossy_profile'}) - self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|8', {'profile': 'egress_lossless_profile'}) for counterpoll_type, meta_data in counter_group_meta.items(): if 'queue' in counterpoll_type or 'pg' in counterpoll_type: - counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + index = '8' if 'queue' in counterpoll_type else '7' + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', index, True) self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) - self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|8') self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|7') for counterpoll_type, meta_data in counter_group_meta.items(): if 'queue' in counterpoll_type or 'pg' in counterpoll_type: - self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + index = '8' if 'queue' in counterpoll_type else '7' + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', index, False) self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) diff --git a/tests/test_hash.py b/tests/test_hash.py index b84dd91eaf9..b402f4d743a 100644 --- a/tests/test_hash.py +++ b/tests/test_hash.py @@ -14,7 +14,8 @@ "DST_IP", "SRC_IP", "L4_DST_PORT", - "L4_SRC_PORT" + "L4_SRC_PORT", + "IPV6_FLOW_LABEL" ] INNER_HASH_FIELD_LIST = [ "INNER_DST_MAC", @@ -50,7 +51,8 @@ "SAI_NATIVE_HASH_FIELD_DST_IP", "SAI_NATIVE_HASH_FIELD_SRC_IP", "SAI_NATIVE_HASH_FIELD_L4_DST_PORT", - "SAI_NATIVE_HASH_FIELD_L4_SRC_PORT" + "SAI_NATIVE_HASH_FIELD_L4_SRC_PORT", + "SAI_NATIVE_HASH_FIELD_IPV6_FLOW_LABEL" ] SAI_INNER_HASH_FIELD_LIST = [ "SAI_NATIVE_HASH_FIELD_INNER_DST_MAC", diff --git a/tests/test_hft.py b/tests/test_hft.py new file mode 100644 index 00000000000..b0096a6ec98 --- /dev/null +++ b/tests/test_hft.py @@ -0,0 +1,520 @@ +import time + +from swsscommon import swsscommon + + +class TestHFT(object): + """Test High Frequency Telemetry (HFT) functionality using DVS.""" + + def setup_method(self, method): + """Set up test method with database connections.""" + pass + + def teardown_method(self, method): + """Clean up after each test method.""" + pass + + def create_hft_profile(self, dvs, name="test", status="enabled", + polling_interval=300): + """Create HFT profile in CONFIG_DB.""" + config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + tbl = swsscommon.Table(config_db, "HIGH_FREQUENCY_TELEMETRY_PROFILE") + + fvs = swsscommon.FieldValuePairs([ + ("stream_state", status), + ("poll_interval", str(polling_interval)) + ]) + tbl.set(name, fvs) + + def create_hft_group(self, dvs, profile_name="test", group_name="PORT", + object_names="Ethernet0", + object_counters="IF_IN_OCTETS"): + """Create HFT group in CONFIG_DB.""" + config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + tbl = swsscommon.Table(config_db, "HIGH_FREQUENCY_TELEMETRY_GROUP") + + key = f"{profile_name}|{group_name}" + fvs = swsscommon.FieldValuePairs([ + ("object_names", object_names), + ("object_counters", object_counters) + ]) + tbl.set(key, fvs) + + def create_hft_group_without_fields(self, dvs, profile_name="test", group_name="PORT"): + """Create HFT group in CONFIG_DB without object_names and object_counters fields.""" + config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + tbl = swsscommon.Table(config_db, "HIGH_FREQUENCY_TELEMETRY_GROUP") + + key = f"{profile_name}|{group_name}" + # Create empty field-value pairs - no object_names or object_counters + fvs = swsscommon.FieldValuePairs([("NULL", "NULL")]) + tbl.set(key, fvs) + + def delete_hft_profile(self, dvs, name="test"): + """Delete HFT profile from CONFIG_DB.""" + config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + tbl = swsscommon.Table(config_db, "HIGH_FREQUENCY_TELEMETRY_PROFILE") + tbl._del(name) + + def delete_hft_group(self, dvs, profile_name="test", group_name="PORT"): + """Delete HFT group from CONFIG_DB.""" + config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + tbl = swsscommon.Table(config_db, "HIGH_FREQUENCY_TELEMETRY_GROUP") + key = f"{profile_name}|{group_name}" + tbl._del(key) + + def get_asic_db_objects(self, dvs): + """Get all relevant HFT-related objects from ASIC_STATE DB.""" + asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + # Get all TAM-related objects + tam_transport_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_TAM_TRANSPORT") + tam_collector_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_TAM_COLLECTOR") + tam_tel_type_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_TAM_TEL_TYPE") + tam_report_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_TAM_REPORT") + tam_tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_TAM") + tam_counter_sub_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_TAM_COUNTER_SUBSCRIPTION") + tam_telemetry_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_TAM_TELEMETRY") + hostif_trap_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF_USER_DEFINED_TRAP") + host_trap_group_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF_TRAP_GROUP") + ports_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + buffer_pool_tbl = swsscommon.Table( + asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL") + + return { + "tam_transport": self._get_table_entries(tam_transport_tbl), + "tam_collector": self._get_table_entries(tam_collector_tbl), + "tam_tel_type": self._get_table_entries(tam_tel_type_tbl), + "tam_report": self._get_table_entries(tam_report_tbl), + "tam": self._get_table_entries(tam_tbl), + "tam_counter_subscription": self._get_table_entries( + tam_counter_sub_tbl), + "tam_telemetry": self._get_table_entries(tam_telemetry_tbl), + "hostif_user_defined_trap": self._get_table_entries( + hostif_trap_tbl), + "host_trap_group": self._get_table_entries(host_trap_group_tbl), + "ports": self._get_table_entries(ports_tbl), + "buffer_pool": self._get_table_entries(buffer_pool_tbl) + } + + def _get_table_entries(self, table): + """Helper method to get all entries from a table.""" + entries = {} + keys = table.getKeys() + for key in keys: + status, fvs = table.get(key) + if status: + entries[key] = dict(fvs) + return entries + + def verify_asic_db_objects(self, asic_db, groups=[(1, 1)], watermark_count=0): + """Verify HFT objects are created correctly in ASIC_STATE DB.""" + + # If no groups, we expect minimal or no HFT objects + if not groups: + # When no groups are configured, counter subscriptions should be + # empty + assert len(asic_db["tam_counter_subscription"]) == 0, \ + "Expected no tam counter subscriptions when no groups " \ + "configured" + # Other objects might still exist as base infrastructure + return + + # Verify TAM transport + assert len(asic_db["tam_transport"]) == 1, "Expected one tam transport" + tam_transport = list(asic_db["tam_transport"].values())[0] + assert tam_transport["SAI_TAM_TRANSPORT_ATTR_TRANSPORT_TYPE"] == \ + "SAI_TAM_TRANSPORT_TYPE_NONE", \ + "Expected tam transport type to be SAI_TAM_TRANSPORT_TYPE_NONE" + + # Verify TAM collector + assert len(asic_db["tam_collector"]) == 1, "Expected one tam collector" + tam_collector = list(asic_db["tam_collector"].values())[0] + + # Fix: Use only the object ID, not the full key + transport_oid = tam_collector["SAI_TAM_COLLECTOR_ATTR_TRANSPORT"] + assert transport_oid in asic_db["tam_transport"], \ + f"Expected tam collector to reference tam transport. " \ + f"Looking for {transport_oid} in " \ + f"{list(asic_db['tam_transport'].keys())}" + + assert tam_collector["SAI_TAM_COLLECTOR_ATTR_LOCALHOST"] == "true", \ + "Expected tam collector to be localhost" + + # Fix: Use only the object ID + trap_oid = tam_collector["SAI_TAM_COLLECTOR_ATTR_HOSTIF_TRAP"] + assert trap_oid in asic_db["hostif_user_defined_trap"], \ + "Expected tam collector to reference hostif user defined trap" + + # Verify TAM telemetry type + assert len(asic_db["tam_tel_type"]) == len(groups), \ + f"Expected {len(groups)} tam telemetry types" + + for tam_tel_type in asic_db["tam_tel_type"].values(): + assert tam_tel_type[ + "SAI_TAM_TEL_TYPE_ATTR_TAM_TELEMETRY_TYPE"] == \ + "SAI_TAM_TELEMETRY_TYPE_COUNTER_SUBSCRIPTION", \ + "Expected tam telemetry type to be " \ + "SAI_TAM_TELEMETRY_TYPE_COUNTER_SUBSCRIPTION" + assert tam_tel_type[ + "SAI_TAM_TEL_TYPE_ATTR_SWITCH_ENABLE_PORT_STATS"] == \ + "true", \ + "Expected tam telemetry to be switch enable port stats" + assert tam_tel_type["SAI_TAM_TEL_TYPE_ATTR_MODE"] == \ + "SAI_TAM_TEL_TYPE_MODE_SINGLE_TYPE", \ + "Expected tam telemetry to be mode single type" + + # Fix: Use only the object ID + report_oid = tam_tel_type["SAI_TAM_TEL_TYPE_ATTR_REPORT_ID"] + assert report_oid in asic_db["tam_report"], \ + "Expected tam telemetry to reference tam report" + + # Verify TAM report + assert len(asic_db["tam_report"]) == len(groups), \ + f"Expected {len(groups)} tam reports" + + for tam_report in asic_db["tam_report"].values(): + assert tam_report["SAI_TAM_REPORT_ATTR_TYPE"] == \ + "SAI_TAM_REPORT_TYPE_IPFIX", \ + "Expected tam report type to be SAI_TAM_REPORT_TYPE_IPFIX" + assert tam_report["SAI_TAM_REPORT_ATTR_REPORT_MODE"] == \ + "SAI_TAM_REPORT_MODE_BULK", \ + "Expected tam report mode to be SAI_TAM_REPORT_MODE_BULK" + assert tam_report[ + "SAI_TAM_REPORT_ATTR_TEMPLATE_REPORT_INTERVAL"] == \ + "0", \ + "Expected tam report template report interval to be 0" + assert tam_report["SAI_TAM_REPORT_ATTR_REPORT_INTERVAL"] == \ + "300", \ + "Expected tam report report interval to be 300" + + # Verify main TAM object + assert len(asic_db["tam"]) == 1, "Expected one tam object" + tam = list(asic_db["tam"].values())[0] + assert "SAI_TAM_BIND_POINT_TYPE_SWITCH" in \ + tam["SAI_TAM_ATTR_TAM_BIND_POINT_TYPE_LIST"], \ + "Expected tam to have bind point type list" + + # Fix: Extract the telemetry object ID and check directly + tam_telemetry_oid = ":".join( + tam["SAI_TAM_ATTR_TELEMETRY_OBJECTS_LIST"].split(":")[1:3]) + assert tam_telemetry_oid in asic_db["tam_telemetry"], \ + "Expected tam to reference tam telemetry" + + # Verify TAM counter subscriptions + counters_number = sum([group[0] * group[1] for group in groups]) + assert len(asic_db["tam_counter_subscription"]) == counters_number, \ + f"Expected {counters_number} tam counter subscriptions" + + read_mode_count = 0 + watermark_mode_count = 0 + for tam_counter_sub in asic_db["tam_counter_subscription"].values(): + # Fix: Use only the object ID + tel_type_oid = tam_counter_sub[ + "SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_TEL_TYPE"] + assert tel_type_oid in asic_db["tam_tel_type"], \ + "Expected tam counter subscription to reference tam " \ + "telemetry type" + + # Fix: Use only the object ID + subscription_oid = tam_counter_sub[ + "SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_OBJECT_ID"] + assert (subscription_oid in asic_db["ports"] or subscription_oid in asic_db["buffer_pool"]), \ + "Expected tam counter subscription to reference port" + + # Only check if we have counter subscriptions + if counters_number > 0: + if tam_counter_sub[ + "SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_STATS_MODE"] == "SAI_STATS_MODE_READ": + read_mode_count += 1 + elif tam_counter_sub[ + "SAI_TAM_COUNTER_SUBSCRIPTION_ATTR_STATS_MODE"] == "SAI_STATS_MODE_READ_AND_CLEAR": + watermark_mode_count += 1 + if counters_number > 0: + assert read_mode_count == counters_number - watermark_count, \ + f"Expected {counters_number - watermark_count} read mode subscriptions" + assert watermark_mode_count == watermark_count, \ + f"Expected {watermark_count} watermark mode subscriptions" + + # Verify TAM telemetry + assert len(asic_db["tam_telemetry"]) == 1, "Expected one tam telemetry" + tam_telemetry = list(asic_db["tam_telemetry"].values())[0] + + collector_list_count = tam_telemetry[ + "SAI_TAM_TELEMETRY_ATTR_COLLECTOR_LIST"].split(":")[0] + assert collector_list_count == "1", \ + "Expected tam telemetry collector list count to be 1" + + # Fix: Extract the collector object ID and check directly + collector_oid = ":".join(tam_telemetry[ + "SAI_TAM_TELEMETRY_ATTR_COLLECTOR_LIST"].split(":")[1:3]) + assert collector_oid in asic_db["tam_collector"], \ + "Expected tam telemetry to reference tam collector" + + tam_type_list_count = tam_telemetry[ + "SAI_TAM_TELEMETRY_ATTR_TAM_TYPE_LIST"].split(":")[0] + assert tam_type_list_count == str(len(groups)), \ + f"Expected tam telemetry tam type list count to be {len(groups)}" + + if len(groups) == 1: + # Fix: Extract the telemetry type object ID and check directly + tam_type_oid = ":".join(tam_telemetry[ + "SAI_TAM_TELEMETRY_ATTR_TAM_TYPE_LIST"].split(":")[1:3]) + assert tam_type_oid in asic_db["tam_tel_type"], \ + "Expected tam telemetry to reference tam telemetry type" + + # Verify hostif user defined trap + assert len(asic_db["hostif_user_defined_trap"]) == 1, \ + "Expected one hostif user defined trap" + hostif_trap = list(asic_db["hostif_user_defined_trap"].values())[0] + assert hostif_trap["SAI_HOSTIF_USER_DEFINED_TRAP_ATTR_TYPE"] == \ + "SAI_HOSTIF_USER_DEFINED_TRAP_TYPE_TAM", \ + "Expected hostif user defined trap type to be " \ + "SAI_HOSTIF_USER_DEFINED_TRAP_TYPE_TAM" + + def verify_no_asic_objects(self, asic_db): + """Verify that HFT objects are cleaned up from ASIC_STATE DB.""" + # We expect some objects to remain (like base infrastructure) + # but counter subscriptions should be cleaned up + pass + + def test_simple_hft_one_counter(self, dvs, testlog): + """Test basic HFT functionality with one counter.""" + # Create HFT profile and group + self.create_hft_profile(dvs) + self.create_hft_group(dvs) + + # Wait for objects to be created + time.sleep(5) + + # Verify ASIC objects are created + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[(1, 1)]) + + # Clean up group first + self.delete_hft_group(dvs) + time.sleep(2) + + # Verify counter subscriptions are cleaned up + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[]) + + # Clean up profile + self.delete_hft_profile(dvs) + + def test_hft_multiple_counters(self, dvs, testlog): + """Test HFT functionality with multiple counters and objects.""" + # Create HFT profile and group with multiple counters + self.create_hft_profile(dvs) + self.create_hft_group(dvs, + object_names="Ethernet0,Ethernet4,Ethernet8", + object_counters="IF_IN_OCTETS,IF_IN_UCAST_PKTS," + "IF_IN_DISCARDS") + + # Wait for objects to be created + time.sleep(5) + + # Verify ASIC objects are created (3 objects × 3 counters = 9 + # subscriptions) + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[(3, 3)]) + + # Clean up group + self.delete_hft_group(dvs) + time.sleep(2) + + # Verify counter subscriptions are cleaned up + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[]) + + # Clean up profile + self.delete_hft_profile(dvs) + + def test_hft_delete_group_and_rejoin(self, dvs, testlog): + """Test HFT group deletion and recreation.""" + # Create HFT profile and group + self.create_hft_profile(dvs) + self.create_hft_group(dvs, + object_names="Ethernet0,Ethernet4,Ethernet8", + object_counters="IF_IN_OCTETS,IF_IN_UCAST_PKTS," + "IF_IN_DISCARDS") + + # Wait for objects to be created + time.sleep(5) + + # Verify ASIC objects are created + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[(3, 3)]) + + # Delete group + self.delete_hft_group(dvs) + time.sleep(2) + + # Verify counter subscriptions are cleaned up + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[]) + + # Recreate group + self.create_hft_group(dvs, + object_names="Ethernet0,Ethernet4,Ethernet8", + object_counters="IF_IN_OCTETS,IF_IN_UCAST_PKTS," + "IF_IN_DISCARDS") + time.sleep(5) + + # Verify ASIC objects are created again + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[(3, 3)]) + + # Final cleanup + self.delete_hft_group(dvs) + time.sleep(2) + + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[]) + + self.delete_hft_profile(dvs) + + def test_hft_profile_status_disabled(self, dvs, testlog): + """Test HFT profile with disabled status.""" + # Create HFT profile with disabled status + self.create_hft_profile(dvs, status="disabled") + self.create_hft_group(dvs) + + # Wait + time.sleep(3) + + # Verify no TAM objects are created when profile is disabled + # When disabled, we should have minimal or no HFT-specific objects + + # Clean up + self.delete_hft_group(dvs) + self.delete_hft_profile(dvs) + + def test_hft_custom_polling_interval(self, dvs, testlog): + """Test HFT with custom polling interval.""" + # Create HFT profile with custom polling interval + self.create_hft_profile(dvs, polling_interval=600) + self.create_hft_group(dvs) + + # Wait for objects to be created + time.sleep(5) + + # Verify ASIC objects with custom interval + asic_db = self.get_asic_db_objects(dvs) + + # Check that report interval matches our setting + for tam_report in asic_db["tam_report"].values(): + assert tam_report["SAI_TAM_REPORT_ATTR_REPORT_INTERVAL"] == \ + "600", \ + "Expected tam report report interval to be 600" + + # Clean up + self.delete_hft_group(dvs) + self.delete_hft_profile(dvs) + + def test_hft_empty_fields_with_disabled_status(self, dvs, testlog): + """Test HFT with empty object_names and object_counters when profile is disabled.""" + # Create HFT profile with disabled status + self.create_hft_profile(dvs, status="disabled") + + # Create HFT group with empty object_names and object_counters + self.create_hft_group(dvs, + object_names="", + object_counters="") + + # Wait for processing + time.sleep(3) + + # Verify that no counter subscriptions are created when + # profile is disabled and fields are empty + asic_db = self.get_asic_db_objects(dvs) + + assert len(asic_db["tam_counter_subscription"]) == 0, \ + "Expected no tam counter subscriptions when profile is disabled " \ + "and object_names/object_counters are empty" + + # Clean up + self.delete_hft_group(dvs) + self.delete_hft_profile(dvs) + + def test_hft_missing_fields_with_disabled_status(self, dvs, testlog): + """Test HFT without object_names and object_counters fields when profile is disabled.""" + # Create HFT profile with disabled status + self.create_hft_profile(dvs, status="disabled") + + # Create HFT group without object_names and object_counters fields + self.create_hft_group_without_fields(dvs) + + # Wait for processing + time.sleep(3) + + # Verify that no counter subscriptions are created when + # profile is disabled and fields are missing entirely + asic_db = self.get_asic_db_objects(dvs) + + assert len(asic_db["tam_counter_subscription"]) == 0, \ + "Expected no tam counter subscriptions when profile is disabled " \ + "and object_names/object_counters fields are missing" + + # Clean up + self.delete_hft_group(dvs) + self.delete_hft_profile(dvs) + + def test_hft_multiple_groups(self, dvs, testlog): + """Test HFT with multiple groups and objects.""" + # Create HFT profile and groups + self.create_hft_profile(dvs) + self.create_hft_group(dvs, + object_names="Ethernet0,Ethernet4,Ethernet8", + object_counters="IF_IN_OCTETS,IF_IN_UCAST_PKTS," + "IF_IN_DISCARDS") + self.create_hft_group(dvs, + group_name="BUFFER_POOL", + object_names="egress_lossless_pool,egress_lossy_pool,ingress_lossless_pool", + object_counters="DROPPED_PACKETS,CURR_OCCUPANCY_BYTES," + "WATERMARK_BYTES") + # The KVM latform doesn't support ingress priority groups and queues + # self.create_hft_group(dvs, + # group_name="INGRESS_PRIORITY_GROUP", + # object_names="Ethernet0|0,Ethernet4|0,Ethernet8|0", + # object_counters="PACKETS,BYTES,WATERMARK_BYTES") + # self.create_hft_group(dvs, + # group_name="QUEUE", + # object_names="Ethernet0|0,Ethernet4|0,Ethernet8|0", + # object_counters="PACKETS,BYTES,WATERMARK_BYTES") + + # Wait for objects to be created + time.sleep(5) + + # Verify ASIC objects are created (4 groups subscriptions) + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[(3, 3), (3,3)], watermark_count=3) + # self.verify_asic_db_objects(asic_db, groups=[(3, 3), (3,3), (3, 3), (3, 3)]) + + # Clean up group + self.delete_hft_group(dvs, group_name="PORT") + self.delete_hft_group(dvs, group_name="BUFFER_POOL") + self.delete_hft_group(dvs, group_name="INGRESS_PRIORITY_GROUP") + self.delete_hft_group(dvs, group_name="QUEUE") + time.sleep(2) + + # Verify counter subscriptions are cleaned up + asic_db = self.get_asic_db_objects(dvs) + self.verify_asic_db_objects(asic_db, groups=[]) + + # Clean up profile + self.delete_hft_profile(dvs) + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_icmp_echo.py b/tests/test_icmp_echo.py new file mode 100644 index 00000000000..3efe81e9785 --- /dev/null +++ b/tests/test_icmp_echo.py @@ -0,0 +1,764 @@ +import pytest +import time + +from swsscommon import swsscommon + + +class TestIcmpEcho(object): + def setup_db(self, dvs): + dvs.setup_db() + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.sdb = dvs.get_state_db() + self.cdb = dvs.get_config_db() + # Set switch icmp offload capability + dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'ICMP_OFFLOAD_CAPABLE', 'true') + + def get_exist_icmp_echo_session(self): + return set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION")) + + def create_icmp_echo_session(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ICMP_ECHO_SESSION_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) + + def remove_icmp_echo_session(self, key): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ICMP_ECHO_SESSION_TABLE") + tbl._del(key) + + def check_asic_icmp_echo_session_value(self, key, expected_values): + fvs = self.adb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def check_state_icmp_echo_session_value(self, key, expected_values): + fvs = self.sdb.get_entry("ICMP_ECHO_SESSION_TABLE", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def update_icmp_echo_session_state(self, dvs, session, state): + icmp_echo_sai_state = {"Down": "SAI_ICMP_ECHO_SESSION_STATE_DOWN", + "Up": "SAI_ICMP_ECHO_SESSION_STATE_UP"} + + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"icmp_echo_session_id\":\""+session+"\",\"session_state\":\""+icmp_echo_sai_state[state]+"\"}]" + ntf.send("icmp_echo_session_state_change", ntf_data, fvp) + + def set_admin_status(self, interface, status): + self.cdb.update_entry("PORT", interface, {"admin_status": status}) + + def create_vrf(self, vrf_name): + initial_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) + + self.cdb.create_entry("VRF", vrf_name, {"empty": "empty"}) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER", len(initial_entries) + 1) + + current_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) + assert len(current_entries - initial_entries) == 1 + return list(current_entries - initial_entries)[0] + + def remove_vrf(self, vrf_name): + self.cdb.delete_entry("VRF", vrf_name) + + def create_l3_intf(self, interface, vrf_name): + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + def remove_l3_intf(self, interface): + self.cdb.delete_entry("INTERFACE", interface) + + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + @pytest.mark.skip(reason="This test is flaky") + def test_addUpdateRemoveIcmpEchoSession(self, dvs): + self.setup_db(dvs) + + icmpEchoSessions = self.get_exist_icmp_echo_session() + + # Create ICMP ECHO session + fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": + "10", "rx_interval": "10"} + self.create_icmp_echo_session("default:default:5000:NORMAL", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + # self session + session = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "true", + } + self.check_asic_icmp_echo_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"10", + "rx_interval": "10", "hw_lookup": "true"} + self.check_state_icmp_echo_session_value("default|default|5000|NORMAL", expected_sdb_values) + + # Send ICMP ECHO session state notification to update ICMP ECHO session state + self.update_icmp_echo_session_state(dvs, session, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|default|5000|NORMAL", expected_sdb_values) + + # Update tx/rx_interval in ICMP ECHO session + update_fieldValues = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": + "100", "rx_interval": "50"} + self.create_icmp_echo_session("default:default:5000:NORMAL", update_fieldValues) + # wait after update + time.sleep(2) + + # Confirm tx/rx_interval does get updated + expected_sdb_values["tx_interval"] = "100" + expected_sdb_values["rx_interval"] = "50" + self.check_state_icmp_echo_session_value("default|default|5000|NORMAL", expected_sdb_values) + + # Verify the ASIC_DB gets the updated value + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "100000", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "50000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "true", + } + self.check_asic_icmp_echo_session_value(session, expected_adb_values) + + # remove the session + self.remove_icmp_echo_session("default:default:5000:NORMAL") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session) + + # RX session + peer_fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": + "10", "rx_interval": "10"} + self.create_icmp_echo_session("default:default:5000:RX", peer_fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions)+1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "0", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + } + self.check_asic_icmp_echo_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"0", + "rx_interval": "10", "hw_lookup": "true"} + self.check_state_icmp_echo_session_value("default|default|5000|RX", expected_sdb_values) + + # Confirm tx_interval does not get updated + peer_update_fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": + "100", "rx_interval": "100"} + self.create_icmp_echo_session("default:default:5000:RX", peer_update_fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions)+1) + + # Checked ICMP ECHO session in ASIC_DB post update + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "0", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "100000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + } + self.check_asic_icmp_echo_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session post update + expected_sdb_values = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"0", + "rx_interval": "100", "hw_lookup": "true"} + self.check_state_icmp_echo_session_value("default|default|5000|RX", expected_sdb_values) + + # Send ICMP ECHO session state notification to update ICMP ECHO session state + self.update_icmp_echo_session_state(dvs, session, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|default|5000|RX", expected_sdb_values) + + # Remove the ICMP sessions + self.remove_icmp_echo_session("default:default:5000:RX") + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session) + + keys = self.sdb.get_keys("ICMP_ECHO_SESSION_TABLE") + assert len(keys) == 0 + + @pytest.mark.skip(reason="This test is flaky") + def test_multipleIcmpEchoSessions(self, dvs): + self.setup_db(dvs) + + # create interfaces and add IP address + self.create_l3_intf("Ethernet0", "default") + self.create_l3_intf("Ethernet4", "default") + self.add_ip_address("Ethernet0", "10.0.0.0/31") + self.add_ip_address("Ethernet4", "10.0.1.0/31") + self.set_admin_status("Ethernet0", "up") + self.set_admin_status("Ethernet4", "up") + + icmpEchoSessions = self.get_exist_icmp_echo_session() + + # Create ICMP session 1 + fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": + "10", "rx_interval": "10", "dst_mac": "01:23:45:aa:bb:cc"} + + key1_self = "default:Ethernet0:5000:NORMAL" + self.create_icmp_echo_session(key1_self, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + icmpEchoSessions = self.get_exist_icmp_echo_session() + assert len(createdSessions) == 1 + + # self session + session1 = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + } + self.check_asic_icmp_echo_session_value(session1, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"10", + "rx_interval": "10", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|5000|NORMAL", expected_sdb_values) + + # Send ICMP ECHO session state notification to update ICMP ECHO session state + self.update_icmp_echo_session_state(dvs, session1, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|Ethernet0|5000|NORMAL", expected_sdb_values) + + # RX session + peer_fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": + "10", "rx_interval": "10", "dst_mac": "01:23:45:aa:bb:cc"} + + key1_peer = "default:Ethernet0:6000:RX" + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + session2 = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "6000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "0", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + } + self.check_asic_icmp_echo_session_value(session2, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "6000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"0", + "rx_interval": "10", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + # Send ICMP ECHO session state notification to update ICMP ECHO session state + self.update_icmp_echo_session_state(dvs, session2, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + # Remove the ICMP sessions + self.remove_icmp_echo_session(key1_self) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session1) + self.remove_icmp_echo_session(key1_peer) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session2) + + keys = self.sdb.get_keys("ICMP_ECHO_SESSION_TABLE") + assert len(keys) == 0 + + def test_icmp_echo_state_db_clear(self, dvs): + self.setup_db(dvs) + + icmpEchoSessions = self.get_exist_icmp_echo_session() + + # Create Icmp echo session + fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": + "10", "rx_interval": "10"} + + key1_self = "default:default:5000:NORMAL" + self.create_icmp_echo_session(key1_self, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created icmp session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + dvs.stop_swss() + dvs.start_swss() + + time.sleep(5) + keys = self.sdb.get_keys("ICMP_ECHO_SESSION_TABLE") + assert len(keys) == 0 + + def test_FailIcmpEchoSessions(self, dvs): + self.setup_db(dvs) + + # create interfaces and add IP address + self.create_l3_intf("Ethernet0", "default") + self.create_l3_intf("Ethernet4", "default") + self.add_ip_address("Ethernet0", "10.0.0.0/31") + self.add_ip_address("Ethernet4", "10.0.1.0/31") + self.set_admin_status("Ethernet0", "up") + self.set_admin_status("Ethernet4", "up") + + icmpEchoSessions = self.get_exist_icmp_echo_session() + + # Create ICMP session 1 + fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": "10", + "rx_interval": "10", "src_mac": "01:01:02:02:03:04", "ttl" : "3", + "hw_lookup": "true", "tos": "1"} + + # bad key + key1_self = "default:Ethernet0:5000" + self.create_icmp_echo_session(key1_self, fieldValues) + time.sleep(2) + + # Create should for bad key fail + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + icmpEchoSessions = self.get_exist_icmp_echo_session() + assert len(createdSessions) == 0 + + # missing dst_mac, creation should fail with proper key + key1_self = "default:Ethernet0:5000:" + self.create_icmp_echo_session(key1_self, fieldValues) + time.sleep(2) + + # Create should fail for missing dst_mac when using non-default alias + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + icmpEchoSessions = self.get_exist_icmp_echo_session() + assert len(createdSessions) == 0 + + # add the dst_mac + fieldValues["hw_lookup"] = "false" + fieldValues["dst_mac"] = "01:23:45:aa:bb:cc" + + # default alias with dst_mac should fail + key1_self = "default:default:5000:" + self.create_icmp_echo_session(key1_self, fieldValues) + time.sleep(2) + + # Create should fail + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + icmpEchoSessions = self.get_exist_icmp_echo_session() + assert len(createdSessions) == 0 + + # unkown port alias should fail + key1_self = "default:Ethernet128:5000:" + self.create_icmp_echo_session(key1_self, fieldValues) + time.sleep(2) + + # Create should fail + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + icmpEchoSessions = self.get_exist_icmp_echo_session() + assert len(createdSessions) == 0 + + # Remove the ICMP sessions + self.remove_icmp_echo_session(key1_self) + time.sleep(1) + + # creation should pass with unsupported attrib + fieldValues["unknown_attrib"] = "XXXX" + # creation should pass after hw_lookup is set ot false + key1_self = "default:Ethernet0:5000:" + self.create_icmp_echo_session(key1_self, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + icmpEchoSessions = self.get_exist_icmp_echo_session() + assert len(createdSessions) == 1 + + # self session + session1 = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + "SAI_ICMP_ECHO_SESSION_ATTR_TTL": "3", + } + self.check_asic_icmp_echo_session_value(session1, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval": "10", + "rx_interval": "10", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|5000|NORMAL", expected_sdb_values) + + # notification with wrong key + self.update_icmp_echo_session_state(dvs, session1, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|Ethernet0|5000|NORMAL", expected_sdb_values) + + # Remove the ICMP sessions + self.remove_icmp_echo_session(key1_self) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session1) + + keys = self.sdb.get_keys("ICMP_ECHO_SESSION_TABLE") + assert len(keys) == 0 + + # RX session + peer_fieldValues = {"session_cookie": "12345", "src_ip": "10.0.0.1", + "dst_ip":"10.0.0.2", "tx_interval": "10", + "rx_interval": "10", "dst_mac": "01:23:45:aa:bb:cc", + "ttl" : "10"} + + key1_peer = "default:Ethernet0:6000:RX" + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions)) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + session2 = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "6000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "0", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "10000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + "SAI_ICMP_ECHO_SESSION_ATTR_TTL": "10", + } + self.check_asic_icmp_echo_session_value(session2, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "6000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"0", + "rx_interval": "10", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + # Send ICMP ECHO session state notification to update ICMP ECHO session state + self.update_icmp_echo_session_state(dvs, session2, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + # Failure Remove the ICMP sessions + self.remove_icmp_echo_session(key1_self) + time.sleep(1) + + keys = self.sdb.get_keys("ICMP_ECHO_SESSION_TABLE") + assert len(keys) == 1 + + # Update with valid new field should fail + peer_fieldValues["tos"] = "2" + + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + time.sleep(1) + + # Checked no new created session + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + del peer_fieldValues["tos"] + + # Update tx_interval should fail for RX session + peer_fieldValues["tx_interval"] = "20" + + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + time.sleep(1) + + # Checked no new created session + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + # check expected values did not change + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + del peer_fieldValues["tx_interval"] + + # Update unsupported field should fail + peer_fieldValues["ttl"] = "1" + + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + time.sleep(1) + + # Checked no new created session + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + # check expected values did not change + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + peer_fieldValues["ttl"] = "10" + + # Update unknown field should fail + peer_fieldValues["unknown_attrib"] = "DDDD" + + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + time.sleep(1) + + # Checked no new created session + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + # check expected values did not change + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + del peer_fieldValues["unknown_attrib"] + + session2 = createdSessions.pop() + + #remove the second session + self.remove_icmp_echo_session(key1_peer) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session2) + + keys = self.sdb.get_keys("ICMP_ECHO_SESSION_TABLE") + assert len(keys) == 0 + + def test_intervalIcmpEchoSessions(self, dvs): + self.setup_db(dvs) + + # create interfaces and add IP address + self.create_l3_intf("Ethernet0", "default") + self.create_l3_intf("Ethernet4", "default") + self.add_ip_address("Ethernet0", "10.0.0.0/31") + self.add_ip_address("Ethernet4", "10.0.1.0/31") + self.set_admin_status("Ethernet0", "up") + self.set_admin_status("Ethernet4", "up") + + icmpEchoSessions = self.get_exist_icmp_echo_session() + + # Create ICMP session 1, use lower than min rx/tx interval + fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": "1", + "rx_interval": "8", "dst_mac": "01:23:45:aa:bb:cc"} + + key1_self = "default:Ethernet0:5000:NORMAL" + self.create_icmp_echo_session(key1_self, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + icmpEchoSessions = self.get_exist_icmp_echo_session() + assert len(createdSessions) == 1 + + # self session + session1 = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "3000", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "9000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + } + self.check_asic_icmp_echo_session_value(session1, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"3", + "rx_interval": "9", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|5000|NORMAL", expected_sdb_values) + + # Send ICMP ECHO session state notification to update ICMP ECHO session state + self.update_icmp_echo_session_state(dvs, session1, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|Ethernet0|5000|NORMAL", expected_sdb_values) + + # RX session, with rx interval more than max + peer_fieldValues = {"session_cookie": "12345", "src_ip": "10.0.0.1", + "dst_ip":"10.0.0.2", "tx_interval": "10", + "rx_interval": "24001", "dst_mac": "01:23:45:aa:bb:cc"} + + key1_peer = "default:Ethernet0:6000:RX" + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + session2 = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "6000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "0", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "24000000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + } + self.check_asic_icmp_echo_session_value(session2, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session, max rx_interval + expected_sdb_values = {"session_guid": "6000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"0", + "rx_interval": "24000", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + # Send ICMP ECHO session state notification to update ICMP ECHO session state + self.update_icmp_echo_session_state(dvs, session2, "Up") + time.sleep(2) + + # Confirm ICMP ECHO session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + # update the RX session rx interval to lower than min + peer_fieldValues["rx_interval"] = "8" + self.create_icmp_echo_session(key1_peer, peer_fieldValues) + time.sleep(1) + + # Checked no extra created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "6000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "0", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "9000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + } + self.check_asic_icmp_echo_session_value(session2, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session, max rx_interval + expected_sdb_values = {"session_guid": "6000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"0", + "rx_interval": "9", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|6000|RX", expected_sdb_values) + + # Remove the ICMP sessions + self.remove_icmp_echo_session(key1_self) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session1) + self.remove_icmp_echo_session(key1_peer) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session2) + + # verify max tx interval + icmpEchoSessions = self.get_exist_icmp_echo_session() + + # Create ICMP session 1, use lower than min rx/tx interval + fieldValues = {"session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip":"10.0.0.2", "tx_interval": "1000000", + "rx_interval": "300", "dst_mac": "01:23:45:aa:bb:cc"} + + key1_self = "default:Ethernet0:5000:NORMAL" + self.create_icmp_echo_session(key1_self, fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", len(icmpEchoSessions) + 1) + + # Checked created ICMP ECHO session in ASIC_DB + createdSessions = self.get_exist_icmp_echo_session() - icmpEchoSessions + assert len(createdSessions) == 1 + + # self session + session1 = createdSessions.pop() + expected_adb_values = { + "SAI_ICMP_ECHO_SESSION_ATTR_GUID": "5000", + "SAI_ICMP_ECHO_SESSION_ATTR_COOKIE": "12345", + "SAI_ICMP_ECHO_SESSION_ATTR_TX_INTERVAL": "1200000", + "SAI_ICMP_ECHO_SESSION_ATTR_RX_INTERVAL": "300000", + "SAI_ICMP_ECHO_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", + "SAI_ICMP_ECHO_SESSION_ATTR_IPHDR_VERSION": "4", + "SAI_ICMP_ECHO_SESSION_ATTR_HW_LOOKUP_VALID": "false", + "SAI_ICMP_ECHO_SESSION_ATTR_DST_MAC_ADDRESS": "01:23:45:AA:BB:CC", + } + self.check_asic_icmp_echo_session_value(session1, expected_adb_values) + + # Check STATE_DB entry related to the ICMP ECHO session + expected_sdb_values = {"session_guid": "5000", "session_cookie": "12345", + "src_ip": "10.0.0.1", "dst_ip": "10.0.0.2", "tx_interval" :"1200", + "rx_interval": "300", "hw_lookup": "false"} + self.check_state_icmp_echo_session_value("default|Ethernet0|5000|NORMAL", expected_sdb_values) + + # Remove the ICMP session + self.remove_icmp_echo_session(key1_self) + self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_ICMP_ECHO_SESSION", session1) + + keys = self.sdb.get_keys("ICMP_ECHO_SESSION_TABLE") + assert len(keys) == 0 diff --git a/tests/test_interface.py b/tests/test_interface.py index 98f15271520..4c453710d96 100644 --- a/tests/test_interface.py +++ b/tests/test_interface.py @@ -17,6 +17,8 @@ def set_admin_status(self, dvs, interface, status): tbl_name = "PORTCHANNEL" elif interface.startswith("Vlan"): tbl_name = "VLAN" + elif interface.startswith("Loopback"): + tbl_name = "LOOPBACK_INTERFACE" else: tbl_name = "PORT" tbl = swsscommon.Table(self.cdb, tbl_name) @@ -1965,6 +1967,72 @@ def test_LoopbackInterfaceIpv4AddressWithVrf(self, dvs, testlog): if route["dest"] == "10.0.0.4/32": assert False + def test_LoopbackInterfaceAdminStatus(self, dvs, testlog): + self.setup_db(dvs) + + # Create loopback interfaces + self.create_l3_intf("Loopback0", "") + + # add ip address + self.add_ip_address("Loopback0", "10.1.0.1/32") + + # Check application database + tbl = swsscommon.Table(self.pdb, "INTF_TABLE:Loopback0") + intf_entries = tbl.getKeys() + assert intf_entries[0] == "10.1.0.1/32" + + # Check ASIC database + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + for key in tbl.getKeys(): + route = json.loads(key) + if route["dest"] == "10.1.0.1/32": + lo0_ip2me_found = True + + assert lo0_ip2me_found + + # check linux kernel, interface should be up by default with no admin_status specified + (exitcode, result) = dvs.runcmd(['sh', '-c', "ip link show Loopback0"]) + assert "UP" in result + + ### Bring interface down and validate + self.set_admin_status(dvs, "Loopback0", "down") + + # check linux kernel + (exitcode, result) = dvs.runcmd(['sh', '-c', "ip link show Loopback0"]) + assert "DOWN" in result + + ### Bring interface up and validate + self.set_admin_status(dvs, "Loopback0", "up") + + # Check ASIC database + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + for key in tbl.getKeys(): + route = json.loads(key) + if route["dest"] == "10.1.0.1/32": + lo0_ip2me_found = True + + assert lo0_ip2me_found + + # check linux kernel + (exitcode, result) = dvs.runcmd(['sh', '-c', "ip link show Loopback0"]) + assert "UP" in result + + # Cleanup + self.remove_ip_address("Loopback0", "10.1.0.1/32") + self.remove_l3_intf("Loopback0") + + # Check application database + tbl = swsscommon.Table(self.pdb, "INTF_TABLE:Loopback0") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 0 + + # Check ASIC database + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + for key in tbl.getKeys(): + route = json.loads(key) + if route["dest"] == "10.1.0.1/32": + assert False + def create_ipv6_link_local(self, interface): if interface.startswith("PortChannel"): diff --git a/tests/test_ipv6_link_local.py b/tests/test_ipv6_link_local.py index 048b8f2e171..642274c15df 100644 --- a/tests/test_ipv6_link_local.py +++ b/tests/test_ipv6_link_local.py @@ -59,10 +59,10 @@ def test_NeighborAddRemoveIpv6LinkLocal(self, dvs, testlog): time.sleep(2) # Neigh entries should contain Ipv6-link-local neighbors, should be 4 - neigh_entries = self.pdb.get_keys("NEIGH_TABLE") - assert (len(neigh_entries) == 4) + self.pdb.wait_for_n_keys("NEIGH_TABLE", 4) found_entry = False + neigh_entries = self.pdb.get_keys("NEIGH_TABLE") for key in neigh_entries: if (key.find("Ethernet4:2001::2") or key.find("Ethernet0:2000::2")): found_entry = True diff --git a/tests/test_macsec.py b/tests/test_macsec.py index 9dc5a4ed534..ef1f67e9f74 100644 --- a/tests/test_macsec.py +++ b/tests/test_macsec.py @@ -424,6 +424,7 @@ def init_macsec( wpa.init_macsec_port(port_name) wpa.config_macsec_port(port_name, {"enable_protect": True}) wpa.config_macsec_port(port_name, {"enable_encrypt": True}) + wpa.config_macsec_port(port_name, {"send_sci": True}) wpa.config_macsec_port( port_name, { diff --git a/tests/test_mclag_fdb.py b/tests/test_mclag_fdb.py index 8252db84219..0801b15a1d6 100644 --- a/tests/test_mclag_fdb.py +++ b/tests/test_mclag_fdb.py @@ -83,6 +83,19 @@ def how_many_entries_exist(db, table): tbl = swsscommon.Table(db, table) return len(tbl.getKeys()) +def create_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + fvs = swsscommon.FieldValuePairs([("if_type", "PortChannel")]) + key_string = domain_id + "|" + mclag_interface + tbl.set(key_string, fvs) + time.sleep(1) + +def remove_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + key_string = domain_id + "|" + mclag_interface + tbl._del(key_string) + time.sleep(1) + # Test-1 Verify basic config add @pytest.mark.dev_sanity @@ -564,8 +577,100 @@ def test_mclagFdb_remote_to_local_mac_move_ntf(dvs, testlog): dvs.pdb, "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", ) - -# Test-13 Verify cleanup of the basic config. + +# Test-13 Verify FDB table flush on MCLAG link down. +@pytest.mark.dev_sanity +def test_mclagFdb_flush_on_link_down(dvs, testlog): + dvs.setup_db() + + # Create PortChannel001. We do not use the pre-created portchannels + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + + tbl.set("PortChannel001", fvs) + time.sleep(1) + + # Create vlan + dvs.create_vlan("200") + + # Add vlan members + dvs.create_vlan_member("200", "PortChannel001") + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL_MEMBER") + fvs = swsscommon.FieldValuePairs([("NULL", "NULL")]) + tbl.set("PortChannel001|Ethernet0", fvs) + time.sleep(1) + + # set oper_status for PortChannels + ps = swsscommon.ProducerStateTable(dvs.pdb, "LAG_TABLE") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + ps.set("PortChannel001", fvs) + time.sleep(1) + + create_mclag_interface(dvs, "4095", "PortChannel001") + + #Add MAC to FDB_TABLE on PortChannel001 + create_entry_pst( + dvs.pdb, + "FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + [ + ("port", "PortChannel001"), + ("type", "dynamic"), + ] + ) + + # check that the FDB entry inserted into ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The MCLAG fdb entry not inserted to ASIC" + + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC")] + ) + + assert ok, str(extra) + + # bring PortChannel down + dvs.servers[0].runcmd("ip link set down dev eth0") + time.sleep(1) + ps = swsscommon.ProducerStateTable(dvs.pdb, "LAG_TABLE") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "down")]) + ps.set("PortChannel001", fvs) + time.sleep(1) + + # check that the FDB entry was not deleted from ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The MCLAG fdb entry was deleted" + + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC")] + ) + assert ok, str(extra) + + # Restore eth0 up + dvs.servers[0].runcmd("ip link set up dev eth0") + time.sleep(1) + + remove_mclag_interface(dvs, "4095", "PortChannel001") + + delete_entry_pst( + dvs.pdb, + "FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + ) + + time.sleep(2) + # check that the FDB entry was deleted from ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 0, "The MCLAG static fdb entry not deleted" + + # remove PortChannel member + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL_MEMBER") + tbl._del("PortChannel001|Ethernet0") + time.sleep(1) + + # remove PortChannel + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL") + tbl._del("PortChannel001") + time.sleep(2) + +# Test-14 Verify cleanup of the basic config. @pytest.mark.dev_sanity def test_mclagFdb_basic_config_del(dvs, testlog): diff --git a/tests/test_mirror.py b/tests/test_mirror.py index f74ff6fa0da..282fc47310b 100644 --- a/tests/test_mirror.py +++ b/tests/test_mirror.py @@ -60,7 +60,7 @@ def remove_ip_address(self, interface, ip): def add_neighbor(self, interface, ip, mac): tbl = swsscommon.ProducerStateTable(self.pdb, "NEIGH_TABLE") fvs = swsscommon.FieldValuePairs([("neigh", mac), - ("family", "IPv4")]) + ("family", "IPv4" if "." in ip else "IPv6")]) tbl.set(interface + ":" + ip, fvs) time.sleep(1) @@ -103,11 +103,27 @@ def get_mirror_session_state(self, name): assert len(fvs) > 0 return { fv[0]: fv[1] for fv in fvs } + def mirror_session_entry_exists(self, name): + tbl = swsscommon.Table(self.sdb, "MIRROR_SESSION_TABLE") + (status, _) = tbl.get(name) + return status + def check_syslog(self, dvs, marker, log, expected_cnt): (ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" % (marker, log)]) assert out.strip() == str(expected_cnt) - def test_MirrorAddRemove(self, dvs, testlog): + def test_MirrorInvalidEntry(self, dvs): + """ + This test ensures that an invalid mirror session entry is not created. + Here, "invalid" means an entry in which src IP is IPv4 while dst IP is IPv6 + (or vice versa). + """ + self.setup_db(dvs) + session = "TEST_SESSION" + self.create_mirror_session(session, "1.1.1.1", "fc00::2:2:2:2", "0x6558", "8", "100", "0") + assert self.mirror_session_entry_exists(session) == False + + def _test_MirrorAddRemove(self, dvs, testlog, v6_encap=False): """ This test covers the basic mirror session creation and removal operations Operation flow: @@ -119,34 +135,37 @@ def test_MirrorAddRemove(self, dvs, testlog): The session becomes inactive again till the end 4. Remove miror session """ - self.setup_db(dvs) - session = "TEST_SESSION" + src_ip = "1.1.1.1" if v6_encap == False else "fc00::1:1:1:1" + dst_ip = "2.2.2.2" if v6_encap == False else "fc00::2:2:2:2" + intf_addr = "10.0.0.0/31" if v6_encap == False else "fc00::/126" + nhop_ip = "10.0.0.1" if v6_encap == False else "fc00::1" marker = dvs.add_log_marker() # create mirror session - self.create_mirror_session(session, "1.1.1.1", "2.2.2.2", "0x6558", "8", "100", "0") + self.create_mirror_session(session, src_ip, dst_ip, "0x6558", "8", "100", "0") assert self.get_mirror_session_state(session)["status"] == "inactive" - self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP 2.2.2.2", 1) + assert self.get_mirror_session_state(session)["next_hop_ip"] == ("0.0.0.0@" if v6_encap == False else "::@") + self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP {}".format(dst_ip), 1) # bring up Ethernet16 self.set_interface_status(dvs, "Ethernet16", "up") assert self.get_mirror_session_state(session)["status"] == "inactive" # add IP address to Ethernet16 - self.add_ip_address("Ethernet16", "10.0.0.0/31") + self.add_ip_address("Ethernet16", intf_addr) assert self.get_mirror_session_state(session)["status"] == "inactive" # add neighbor to Ethernet16 - self.add_neighbor("Ethernet16", "10.0.0.1", "02:04:06:08:10:12") + self.add_neighbor("Ethernet16", nhop_ip, "02:04:06:08:10:12") assert self.get_mirror_session_state(session)["status"] == "inactive" - # add route to mirror destination via 10.0.0.1 - self.add_route(dvs, "2.2.2.2", "10.0.0.1") + # add route to mirror destination via next hop ip + self.add_route(dvs, dst_ip, nhop_ip) assert self.get_mirror_session_state(session)["status"] == "active" assert self.get_mirror_session_state(session)["monitor_port"] == "Ethernet16" assert self.get_mirror_session_state(session)["dst_mac"] == "02:04:06:08:10:12" - assert self.get_mirror_session_state(session)["route_prefix"] == "2.2.2.2/32" + assert self.get_mirror_session_state(session)["route_prefix"] == "{}/{}".format(dst_ip, 32 if v6_encap == False else 128) # check asic database tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION") @@ -164,15 +183,15 @@ def test_MirrorAddRemove(self, dvs, testlog): elif fv[0] == "SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE": assert fv[1] == "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": - assert fv[1] == "4" + assert fv[1] == "4" if v6_encap == False else "6" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TOS": assert fv[1] == "32" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TTL": assert fv[1] == "100" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS": - assert fv[1] == "1.1.1.1" + assert fv[1] == src_ip elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS": - assert fv[1] == "2.2.2.2" + assert fv[1] == dst_ip elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS": assert fv[1] == dvs.runcmd("bash -c \"ip link show eth0 | grep ether | awk '{print $2}'\"")[1].strip().upper() elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": @@ -183,15 +202,15 @@ def test_MirrorAddRemove(self, dvs, testlog): assert False # remove route - self.remove_route(dvs, "2.2.2.2") + self.remove_route(dvs, dst_ip) assert self.get_mirror_session_state(session)["status"] == "inactive" # remove neighbor - self.remove_neighbor("Ethernet16", "10.0.0.1") + self.remove_neighbor("Ethernet16", nhop_ip) assert self.get_mirror_session_state(session)["status"] == "inactive" # remove IP address - self.remove_ip_address("Ethernet16", "10.0.0.0/31") + self.remove_ip_address("Ethernet16", intf_addr) assert self.get_mirror_session_state(session)["status"] == "inactive" # bring down Ethernet16 @@ -201,7 +220,13 @@ def test_MirrorAddRemove(self, dvs, testlog): marker = dvs.add_log_marker() # remove mirror session self.remove_mirror_session(session) - self.check_syslog(dvs, marker, "Detached next hop observer for destination IP 2.2.2.2", 1) + self.check_syslog(dvs, marker, "Detached next hop observer for destination IP {}".format(dst_ip), 1) + + def test_MirrorAddRemove(self, dvs, testlog): + self.setup_db(dvs) + + self._test_MirrorAddRemove(dvs, testlog) + self._test_MirrorAddRemove(dvs, testlog, v6_encap=True) def create_vlan(self, dvs, vlan): #dvs.runcmd("ip link del Bridge") @@ -239,11 +264,7 @@ def remove_fdb(self, vlan, mac): tbl._del("Vlan" + vlan + ":" + mac) time.sleep(1) - - # Ignore testcase in Debian Jessie - # TODO: Remove this skip if Jessie support is no longer needed - @pytest.mark.skipif(StrictVersion(distro.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support") - def test_MirrorToVlanAddRemove(self, dvs, testlog): + def _test_MirrorToVlanAddRemove(self, dvs, testlog, v6_encap=False): """ This test covers basic mirror session creation and removal operation with destination port sits in a VLAN @@ -254,15 +275,18 @@ def test_MirrorToVlanAddRemove(self, dvs, testlog): 3. Remove FDB; remove neighbor; remove IP; remove VLAN 4. Remove mirror session """ - self.setup_db(dvs) - session = "TEST_SESSION" + src_ip = "5.5.5.5" if v6_encap == False else "fc00::5:5:5:5" + # dst ip in directly connected vlan subnet + dst_ip = "6.6.6.6" if v6_encap == False else "fc00::6:6:6:6" + intf_addr = "6.6.6.0/24" if v6_encap == False else "fc00::6:6:6:0/112" marker = dvs.add_log_marker() # create mirror session - self.create_mirror_session(session, "5.5.5.5", "6.6.6.6", "0x6558", "8", "100", "0") + self.create_mirror_session(session, src_ip, dst_ip, "0x6558", "8", "100", "0") assert self.get_mirror_session_state(session)["status"] == "inactive" - self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP 6.6.6.6", 1) + assert self.get_mirror_session_state(session)["next_hop_ip"] == ("0.0.0.0@" if v6_encap == False else "::@") + self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP {}".format(dst_ip), 1) # create vlan; create vlan member self.create_vlan(dvs, "6") @@ -273,11 +297,11 @@ def test_MirrorToVlanAddRemove(self, dvs, testlog): self.set_interface_status(dvs, "Ethernet4", "up") # add ip address to vlan 6 - self.add_ip_address("Vlan6", "6.6.6.0/24") + self.add_ip_address("Vlan6", intf_addr) assert self.get_mirror_session_state(session)["status"] == "inactive" # create neighbor to vlan 6 - self.add_neighbor("Vlan6", "6.6.6.6", "66:66:66:66:66:66") + self.add_neighbor("Vlan6", dst_ip, "66:66:66:66:66:66") assert self.get_mirror_session_state(session)["status"] == "inactive" # create fdb entry to ethernet4 @@ -300,15 +324,15 @@ def test_MirrorToVlanAddRemove(self, dvs, testlog): elif fv[0] == "SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE": assert fv[1] == "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": - assert fv[1] == "4" + assert fv[1] == "4" if v6_encap == False else "6" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TOS": assert fv[1] == "32" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_TTL": assert fv[1] == "100" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS": - assert fv[1] == "5.5.5.5" + assert fv[1] == src_ip elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS": - assert fv[1] == "6.6.6.6" + assert fv[1] == dst_ip elif fv[0] == "SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS": assert fv[1] == dvs.runcmd("bash -c \"ip link show eth0 | grep ether | awk '{print $2}'\"")[1].strip().upper() elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": @@ -333,11 +357,11 @@ def test_MirrorToVlanAddRemove(self, dvs, testlog): assert self.get_mirror_session_state(session)["status"] == "inactive" # remove neighbor - self.remove_neighbor("Vlan6", "6.6.6.6") + self.remove_neighbor("Vlan6", dst_ip) assert self.get_mirror_session_state(session)["status"] == "inactive" # remove ip address - self.remove_ip_address("Vlan6", "6.6.6.0/24") + self.remove_ip_address("Vlan6", intf_addr) assert self.get_mirror_session_state(session)["status"] == "inactive" # bring down vlan and member @@ -351,7 +375,16 @@ def test_MirrorToVlanAddRemove(self, dvs, testlog): marker = dvs.add_log_marker() # remove mirror session self.remove_mirror_session(session) - self.check_syslog(dvs, marker, "Detached next hop observer for destination IP 6.6.6.6", 1) + self.check_syslog(dvs, marker, "Detached next hop observer for destination IP {}".format(dst_ip), 1) + + # Ignore testcase in Debian Jessie + # TODO: Remove this skip if Jessie support is no longer needed + @pytest.mark.skipif(StrictVersion(distro.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support") + def test_MirrorToVlanAddRemove(self, dvs, testlog): + self.setup_db(dvs) + + self._test_MirrorToVlanAddRemove(dvs, testlog) + self._test_MirrorToVlanAddRemove(dvs, testlog, v6_encap=True) def create_port_channel(self, dvs, channel): tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_TABLE") @@ -370,6 +403,9 @@ def remove_port_channel(self, dvs, channel): tbl = swsscommon.Table(self.sdb, "LAG_TABLE") tbl._del("PortChannel" + channel) time.sleep(1) + tbl = swsscommon.Table(self.cdb, "PORTCHANNEL") + tbl._del("PortChannel" + channel) + time.sleep(1) def create_port_channel_member(self, channel, interface): tbl = swsscommon.ProducerStateTable(self.pdb, "LAG_MEMBER_TABLE") @@ -382,8 +418,7 @@ def remove_port_channel_member(self, channel, interface): tbl._del("PortChannel" + channel + ":" + interface) time.sleep(1) - - def test_MirrorToLagAddRemove(self, dvs, testlog): + def _test_MirrorToLagAddRemove(self, dvs, testlog, v6_encap=False): """ This test covers basic mirror session creation and removal operations with destination port sits in a LAG @@ -395,15 +430,18 @@ def test_MirrorToLagAddRemove(self, dvs, testlog): 4. Remove mirror session """ - self.setup_db(dvs) - session = "TEST_SESSION" + src_ip = "10.10.10.10" if v6_encap == False else "fc00::10:10:10:10" + dst_ip = "11.11.11.11" if v6_encap == False else "fc00::11:11:11:11" + # dst ip in directly connected subnet + intf_addr = "11.11.11.0/24" if v6_encap == False else "fc00::11:11:11:0/112" marker = dvs.add_log_marker() # create mirror session - self.create_mirror_session(session, "10.10.10.10", "11.11.11.11", "0x6558", "8", "100", "0") + self.create_mirror_session(session, src_ip, dst_ip, "0x6558", "8", "100", "0") assert self.get_mirror_session_state(session)["status"] == "inactive" - self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP 11.11.11.11", 1) + assert self.get_mirror_session_state(session)["next_hop_ip"] == ("0.0.0.0@" if v6_encap == False else "::@") + self.check_syslog(dvs, marker, "Attached next hop observer .* for destination IP {}".format(dst_ip), 1) # create port channel; create port channel member self.create_port_channel(dvs, "008") @@ -414,11 +452,11 @@ def test_MirrorToLagAddRemove(self, dvs, testlog): self.set_interface_status(dvs, "Ethernet88", "up") # add ip address to port channel 008 - self.add_ip_address("PortChannel008", "11.11.11.0/24") + self.add_ip_address("PortChannel008", intf_addr) assert self.get_mirror_session_state(session)["status"] == "inactive" # create neighbor to port channel 008 - self.add_neighbor("PortChannel008", "11.11.11.11", "88:88:88:88:88:88") + self.add_neighbor("PortChannel008", dst_ip, "88:88:88:88:88:88") assert self.get_mirror_session_state(session)["status"] == "active" # check asic database @@ -432,13 +470,15 @@ def test_MirrorToLagAddRemove(self, dvs, testlog): assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet88" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": assert fv[1] == "88:88:88:88:88:88" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # remove neighbor - self.remove_neighbor("PortChannel008", "11.11.11.11") + self.remove_neighbor("PortChannel008", dst_ip) assert self.get_mirror_session_state(session)["status"] == "inactive" # remove ip address - self.remove_ip_address("PortChannel008", "11.11.11.0/24") + self.remove_ip_address("PortChannel008", intf_addr) assert self.get_mirror_session_state(session)["status"] == "inactive" # bring down port channel and port channel member @@ -452,13 +492,15 @@ def test_MirrorToLagAddRemove(self, dvs, testlog): marker = dvs.add_log_marker() # remove mirror session self.remove_mirror_session(session) - self.check_syslog(dvs, marker, "Detached next hop observer for destination IP 11.11.11.11", 1) + self.check_syslog(dvs, marker, "Detached next hop observer for destination IP {}".format(dst_ip), 1) + def test_MirrorToLagAddRemove(self, dvs, testlog): + self.setup_db(dvs) - # Ignore testcase in Debian Jessie - # TODO: Remove this skip if Jessie support is no longer needed - @pytest.mark.skipif(StrictVersion(distro.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support") - def test_MirrorDestMoveVlan(self, dvs, testlog): + self._test_MirrorToLagAddRemove(dvs, testlog) + self._test_MirrorToLagAddRemove(dvs, testlog, v6_encap=True) + + def _test_MirrorDestMoveVlan(self, dvs, testlog, v6_encap=False): """ This test tests mirror session destination move from non-VLAN to VLAN and back to non-VLAN port @@ -471,19 +513,25 @@ def test_MirrorDestMoveVlan(self, dvs, testlog): 7. Disable non-VLAN monitor port 8. Remove mirror session """ - self.setup_db(dvs) - session = "TEST_SESSION" + src_ip = "7.7.7.7" if v6_encap == False else "fc00::7:7:7:7" + dst_ip = "8.8.8.8" if v6_encap == False else "fc00::8:8:8:8" + port_intf_addr = "80.0.0.0/31" if v6_encap == False else "fc00::80:0:0:0/126" + port_nhop_ip = "80.0.0.1" if v6_encap == False else "fc00::80:0:0:1" + port_ip_prefix = "8.8.0.0/16" if v6_encap == False else "fc00::8:8:0:0/96" + # dst ip moves to directly connected vlan subnet + vlan_intf_addr = "8.8.8.0/24" if v6_encap == False else "fc00::8:8:8:0/112" # create mirror session - self.create_mirror_session(session, "7.7.7.7", "8.8.8.8", "0x6558", "8", "100", "0") + self.create_mirror_session(session, src_ip, dst_ip, "0x6558", "8", "100", "0") assert self.get_mirror_session_state(session)["status"] == "inactive" + assert self.get_mirror_session_state(session)["next_hop_ip"] == ("0.0.0.0@" if v6_encap == False else "::@") # bring up port; add ip; add neighbor; add route self.set_interface_status(dvs, "Ethernet32", "up") - self.add_ip_address("Ethernet32", "80.0.0.0/31") - self.add_neighbor("Ethernet32", "80.0.0.1", "02:04:06:08:10:12") - self.add_route(dvs, "8.8.0.0/16", "80.0.0.1") + self.add_ip_address("Ethernet32", port_intf_addr) + self.add_neighbor("Ethernet32", port_nhop_ip, "02:04:06:08:10:12") + self.add_route(dvs, port_ip_prefix, port_nhop_ip) assert self.get_mirror_session_state(session)["status"] == "active" # check monitor port @@ -496,6 +544,8 @@ def test_MirrorDestMoveVlan(self, dvs, testlog): assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID": assert fv[1] == "false" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # mirror session move round 1 # create vlan; create vlan member; bring up vlan and member @@ -506,11 +556,13 @@ def test_MirrorDestMoveVlan(self, dvs, testlog): assert self.get_mirror_session_state(session)["status"] == "active" # add ip address to vlan 9 - self.add_ip_address("Vlan9", "8.8.8.0/24") + self.add_ip_address("Vlan9", vlan_intf_addr) + time.sleep(2) + # inactive due to no neighbor mac or fdb entry assert self.get_mirror_session_state(session)["status"] == "inactive" # create neighbor to vlan 9 - self.add_neighbor("Vlan9", "8.8.8.8", "88:88:88:88:88:88") + self.add_neighbor("Vlan9", dst_ip, "88:88:88:88:88:88") assert self.get_mirror_session_state(session)["status"] == "inactive" # create fdb entry to ethernet48 @@ -535,6 +587,8 @@ def test_MirrorDestMoveVlan(self, dvs, testlog): assert fv[1] == "0" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_CFI": assert fv[1] == "0" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # mirror session move round 2 # remove fdb entry @@ -542,11 +596,11 @@ def test_MirrorDestMoveVlan(self, dvs, testlog): assert self.get_mirror_session_state(session)["status"] == "inactive" # remove neighbor - self.remove_neighbor("Vlan9", "8.8.8.8") + self.remove_neighbor("Vlan9", dst_ip) assert self.get_mirror_session_state(session)["status"] == "inactive" # remove ip address - self.remove_ip_address("Vlan9", "8.8.8.0/24") + self.remove_ip_address("Vlan9", vlan_intf_addr) assert self.get_mirror_session_state(session)["status"] == "active" # check monitor port @@ -559,6 +613,8 @@ def test_MirrorDestMoveVlan(self, dvs, testlog): assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32" elif fv[0] == "SAI_MIRROR_SESSION_ATTR_VLAN_HEADER_VALID": assert fv[1] == "false" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # bring down vlan and member; remove vlan member; remove vlan self.set_interface_status(dvs, "Ethernet48", "down") @@ -567,16 +623,24 @@ def test_MirrorDestMoveVlan(self, dvs, testlog): self.remove_vlan("9") # remove route; remove neighbor; remove ip; bring down port - self.remove_route(dvs, "8.8.8.0/24") - self.remove_neighbor("Ethernet32", "80.0.0.1") - self.remove_ip_address("Ethernet32", "80.0.0.0/31") + self.remove_route(dvs, vlan_intf_addr) + self.remove_neighbor("Ethernet32", port_nhop_ip) + self.remove_ip_address("Ethernet32", port_intf_addr) self.set_interface_status(dvs, "Ethernet32", "down") # remove mirror session self.remove_mirror_session(session) + # Ignore testcase in Debian Jessie + # TODO: Remove this skip if Jessie support is no longer needed + @pytest.mark.skipif(StrictVersion(distro.linux_distribution()[1]) <= StrictVersion('8.9'), reason="Debian 8.9 or before has no support") + def test_MirrorDestMoveVlan(self, dvs, testlog): + self.setup_db(dvs) - def test_MirrorDestMoveLag(self, dvs, testlog): + self._test_MirrorDestMoveVlan(dvs, testlog) + self._test_MirrorDestMoveVlan(dvs, testlog, v6_encap=True) + + def _test_MirrorDestMoveLag(self, dvs, testlog, v6_encap=False): """ This test tests mirror session destination move from non-LAG to LAG and back to non-LAG port @@ -589,19 +653,26 @@ def test_MirrorDestMoveLag(self, dvs, testlog): 7. Disable non-LAG monitor port 8. Remove mirror session """ - self.setup_db(dvs) - session = "TEST_SESSION" + src_ip = "12.12.12.12" if v6_encap == False else "fc00::12:12:12:12" + dst_ip = "13.13.13.13" if v6_encap == False else "fc00::13:13:13:13" + port_intf_addr = "100.0.0.0/31" if v6_encap == False else "fc00::100:0:0:0/126" + port_nhop_ip = "100.0.0.1" if v6_encap == False else "fc00::100:0:0:1" + port_ip_prefix = "13.13.0.0/16" if v6_encap == False else "fc00::13:13:0:0/96" + lag_intf_addr = "200.0.0.0/31" if v6_encap == False else "fc00::200:0:0:0/126" + lag_nhop_ip = "200.0.0.1" if v6_encap == False else "fc00::200:0:0:1" + lag_ip_prefix = "13.13.13.0/24" if v6_encap == False else "fc00::13:13:13:0/112" # create mirror session - self.create_mirror_session(session, "12.12.12.12", "13.13.13.13", "0x6558", "8", "100", "0") + self.create_mirror_session(session, src_ip, dst_ip, "0x6558", "8", "100", "0") assert self.get_mirror_session_state(session)["status"] == "inactive" + assert self.get_mirror_session_state(session)["next_hop_ip"] == ("0.0.0.0@" if v6_encap == False else "::@") # bring up port; add ip; add neighbor; add route self.set_interface_status(dvs, "Ethernet64", "up") - self.add_ip_address("Ethernet64", "100.0.0.0/31") - self.add_neighbor("Ethernet64", "100.0.0.1", "02:04:06:08:10:12") - self.add_route(dvs, "13.13.0.0/16", "100.0.0.1") + self.add_ip_address("Ethernet64", port_intf_addr) + self.add_neighbor("Ethernet64", port_nhop_ip, "02:04:06:08:10:12") + self.add_route(dvs, port_ip_prefix, port_nhop_ip) assert self.get_mirror_session_state(session)["status"] == "active" # check monitor port @@ -612,8 +683,10 @@ def test_MirrorDestMoveLag(self, dvs, testlog): for fv in fvs: if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet64" - if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": assert fv[1] == "02:04:06:08:10:12" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # mirror session move round 1 # create port channel; create port channel member; bring up @@ -623,12 +696,12 @@ def test_MirrorDestMoveLag(self, dvs, testlog): self.set_interface_status(dvs, "Ethernet32", "up") # add ip address to port channel 080; create neighbor to port channel 080 - self.add_ip_address("PortChannel080", "200.0.0.0/31") - self.add_neighbor("PortChannel080", "200.0.0.1", "12:10:08:06:04:02") + self.add_ip_address("PortChannel080", lag_intf_addr) + self.add_neighbor("PortChannel080", lag_nhop_ip, "12:10:08:06:04:02") assert self.get_mirror_session_state(session)["status"] == "active" # add route - self.add_route(dvs, "13.13.13.0/24", "200.0.0.1") + self.add_route(dvs, lag_ip_prefix, lag_nhop_ip) assert self.get_mirror_session_state(session)["status"] == "active" # check monitor port @@ -639,8 +712,10 @@ def test_MirrorDestMoveLag(self, dvs, testlog): for fv in fvs: if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32" - if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": assert fv[1] == "12:10:08:06:04:02" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # mirror session move round 2 # remove port channel member @@ -660,15 +735,16 @@ def test_MirrorDestMoveLag(self, dvs, testlog): for fv in fvs: if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet32" - if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": assert fv[1] == "12:10:08:06:04:02" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # mirror session move round 4 # remove route - self.remove_route(dvs, "13.13.13.0/24") + self.remove_route(dvs, lag_ip_prefix) assert self.get_mirror_session_state(session)["status"] == "active" - port_oid = "" # check monitor port tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION") assert len(tbl.getKeys()) == 1 @@ -677,12 +753,14 @@ def test_MirrorDestMoveLag(self, dvs, testlog): for fv in fvs: if fv[0] == "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": assert dvs.asicdb.portoidmap[fv[1]] == "Ethernet64" - if fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": assert fv[1] == "02:04:06:08:10:12" + elif fv[0] == "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION": + assert fv[1] == "4" if v6_encap == False else "6" # remove neighbor; remove ip address to port channel 080 - self.remove_neighbor("PortChannel080", "200.0.0.1") - self.remove_ip_address("PortChannel080", "200.0.0.0/31") + self.remove_neighbor("PortChannel080", lag_nhop_ip) + self.remove_ip_address("PortChannel080", lag_intf_addr) # bring down; remove port channel member; remove port channel self.set_interface_status(dvs, "Ethernet32", "down") @@ -692,15 +770,20 @@ def test_MirrorDestMoveLag(self, dvs, testlog): assert self.get_mirror_session_state(session)["status"] == "active" # remove route; remove neighbor; remove ip; bring down port - self.remove_route(dvs, "13.13.0.0/16") - self.remove_neighbor("Ethernet64", "100.0.0.1") - self.remove_ip_address("Ethernet64", "100.0.0.0/31") + self.remove_route(dvs, port_ip_prefix) + self.remove_neighbor("Ethernet64", port_nhop_ip) + self.remove_ip_address("Ethernet64", port_intf_addr) self.set_interface_status(dvs, "Ethernet64", "down") assert self.get_mirror_session_state(session)["status"] == "inactive" # remove mirror session self.remove_mirror_session(session) + def test_MirrorDestMoveLag(self, dvs, testlog): + self.setup_db(dvs) + + self._test_MirrorDestMoveLag(dvs, testlog) + self._test_MirrorDestMoveLag(dvs, testlog, v6_encap=True) def create_acl_table(self, table, interfaces): tbl = swsscommon.Table(self.cdb, "ACL_TABLE") diff --git a/tests/test_mirror_port_erspan.py b/tests/test_mirror_port_erspan.py index 61c0a17cd3c..7b48d31e764 100644 --- a/tests/test_mirror_port_erspan.py +++ b/tests/test_mirror_port_erspan.py @@ -435,6 +435,13 @@ def test_PortMirrorDestMoveLag(self, dvs, testlog): # add route dvs.add_route("13.13.13.0/24", "200.0.0.1") self.dvs_mirror.verify_session_status(session) + + # As the Route pointing to Down NH Interface it will not get installed and Mirror State should not be changed. + self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db, src_ports=src_asic_ports, direction="RX") + + # Now Make Port Channel oper up so route pointing to this gets installed + (exitcode, _) = dvs.runcmd("ip link set dev PortChannel080 carrier on") + assert exitcode == 0, "ip link set failed" expected_asic_db = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": pmap.get("Ethernet32"), "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": "12:10:08:06:04:02"} diff --git a/tests/test_mirror_port_span.py b/tests/test_mirror_port_span.py index 7c27eff85c1..f956c1d3eda 100644 --- a/tests/test_mirror_port_span.py +++ b/tests/test_mirror_port_span.py @@ -40,19 +40,19 @@ def test_PortMirrorQueue(self, dvs, testlog): # Sub Test 2 marker = dvs.add_log_marker() - self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="RX", queue="254") + self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="RX", queue="15") self.dvs_mirror.verify_session_status(session) self.dvs_mirror.remove_mirror_session(session) self.dvs_mirror.verify_no_mirror() - self.check_syslog(dvs, marker, "Failed to get valid queue 254", 0) + self.check_syslog(dvs, marker, "Failed to get valid queue 15", 0) # Sub Test 3 marker = dvs.add_log_marker() - self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="TX", queue="255") + self.dvs_mirror.create_span_session(session, dst_port, src_ports, direction="TX", queue="16") self.dvs_mirror.verify_session_status(session, expected=0) self.dvs_mirror.remove_mirror_session(session) self.dvs_mirror.verify_no_mirror() - self.check_syslog(dvs, marker, "Failed to get valid queue 255", 1) + self.check_syslog(dvs, marker, "Failed to get valid queue 16", 1) def test_PortMirrorAddRemove(self, dvs, testlog): diff --git a/tests/test_mux.py b/tests/test_mux.py index 6528fb0a2f0..1ca4fd924b3 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -1,6 +1,7 @@ import time import pytest import json +import itertools from ipaddress import ip_network, ip_address, IPv4Address from swsscommon import swsscommon @@ -66,6 +67,7 @@ class TestMuxTunnelBase(): DEFAULT_TUNNEL_PARAMS = { "tunnel_type": "IPINIP", "dst_ip": SELF_IPV4, + "src_ip": PEER_IPV4, "dscp_mode": "pipe", "ecn_mode": "standard", "ttl_mode": "pipe", @@ -99,6 +101,8 @@ class TestMuxTunnelBase(): DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} + BULK_NEIGHBOR_COUNT = 254 + def check_syslog(self, dvs, marker, err_log, expected_cnt): (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) assert num.strip() >= str(expected_cnt) @@ -219,11 +223,14 @@ def check_neigh_in_asic_db(self, asicdb, ip, expected=True): return '' - def check_tnl_nexthop_in_asic_db(self, asicdb, expected=1): + def check_tnl_nexthop_in_asic_db(self, asicdb, expected=None): global tunnel_nh_id - nh = asicdb.wait_for_n_keys(self.ASIC_NEXTHOP_TABLE, expected) + if expected: + nh = asicdb.wait_for_n_keys(self.ASIC_NEXTHOP_TABLE, expected) + else: + nh = asicdb.get_keys(self.ASIC_NEXTHOP_TABLE) for key in nh: fvs = asicdb.get_entry(self.ASIC_NEXTHOP_TABLE, key) @@ -268,16 +275,18 @@ def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): assert num_tnl_nh == count def check_route_nexthop(self, dvs_route, asicdb, route, nexthop, tunnel=False): + """ + Checks if nexthop is a member of a given route + """ route_key = dvs_route.check_asicdb_route_entries([route]) route_nexthop_oid = self.get_route_nexthop_oid(route_key[0], asicdb) - + if tunnel: - assert route_nexthop_oid == nexthop - return + return route_nexthop_oid == nexthop nexthop_oid = self.get_nexthop_oid(asicdb, nexthop) - assert route_nexthop_oid == nexthop_oid + return route_nexthop_oid == nexthop_oid def add_neighbor(self, dvs, ip, mac): if ip_address(ip).version == 6: @@ -309,10 +318,18 @@ def del_fdb(self, dvs, mac): def add_route(self, dvs, route, nexthops, ifaces=[]): apdb = dvs.get_app_db() - nexthop_str = ",".join(nexthops) - if len(ifaces) == 0: - ifaces = [self.VLAN_1000 for k in range(len(nexthops))] - iface_str = ",".join(ifaces) + if len(nexthops) > 1: + nexthop_str = ",".join(nexthops) + if len(ifaces) == 0: + ifaces = [self.VLAN_1000 for k in range(len(nexthops))] + iface_str = ",".join(ifaces) + else: + nexthop_str = str(nexthops[0]) + if len(ifaces) == 0: + iface_str = self.VLAN_1000 + else: + iface_str = ifaces[0] + ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) fvs = swsscommon.FieldValuePairs( [ @@ -327,8 +344,66 @@ def del_route(self, dvs, route): ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) ps._del(route) + def wait_for_mux_state(self, dvs, interface, expected_state): + """ + Waits until state change completes - expected state is in state_db + """ + + apdb = dvs.get_app_db() + expected_field = {"state": expected_state} + apdb.wait_for_field_match(self.APP_MUX_CABLE, interface, expected_field) + + def bulk_neighbor_test(self, confdb, appdb, asicdb, dvs, dvs_route): + dvs.runcmd("ip neigh flush all") + self.add_fdb(dvs, "Ethernet0", "00-00-00-00-11-11") + self.set_mux_state(appdb, "Ethernet0", "active") + + class neighbor_info: + ipv4_key = "" + ipv6_key = "" + ipv4 = "" + ipv6 = "" + + def __init__(self, i): + self.ipv4 = "192.168.1." + str(i) + self.ipv6 = "fc02:1001::" + str(i) + + neighbor_list = [neighbor_info(i) for i in range(100, self.BULK_NEIGHBOR_COUNT)] + for neigh_info in neighbor_list: + self.add_neighbor(dvs, neigh_info.ipv4, "00:00:00:00:11:11") + self.add_neighbor(dvs, neigh_info.ipv6, "00:00:00:00:11:11") + neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4) + neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6) + + try: + self.set_mux_state(appdb, "Ethernet0", "standby") + self.wait_for_mux_state(dvs, "Ethernet0", "standby") + + for neigh_info in neighbor_list: + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv4_key) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_info.ipv6_key) + dvs_route.check_asicdb_route_entries( + [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK] + ) + + self.set_mux_state(appdb, "Ethernet0", "active") + self.wait_for_mux_state(dvs, "Ethernet0", "active") + + for neigh_info in neighbor_list: + dvs_route.check_asicdb_deleted_route_entries( + [neigh_info.ipv4+self.IPV4_MASK, neigh_info.ipv6+self.IPV6_MASK] + ) + neigh_info.ipv4_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv4) + neigh_info.ipv6_key = self.check_neigh_in_asic_db(asicdb, neigh_info.ipv6) + + finally: + for neigh_info in neighbor_list: + self.del_neighbor(dvs, neigh_info.ipv4) + self.del_neighbor(dvs, neigh_info.ipv6) + def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): + self.bulk_neighbor_test(confdb, appdb, asicdb, dvs, dvs_route) self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "standby") @@ -453,12 +528,15 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): self.del_fdb(dvs, "00-00-00-00-00-11") - def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): - + def create_and_test_route(self, appdb, asicdb, dvs, dvs_route, mac_Ethernet0, mac_Ethernet4): self.set_mux_state(appdb, "Ethernet0", "active") rtprefix = "2.3.4.0/24" + # Make sure neighbor is present + self.add_neighbor(dvs, self.SERV1_IPV4, mac_Ethernet0) + self.add_neighbor(dvs, self.SERV2_IPV4, mac_Ethernet0) + dvs.runcmd( "vtysh -c \"configure terminal\" -c \"ip route " + rtprefix + " " + self.SERV1_IPV4 + "\"" @@ -474,6 +552,7 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): # Change Mux state to Standby and verify route pointing to Tunnel self.set_mux_state(appdb, "Ethernet0", "standby") + self.check_tnl_nexthop_in_asic_db(asicdb) self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True) # Change Mux state back to Active and verify route is not pointing to Tunnel @@ -504,355 +583,440 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "active") dvs_route.check_asicdb_deleted_route_entries([rtprefix]) - def multi_nexthop_test(self, dvs, dvs_route, asicdb, appdb, route, neighbors, macs): - mux_ports = ["Ethernet0", "Ethernet4"] - starting_states = [(ACTIVE, ACTIVE), (ACTIVE, STANDBY), (STANDBY, ACTIVE), (STANDBY, STANDBY)] + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ip route " + rtprefix + + " " + self.SERV1_IPV4 + "\"" + ) - # Set state to active for initial state - for port in mux_ports: - self.set_mux_state(appdb, port, "active") + def create_and_test_route_learned_before_neighbor(self, appdb, asicdb, dvs, dvs_route, mac): + rtprefix = "2.3.4.0/24" + neigh = "192.168.0.110" + mux_port = "Ethernet0" - # add neighbors for initial state - for i,neighbor in enumerate(neighbors): - self.add_neighbor(dvs, neighbor, macs[i]) - - try: - # toggle between states and add route in various combos of state - print("Testing add/remove/update of route") - for start in starting_states: - print("Adding route with %s: %s and %s: %s" % (mux_ports[0], start[0], mux_ports[1], start[1])) - self.set_mux_state(appdb, mux_ports[0], start[0]) - self.set_mux_state(appdb, mux_ports[1], start[1]) - self.add_route(dvs, route, neighbors) - if start[0] == "active": - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - elif start[1] == "active": - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - - print("Testing fdb update in %s, %s for %s" % (start[0], start[1], neighbors[0])) - # move neighbor 1 - self.add_neighbor(dvs, neighbors[0], "00:aa:bb:cc:dd:ee") - if start[0] == "active": - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - elif start[1] == "active": - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - - # move neighbor 1 back - self.add_neighbor(dvs, neighbors[0], macs[0]) - - print("Testing fdb update in %s, %s for %s" % (start[0], start[1], neighbors[1])) - # move neighbor 2 - self.add_neighbor(dvs, neighbors[0], "00:aa:bb:cc:dd:ee") - if start[0] == "active": - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - elif start[1] == "active": - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - - # move neighbor 2 back - self.add_neighbor(dvs, neighbors[1], macs[1]) - - self.del_route(dvs, route) - - - # toggle mux states to check setState actions - print("Testing toggling state") - for start in starting_states: - self.set_mux_state(appdb, mux_ports[0], start[0]) - self.set_mux_state(appdb, mux_ports[1], start[1]) - self.add_route(dvs, route, neighbors) - - for toggle_index,port in enumerate(mux_ports): - keep_index = (toggle_index + 1) % 2 - - print("keeping %s as %s while toggling %s from %s" % \ - (mux_ports[keep_index], start[keep_index], mux_ports[toggle_index], start[toggle_index])) - - if start[toggle_index] == ACTIVE: - print("setting %s to %s" % (mux_ports[toggle_index], STANDBY)) - self.set_mux_state(appdb, mux_ports[toggle_index], STANDBY) - if start[keep_index] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - - print("setting %s to %s" % (mux_ports[toggle_index], ACTIVE)) - self.set_mux_state(appdb, mux_ports[toggle_index], ACTIVE) - if start[keep_index] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[toggle_index]) - else: - print("setting %s to %s" % (mux_ports[toggle_index], ACTIVE)) - self.set_mux_state(appdb, mux_ports[toggle_index], ACTIVE) - if start[keep_index] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[toggle_index]) - - print("setting %s to %s" % (mux_ports[toggle_index], STANDBY)) - self.set_mux_state(appdb, mux_ports[toggle_index], STANDBY) - if start[keep_index] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - self.del_route(dvs, route) - - # Check route_updates - self.add_neighbor(dvs, self.SERV3_IPV4, macs[2]) - print("Testing route updates") - self.add_route(dvs, route, neighbors) - print("setting states to active") - for port in mux_ports: - self.set_mux_state(appdb, port, "active") - self.set_mux_state(appdb, "Ethernet4", "active") + nexthop_map = {"active": neigh, "standby": tunnel_nh_id} + toggle_map = {"active": "standby", "standby": "active"} - print("triggering another route update") - # add new neighbor to route to force route update - self.add_route(dvs, route, [neighbors[0], self.SERV3_IPV4]) - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - self.del_route(dvs,route) + for state in ["standby", "active"]: + try: + current_state = state + self.set_mux_state(appdb, mux_port, current_state) - time.sleep(2) + self.add_route(dvs, rtprefix, [neigh]) + time.sleep(1) + self.add_neighbor(dvs, neigh, mac) - self.add_route(dvs, route, neighbors) - print("setting states to standby") - for port in mux_ports: - self.set_mux_state(appdb, port, "standby") - self.set_mux_state(appdb, "Ethernet4", "standby") + # Confirm route is pointing to tunnel nh + self.check_route_nexthop(dvs_route, asicdb, rtprefix, nexthop_map[current_state], (current_state == "standby")) - print("triggering another route update") - # add new neighbor to route to force route update - self.add_route(dvs, route, [neighbors[0], self.SERV3_IPV4]) - self.add_route(dvs, route, neighbors) - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - self.del_route(dvs,route) + # Toggle the mux a few times + current_state = toggle_map[current_state] + self.set_mux_state(appdb, mux_port, current_state) + self.check_route_nexthop(dvs_route, asicdb, rtprefix, nexthop_map[current_state], (current_state == "standby")) - for neighbor in neighbors: - self.del_neighbor(dvs, neighbor) - self.del_neighbor(dvs, self.SERV3_IPV4) - - print("Testing add/remove of neighbors") - for start in starting_states: - print("Testing add/remove of neighbors in %s, %s" % start) - self.set_mux_state(appdb, mux_ports[0], start[0]) - self.set_mux_state(appdb, mux_ports[1], start[1]) - self.add_route(dvs, route, neighbors) - - # add first neighbor - self.add_neighbor(dvs, neighbors[0], macs[0]) - if start[0] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - - # add second neighbor - self.add_neighbor(dvs, neighbors[1], macs[1]) - time.sleep(1) - if start[0] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - elif start[1] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) + current_state = toggle_map[current_state] + self.set_mux_state(appdb, mux_port, current_state) + self.check_route_nexthop(dvs_route, asicdb, rtprefix, nexthop_map[current_state], (current_state == "standby")) + + finally: + self.del_route(dvs, rtprefix) + self.del_neighbor(dvs, neigh) + + def multi_nexthop_check(self, asicdb, dvs_route, route, nexthops, mux_states, non_mux_nexthop=None, expect_active=None): + """ + Checks if multi-mux route points to an active nexthop or tunnel. + - If a non_mux_nexthop is present, expect to point to that + - Checks all active mux ports first + - If no active nexthops are found, assert tunnel nexthop is programmed + - If an active nexthop is expected but none is found, alert + """ + if isinstance(route, list): + route_copy = route.copy() + else: + route_copy = [route] + + # If we don't specify expected active state, default to true if Active mux cable is present + assert_active = (ACTIVE in mux_states) + if expect_active is not None: + assert_active = expect_active + + for rt in route_copy: + if non_mux_nexthop != None: + assert self.check_route_nexthop(dvs_route, asicdb, rt, non_mux_nexthop), \ + f"Nexthop {non_mux_nexthop} expected but not found for route {rt}" + continue + for i,state in enumerate(mux_states): + # Find first active mux port, and check that route points to that neighbor + if state == ACTIVE and self.check_route_nexthop(dvs_route, asicdb, rt, nexthops[i]): + break + else: + # If no active mux port, check that route points to tunnel + if assert_active: + assert False, f"Active nexthop expected for route {rt}, but not found" else: - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - - # remove neighbors (shouldn't work) - for neighbor in neighbors: - self.del_neighbor(dvs, neighbor) - if start[0] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - elif start[1] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) - else: - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) - - # add neighbor again to trick ip - for i,neighbor in enumerate(neighbors): - self.add_neighbor(dvs, neighbor, macs[i]) - - self.del_route(dvs,route) - for neighbor in neighbors: - self.del_neighbor(dvs, neighbor) - - # add the neighbors - for i,neighbor in enumerate(neighbors): - self.add_neighbor(dvs, neighbor, macs[i]) - - print("Testing multiple routes pointing to one of the NH") - r2 = "3.4.5.0/24" - for start in starting_states: - print("Adding routes with %s: %s and %s: %s" % (mux_ports[0], start[0], mux_ports[1], start[1])) - - self.set_mux_state(appdb, mux_ports[0], start[0]) - self.set_mux_state(appdb, mux_ports[1], start[1]) - self.add_route(dvs, route, neighbors) - - # add R2 -> NH1 - # R2 should behave like a normal route. - self.add_route(dvs, r2, [neighbors[0]]) - if start[0] == "active": - self.check_route_nexthop(dvs_route, asicdb, r2, neighbors[0]) + assert self.check_route_nexthop(dvs_route, asicdb, rt, tunnel_nh_id, True), \ + f"Tunnel nexthop expected for route {rt}, but not found" + + def multi_nexthop_test_create(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, non_mux_nexthop = None): + ''' + Tests the creation of a route with multiple nexthops in various combinations of initial mux state + ''' + init_mux_states = list(itertools.product([ACTIVE, STANDBY], repeat=len(mux_ports))) + + print("Test create route in various combos of mux nexthop states for route with multiple nexthops") + for states in init_mux_states: + print("Create route with mux ports: %s in states: %s" % (str(mux_ports), str(states))) + # Set mux states + for i,port in enumerate(mux_ports): + self.set_mux_state(appdb, port, states[i]) + + # Add route + if non_mux_nexthop != None: + self.add_route(dvs, route, nexthops + [non_mux_nexthop]) + else: + self.add_route(dvs, route, nexthops) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, states, non_mux_nexthop) + + self.del_route(dvs, route) + + def multi_nexthop_test_toggle(self, appdb, asicdb, dvs_route, route, mux_ports, nexthops, non_mux_nexthop=None): + ''' + Tests toggling mux state for a route with multiple nexthops + ''' + init_mux_states = list(list(tup) for tup in itertools.product([ACTIVE, STANDBY], repeat=len(mux_ports))) + + print("Test toggling mux state for route with multiple mux nexthops") + for states in init_mux_states: + print("Testing state change in states: %s, for nexthops: %s" % (str(states), str(nexthops))) + for i,port in enumerate(mux_ports): + if nexthops[i] == non_mux_nexthop: + continue + self.set_mux_state(appdb, port, states[i]) + + for toggle_index,toggle_port in enumerate(mux_ports): + if nexthops[toggle_index] == non_mux_nexthop: + continue + new_states = states.copy() + + print("Toggling %s from %s" % (toggle_port, states[toggle_index])) + + if states[toggle_index] == ACTIVE: + new_states[toggle_index] = STANDBY + self.set_mux_state(appdb, toggle_port, STANDBY) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) + + new_states[toggle_index] = ACTIVE + self.set_mux_state(appdb, toggle_port, ACTIVE) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) else: - self.check_route_nexthop(dvs_route, asicdb, r2, tunnel_nh_id, True) + new_states[toggle_index] = ACTIVE + self.set_mux_state(appdb, toggle_port, ACTIVE) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) - self.del_route(dvs,route) - self.del_route(dvs,r2) + new_states[toggle_index] = STANDBY + self.set_mux_state(appdb, toggle_port, STANDBY) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, new_states, non_mux_nexthop) - # remove the neighbors - for neighbor in neighbors: - self.del_neighbor(dvs, neighbor) + # Set everything back to active + for i,port in enumerate(mux_ports): + if nexthops[i] == non_mux_nexthop: + continue + self.set_mux_state(appdb, port, ACTIVE) - # add one neighbor as MUX other as standalone - self.add_neighbor(dvs, neighbors[0], macs[i]) - self.add_neighbor(dvs, neighbors[1], "00:00:00:00:00:00") + def multi_nexthop_test_route_update_keep_size(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, new_nexthop, new_mux_port, nh_is_mux=True): + ''' + Tests route update for a route with multiple nexthops with same number of nexthops + - nh_is_mux: is True if new nexthop is a mux nexthop, False if not + ''' + # Add route + self.add_route(dvs, route, nexthops) - print("Testing one mux, one standalone neighbor") - for start in starting_states: - print("Testing add/remove of neighbors in %s, %s" % start) - self.set_mux_state(appdb, mux_ports[0], start[0]) - self.set_mux_state(appdb, mux_ports[1], start[1]) - self.add_route(dvs, route, neighbors) + print("Test route update for route with multiple mux nexthops") + for i,nexthop in enumerate(nexthops): + new_nexthops = nexthops.copy() + new_muxports = mux_ports.copy() - if start[0] == "active": - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) - else: - # N2 should always be standby - self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + print("Triggering route update %s to replace: %s with: %s" % (str(new_nexthops), str(nexthop), str(new_nexthop))) + new_nexthops[i] = new_nexthop + new_muxports[i] = new_mux_port - self.del_route(dvs,route) + if nh_is_mux: + # We need to sort the nexthops to match the way they will pe processed + new_nexthops.sort() + new_muxports.sort() - # remove the neighbors - for neighbor in neighbors: - self.del_neighbor(dvs, neighbor) + self.add_route(dvs, route, new_nexthops) - finally: - # Cleanup - for port in mux_ports: - self.set_mux_state(appdb, port, "active") - self.del_route(dvs,route) - for neighbor in neighbors: - self.del_neighbor(dvs, neighbor) + if nh_is_mux: + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, new_muxports, new_nexthops) + else: + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, new_muxports, new_nexthops, non_mux_nexthop=new_nexthop) + + # Reset route + self.add_route(dvs, route, nexthops) + + self.del_route(dvs, route) + + def multi_nexthop_test_route_update_increase_size(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, non_mux_nexthop=None): + ''' + Tests route update for a route with multiple nexthops increasing number of nexthops over time + ''' + print("Test route update for route with multiple mux nexthops") + for i,nexthop in enumerate(nexthops): + print("Triggering route update to add: %s. new route %s -> %s" % (str(nexthop), route, nexthops[:i+1])) + self.add_route(dvs, route, nexthops[:i+1]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports[:i+1], nexthops[:i+1]) + + # Add non_mux_nexthop to route list + if non_mux_nexthop != None: + print("Triggering route update to add non_mux: %s. new route %s -> %s" % (str(non_mux_nexthop), route, nexthops + [non_mux_nexthop])) + self.add_route(dvs, route, nexthops + [non_mux_nexthop]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports + [None], nexthops + [non_mux_nexthop], non_mux_nexthop=non_mux_nexthop) + + self.del_route(dvs, route) - def create_and_test_multi_nexthop_routes(self, dvs, dvs_route, appdb, macs, asicdb): + def multi_nexthop_test_route_update_decrease_size(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, non_mux_nexthop=None): + ''' + Tests route update for a route with multiple nexthops increasing number of nexthops over time + ''' + print("Test route update for route with multiple mux nexthops") + + if non_mux_nexthop != None: + print("Triggering route update to add non_mux: %s. new route %s -> %s" % (str(non_mux_nexthop), route, [non_mux_nexthop] + nexthops)) + self.add_route(dvs, route, [non_mux_nexthop] + nexthops) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, [None] + mux_ports, [non_mux_nexthop] + nexthops, non_mux_nexthop=non_mux_nexthop) + + for i,nexthop in enumerate(nexthops): + print("Triggering route update to remove: %s. new route %s -> %s" % (str(nexthop), route, nexthops[i:])) + self.add_route(dvs, route, nexthops[i:]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports[i:], nexthops[i:]) + + self.del_route(dvs, route) + + def multi_nexthop_test_neighbor_delete_and_create(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, macs): + ''' + Tests deleting neighbors in one state, then adding them in another + ''' + # Get array of mux states to test: + mux_states = list(list(tup) for tup in itertools.product([ACTIVE, STANDBY], repeat=len(mux_ports)*2)) + + print("Test deleting then creating neighbors:") + for states in mux_states: + initial_states = states[:len(mux_ports)] + final_states = states[len(mux_ports):] + + print(f"Test delete neighbors in {str(initial_states)} then creating in {str(final_states)}") + + # Set intial states: + for i,port in enumerate(mux_ports): + self.set_mux_state(appdb, port, initial_states[i]) + + # Delete all neighbors, ensure either active existing neighbor or tunnel nexthop as we go + for i, nexthop in enumerate(nexthops): + self.del_neighbor(dvs, nexthop) + + expect_active = i < len(initial_states) and \ + ACTIVE in initial_states[i:] + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, initial_states, expect_active=expect_active) + + # Set final states: + for i,port in enumerate(mux_ports): + self.set_mux_state(appdb, port, initial_states[i]) + + # Create all neighbors, ensure either active existing neighbor or tunnel nexthop as we go + for i, nexthop in enumerate(nexthops): + self.add_neighbor(dvs, nexthop, macs[i]) + + expect_active = i < len(initial_states) and \ + ACTIVE in initial_states[:i] + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, initial_states, expect_active=expect_active) + + def multi_nexthop_test_vlan_neighbor_update(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, macs, new_neighbor): + """ + Tests crash case where neighbor update on vlan updated multi-mux route when it should not have + Assumes 2-nexthop route + """ + for state in [ACTIVE, STANDBY]: + mux_states = [STANDBY, state] + + # Set intial states: + for i,port in enumerate(mux_ports): + self.set_mux_state(appdb, port, mux_states[i]) + + # Delete neighbor + print(f"Delete Neighbor on {mux_ports[0]}: {nexthops[0]}") + self.del_neighbor(dvs, nexthops[0]) + + # Add neighbor on other port: + print(f"Add Neighbor on {mux_ports[1]} in {state} state: {new_neighbor}") + self.add_neighbor(dvs, new_neighbor, macs[1]) + + # Check route is pointing to nexthop[1] (if Active) or tunnel nexthop + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, mux_states, expect_active=(state == ACTIVE)) + + # Delete neighbor on other port: + print(f"Delete Neighbor on {mux_ports[1]} in {state} state: {new_neighbor}") + self.del_neighbor(dvs, new_neighbor) + self.add_neighbor(dvs, nexthops[0], macs[0]) + + def multi_nexthop_test_fdb(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, macs): + ''' + Tests fbd updates for mux neighbors + ''' + init_mux_states = list(itertools.product([ACTIVE, STANDBY], repeat=len(mux_ports))) + + print("Test fdb update on route with multiple mux nexthops for various mux states") + for states in init_mux_states: + print("Testing fdb update in states: %s, for nexthops: %s" % (str(states), str(nexthops))) + + # Set mux states + for i,port in enumerate(mux_ports): + self.set_mux_state(appdb, port, states[i]) + + for i,nexthop in enumerate(nexthops): + print("Triggering fdb update for %s" % (nexthop)) + # only supports testing up to 9 nexhops at the moment + self.add_neighbor(dvs, nexthop, "00:aa:bb:cc:dd:0%d" % (i)) + self.multi_nexthop_check(asicdb, dvs_route, route, nexthops, states) + + # Reset fdb + self.add_neighbor(dvs, nexthop, macs[i]) + + def multi_nexthop_test_neighbor_unresolve(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops): + ''' + Tests deleting neighbors for a route with multiple nexthops + ''' + print("Test setting 0 mac neighbors for route with multiple mux nexthops") + for nexthop in nexthops: + print("Triggering neighbor unresolved for %s" % (nexthop)) + self.add_neighbor(dvs, nexthop, "00:00:00:00:00:00") + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports, nexthops) + + def multi_nexthop_test_neighbor_resolve(self, appdb, asicdb, dvs, dvs_route, route, mux_ports, nexthops, macs): + ''' + Tests adding neighbors for a route with multiple nexthops + ''' + print("Test adding neighbors for route with multiple mux nexthops") + for i,nexthop in enumerate(nexthops): + print("Triggering neighbor resolved for %s" % (nexthop)) + self.add_neighbor(dvs, nexthop, macs[i]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route, mux_ports, nexthops) + + def create_and_test_multi_nexthop_routes(self, dvs, dvs_route, appdb, macs, new_mac, asicdb): ''' Tests case where there are multiple nexthops tied to a route If the nexthops are tied to a mux, then only the first active neighbor will be programmed If not, the route should point to a regular ECMP group ''' - + # Routes route_ipv4 = "2.3.4.0/24" route_ipv6 = "2023::/64" - ipv4_neighbors = [self.SERV1_IPV4, self.SERV2_IPV4] - ipv6_neighbors = [self.SERV1_IPV6, self.SERV2_IPV6] + route_B_ipv4 = "2.3.5.0/24" + route_B_ipv6 = "2024::/64" - self.multi_nexthop_test(dvs, dvs_route, asicdb, appdb, route_ipv4, ipv4_neighbors, macs) - self.multi_nexthop_test(dvs, dvs_route, asicdb, appdb, route_ipv6, ipv6_neighbors, macs) + # Nexthop groups + ipv4_nexthops = [self.SERV1_IPV4, self.SERV2_IPV4] + ipv6_nexthops = [self.SERV1_IPV6, self.SERV2_IPV6] + new_ipv4_nexthop = self.SERV3_IPV4 + new_ipv6_nexthop = self.SERV3_IPV6 - try: - # neighbor not tied to mux cable case - non_mux_ipv4 = ["11.11.11.11", "12.12.12.12"] - non_mux_ipv6 = ["2222::100", "2222::101"] - non_mux_macs = ["00:aa:bb:cc:dd:ee", "00:aa:bb:cc:dd:ff"] - print("Testing neighbors that are not tied to a mux cable") + # Neighbor IPs + non_mux_ipv4 = "11.11.11.11" + non_mux_ipv6 = "2222::100" + mux_neighbor_ipv4 = "192.170.0.100" + mux_neighbor_ipv6 = "fc02:1000:100::100" - for i in range(2): - self.add_neighbor(dvs, non_mux_ipv4[i], non_mux_macs[i]) - self.add_neighbor(dvs, non_mux_ipv6[i], non_mux_macs[i]) + # Neighbor mac + non_mux_mac = "00:aa:aa:aa:aa:aa" - self.add_route(dvs, route_ipv4, non_mux_ipv4) - self.add_route(dvs, route_ipv6, non_mux_ipv6) + # Mux ports + mux_ports = ["Ethernet0", "Ethernet4"] + new_mux_port = "Ethernet8" - # Check for route pointing to first neighbor - self.check_route_nexthop(dvs_route, asicdb, route_ipv4, non_mux_ipv4[0]) - self.check_route_nexthop(dvs_route, asicdb, route_ipv6, non_mux_ipv6[0]) + for i,mac in enumerate(macs): + self.add_neighbor(dvs, ipv4_nexthops[i], mac) + self.add_neighbor(dvs, ipv6_nexthops[i], mac) - # Cleanup - self.del_route(dvs, route_ipv4) - self.del_route(dvs, route_ipv6) - for i in range(2): - self.del_neighbor(dvs, non_mux_ipv4[i]) - self.del_neighbor(dvs, non_mux_ipv6[i]) - - # neighbor not in mux cable case - non_mux_ipv4 = ["11.11.11.11", "12.12.12.12"] - non_mux_ipv6 = ["2222::100", "2222::101"] - non_mux_macs = ["00:aa:bb:cc:dd:ee", "00:aa:bb:cc:dd:ff"] - print("Testing neighbors that are not tied to a mux cable") - for i in range(2): - self.add_neighbor(dvs, non_mux_ipv4[i], non_mux_macs[i]) - self.add_neighbor(dvs, non_mux_ipv6[i], non_mux_macs[i]) - - self.add_route(dvs, route_ipv4, non_mux_ipv4) - self.add_route(dvs, route_ipv6, non_mux_ipv6) - - # Check for route pointing to first neighbor - self.check_route_nexthop(dvs_route, asicdb, route_ipv4, non_mux_ipv4[0]) - self.check_route_nexthop(dvs_route, asicdb, route_ipv6, non_mux_ipv6[0]) + self.add_neighbor(dvs, new_ipv4_nexthop, new_mac) + self.add_neighbor(dvs, new_ipv6_nexthop, new_mac) + self.add_neighbor(dvs, non_mux_ipv4, non_mux_mac) + self.add_neighbor(dvs, non_mux_ipv6, non_mux_mac) - # Cleanup - self.del_route(dvs, route_ipv4) - self.del_route(dvs, route_ipv6) - for i in range(2): - self.del_neighbor(dvs, non_mux_ipv4[i]) - self.del_neighbor(dvs, non_mux_ipv6[i]) - - - # add one neighbor as MUX other as non-mux - mux_ports = ["Ethernet0", "Ethernet4"] - self.set_mux_state(appdb, mux_ports[0], ACTIVE) - self.set_mux_state(appdb, mux_ports[1], ACTIVE) - self.add_neighbor(dvs, ipv4_neighbors[0], macs[0]) - self.add_neighbor(dvs, non_mux_ipv4[0], "00:aa:bb:cc:dd:ee") - - print("Testing one mux, one standalone neighbor ipv4") - starting_states = [(ACTIVE, ACTIVE), (ACTIVE, STANDBY), (STANDBY, ACTIVE), (STANDBY, STANDBY)] - for start in starting_states: - print("Testing add/remove of neighbors in %s, %s" % start) - self.set_mux_state(appdb, mux_ports[0], start[0]) - self.set_mux_state(appdb, mux_ports[1], start[1]) - self.add_route(dvs, route_ipv4, [ipv4_neighbors[0], non_mux_ipv4[0]]) - - #N2 should always be active - self.check_route_nexthop(dvs_route, asicdb, route_ipv4, non_mux_ipv4[0]) - - self.del_route(dvs,route_ipv4) - - self.del_neighbor(dvs, ipv4_neighbors[0]) - self.del_neighbor(dvs, non_mux_ipv4[0]) - - # add one neighbor as MUX other as non-mux for ipv6 - self.set_mux_state(appdb, mux_ports[0], ACTIVE) - self.set_mux_state(appdb, mux_ports[1], ACTIVE) - self.add_neighbor(dvs, ipv6_neighbors[0], macs[0]) - self.add_neighbor(dvs, non_mux_ipv6[0], "00:aa:bb:cc:dd:ee") - - print("Testing one mux, one standalone neighbor ipv6") - mux_ports = ["Ethernet0", "Ethernet4"] - starting_states = [(ACTIVE, ACTIVE), (ACTIVE, STANDBY), (STANDBY, ACTIVE), (STANDBY, STANDBY)] - for start in starting_states: - print("Testing add/remove of neighbors in %s, %s" % start) - self.set_mux_state(appdb, mux_ports[0], start[0]) - self.set_mux_state(appdb, mux_ports[1], start[1]) - self.add_route(dvs, route_ipv6, [ipv6_neighbors[0], non_mux_ipv6[0]]) - - #N2 should always be active - self.check_route_nexthop(dvs_route, asicdb, route_ipv6, non_mux_ipv6[0]) - - self.del_route(dvs,route_ipv6) - - self.del_neighbor(dvs, ipv6_neighbors[0]) - self.del_neighbor(dvs, non_mux_ipv6[0]) + for port in mux_ports: + self.set_mux_state(appdb, port, ACTIVE) + self.set_mux_state(appdb, new_mux_port, ACTIVE) + + try: + ### These tests create route: ### + + ## Testing route changes + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops) + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops) + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_ipv4) + self.multi_nexthop_test_create(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_ipv6) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, new_ipv4_nexthop, new_mux_port) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, new_ipv6_nexthop, new_mux_port) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_ipv4, None, nh_is_mux=False) + self.multi_nexthop_test_route_update_keep_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_ipv6, None, nh_is_mux=False) + self.multi_nexthop_test_route_update_increase_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_nexthop=non_mux_ipv4) + self.multi_nexthop_test_route_update_increase_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_nexthop=non_mux_ipv6) + self.multi_nexthop_test_route_update_decrease_size(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, non_mux_nexthop=non_mux_ipv4) + self.multi_nexthop_test_route_update_decrease_size(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, non_mux_nexthop=non_mux_ipv6) + + + ### The following tests do not create their own routes, create before and delete after ### + + ## Testing mux neighbors that do not match mux configured ip + + # add new mux vlan neighbor and route to test + self.add_neighbor(dvs, mux_neighbor_ipv4, macs[1]) + self.add_neighbor(dvs, mux_neighbor_ipv6, macs[1]) + self.add_route(dvs, route_ipv4, [self.SERV1_IPV4, mux_neighbor_ipv4]) + self.add_route(dvs, route_ipv6, [self.SERV1_IPV6, mux_neighbor_ipv6]) + + # test + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route_ipv4, mux_ports, [self.SERV1_IPV4, mux_neighbor_ipv4]) + self.multi_nexthop_test_toggle(appdb, asicdb, dvs_route, route_ipv6, mux_ports, [self.SERV1_IPV6, mux_neighbor_ipv6]) + + # cleanup new mux vlan neighbor and route to test + self.del_route(dvs,route_ipv4) + self.del_route(dvs,route_ipv6) + self.del_neighbor(dvs, mux_neighbor_ipv4) + self.del_neighbor(dvs, mux_neighbor_ipv6) + + ## Test neighbor operations: + # create the route + self.add_route(dvs, route_ipv4, ipv4_nexthops) + self.add_route(dvs, route_ipv6, ipv6_nexthops) + self.add_route(dvs, route_B_ipv4, ipv4_nexthops) + self.add_route(dvs, route_B_ipv6, ipv6_nexthops) + + self.multi_nexthop_test_vlan_neighbor_update(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, macs, mux_neighbor_ipv4) + self.multi_nexthop_test_vlan_neighbor_update(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, macs, mux_neighbor_ipv6) + + self.multi_nexthop_test_neighbor_delete_and_create(appdb, asicdb, dvs, dvs_route, route_ipv4, mux_ports, ipv4_nexthops, macs) + self.multi_nexthop_test_neighbor_delete_and_create(appdb, asicdb, dvs, dvs_route, route_ipv6, mux_ports, ipv6_nexthops, macs) + + self.multi_nexthop_test_fdb(appdb, asicdb, dvs, dvs_route, [route_ipv4, route_B_ipv4], mux_ports, ipv4_nexthops, macs) + self.multi_nexthop_test_fdb(appdb, asicdb, dvs, dvs_route, [route_ipv6, route_B_ipv6], mux_ports, ipv6_nexthops, macs) + self.multi_nexthop_test_neighbor_unresolve(appdb, asicdb, dvs, dvs_route, [route_ipv4, route_B_ipv4], mux_ports, ipv4_nexthops) + self.multi_nexthop_test_neighbor_unresolve(appdb, asicdb, dvs, dvs_route, [route_ipv6, route_B_ipv6], mux_ports, ipv6_nexthops) + self.multi_nexthop_test_neighbor_resolve(appdb, asicdb, dvs, dvs_route, [route_ipv4, route_B_ipv4], mux_ports, ipv4_nexthops, macs) + self.multi_nexthop_test_neighbor_resolve(appdb, asicdb, dvs, dvs_route, [route_ipv6, route_B_ipv6], mux_ports, ipv6_nexthops, macs) finally: # Cleanup - self.del_route(dvs, route_ipv4) - self.del_route(dvs, route_ipv6) - for i in range(2): - self.del_neighbor(dvs, non_mux_ipv4[i]) - self.del_neighbor(dvs, non_mux_ipv6[i]) + self.del_route(dvs,route_ipv4) + self.del_route(dvs,route_B_ipv4) + self.del_route(dvs,route_ipv6) + self.del_route(dvs,route_B_ipv6) + for neighbor in ipv4_nexthops: + self.del_neighbor(dvs, neighbor) + for neighbor in ipv6_nexthops: + self.del_neighbor(dvs, neighbor) + self.del_neighbor(dvs, new_ipv4_nexthop) + self.del_neighbor(dvs, new_ipv6_nexthop) + self.del_neighbor(dvs, mux_neighbor_ipv4) + self.del_neighbor(dvs, mux_neighbor_ipv6) def create_and_test_NH_routes(self, appdb, asicdb, dvs, dvs_route, mac): ''' @@ -1188,31 +1352,28 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_params): src_ip = tunnel_params['src_ip'] if 'src_ip' in tunnel_params else None self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, tunnel_params["dst_ip"].split(","), src_ip) - def remove_and_test_tunnel(self, db, asicdb, tunnel_name): + def remove_and_test_tunnel(self, configdb, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" - - tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE) - tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES) - tunnel_app_table = swsscommon.Table(asicdb, self.APP_TUNNEL_DECAP_TABLE_NAME) + tunnel_table = swsscommon.Table(asicdb.db_connection, self.ASIC_TUNNEL_TABLE) + tunnel_term_table = swsscommon.Table(asicdb.db_connection, self.ASIC_TUNNEL_TERM_ENTRIES) tunnels = tunnel_table.getKeys() tunnel_sai_obj = tunnels[0] - status, fvs = tunnel_table.get(tunnel_sai_obj) + _, fvs = tunnel_table.get(tunnel_sai_obj) # get overlay loopback interface oid to check if it is deleted with the tunnel overlay_infs_id = {f:v for f, v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - ps.set(tunnel_name, create_fvs(), 'DEL') + configdb.delete_entry(self.CONFIG_TUNNEL_TABLE_NAME, tunnel_name) # wait till config will be applied - time.sleep(1) + time.sleep(5) assert len(tunnel_table.getKeys()) == 0 assert len(tunnel_term_table.getKeys()) == 0 - assert len(tunnel_app_table.getKeys()) == 0 - assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + with pytest.raises(AssertionError): + self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) def check_app_db_neigh_table( self, appdb, intf, neigh_ip, @@ -1230,6 +1391,7 @@ def check_app_db_neigh_table( appdb.wait_for_field_match(self.APP_NEIGH_TABLE, key, {'neigh': mac}) else: appdb.wait_for_deleted_keys(self.APP_NEIGH_TABLE, key) + def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) # Apply QoS map to config db @@ -1315,10 +1477,22 @@ def setup_mux_cable(self, dvs): @pytest.fixture(scope='module') def setup_tunnel(self, dvs): - app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - ps = swsscommon.ProducerStateTable(app_db_connector, self.APP_TUNNEL_DECAP_TABLE_NAME) - fvs = create_fvs(**self.DEFAULT_TUNNEL_PARAMS) - ps.set(self.MUX_TUNNEL_0, fvs) + config_db = dvs.get_config_db() + config_db.create_entry( + self.CONFIG_TUNNEL_TABLE_NAME, + self.MUX_TUNNEL_0, + self.DEFAULT_TUNNEL_PARAMS + ) + + @pytest.fixture + def restore_tunnel(self, dvs): + yield + config_db = dvs.get_config_db() + config_db.create_entry( + self.CONFIG_TUNNEL_TABLE_NAME, + self.MUX_TUNNEL_0, + self.DEFAULT_TUNNEL_PARAMS + ) @pytest.fixture def setup_peer_switch(self, dvs): @@ -1329,8 +1503,8 @@ def setup_peer_switch(self, dvs): self.DEFAULT_PEER_SWITCH_PARAMS ) - @pytest.fixture - def remove_peer_switch(self, dvs): + yield + config_db = dvs.get_config_db() config_db.delete_entry(self.CONFIG_PEER_SWITCH, self.PEER_SWITCH_HOST) @@ -1440,11 +1614,11 @@ def setup(self, dvs): self.remove_qos_map(db, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) self.remove_qos_map(db, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) - - def test_Tunnel(self, dvs, setup_tunnel, testlog, setup): + def test_Tunnel(self, dvs, setup_tunnel, restore_tunnel, testlog, setup): """ test IPv4 Mux tunnel creation """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() + configdb = dvs.get_config_db() #self.cleanup_left_over(db, asicdb) _, _, dscp_to_tc_map_oid, tc_to_pg_map_oid = setup @@ -1454,6 +1628,8 @@ def test_Tunnel(self, dvs, setup_tunnel, testlog, setup): # create tunnel IPv4 tunnel self.create_and_test_tunnel(db, asicdb, self.MUX_TUNNEL_0, tunnel_params) + # remove tunnel IPv4 tunnel + self.remove_and_test_tunnel(configdb, asicdb, self.MUX_TUNNEL_0) def test_Peer(self, dvs, setup_peer_switch, setup_tunnel, setup, testlog): @@ -1465,6 +1641,73 @@ def test_Peer(self, dvs, setup_peer_switch, setup_tunnel, setup, testlog): self.create_and_test_peer(asicdb, encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id) + def test_neighbor_learned_before_mux_config(self, dvs, dvs_route, setup, setup_vlan, setup_peer_switch, setup_tunnel, testlog): + """ test neighbors learned before mux config """ + test_ip_v4 = "192.168.0.110" + test_ip_v6 = "fc02:1000::110" + + toggle_map = {"active": "standby", "standby": "active"} + + asicdb = dvs.get_asic_db() + config_db = dvs.get_config_db() + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + dvs.runcmd("ip neigh flush all") + for create_route in [False, True]: + for state in ["active", "standby"]: + try: + current_state = state + + # Step 1.a: add neighbor on port + self.add_fdb(dvs, "Ethernet4", "00-00-00-11-11-11") + + self.add_neighbor(dvs, test_ip_v4, "00:00:00:11:11:11") + self.check_neigh_in_asic_db(asicdb, test_ip_v4, expected=True) + + self.add_neighbor(dvs, test_ip_v6, "00:00:00:11:11:11") + self.check_neigh_in_asic_db(asicdb, test_ip_v6, expected=True) + + if create_route: + # Step 1.b: Create a route pointing to the neighbor + self.add_route(dvs, "11.11.11.11/32", ["192.168.0.100"]) + + # Step 2: configure mux port and verify neighbor state. + self.set_mux_state(appdb, "Ethernet4", current_state) + fvs = {"server_ipv4": self.SERV2_IPV4+self.IPV4_MASK, + "server_ipv6": self.SERV2_IPV6+self.IPV6_MASK} + config_db.create_entry(self.CONFIG_MUX_CABLE, "Ethernet4", fvs) + + self.check_neigh_in_asic_db(asicdb, test_ip_v4, expected=(current_state != "standby")) + self.check_tunnel_route_in_app_db(dvs, [test_ip_v4+self.IPV4_MASK], expected=(current_state == "standby")) + self.check_neigh_in_asic_db(asicdb, test_ip_v6, expected=(current_state != "standby")) + self.check_tunnel_route_in_app_db(dvs, [test_ip_v6+self.IPV6_MASK], expected=(current_state == "standby")) + + # Step 3: toggle mux state and verify neighbor state. + current_state = toggle_map[current_state] + self.set_mux_state(appdb, "Ethernet4", current_state) + + self.check_neigh_in_asic_db(asicdb, test_ip_v4, expected=(current_state != "standby")) + self.check_tunnel_route_in_app_db(dvs, [test_ip_v4+self.IPV4_MASK], expected=(current_state == "standby")) + self.check_neigh_in_asic_db(asicdb, test_ip_v6, expected=(current_state != "standby")) + self.check_tunnel_route_in_app_db(dvs, [test_ip_v6+self.IPV6_MASK], expected=(current_state == "standby")) + + # Step 4: toggle mux state back to initial state and verify neighbor state. + current_state = toggle_map[current_state] + self.set_mux_state(appdb, "Ethernet4", current_state) + + self.check_neigh_in_asic_db(asicdb, test_ip_v4, expected=(current_state != "standby")) + self.check_tunnel_route_in_app_db(dvs, [test_ip_v4+self.IPV4_MASK], expected=(current_state == "standby")) + self.check_neigh_in_asic_db(asicdb, test_ip_v6, expected=(current_state != "standby")) + self.check_tunnel_route_in_app_db(dvs, [test_ip_v6+self.IPV6_MASK], expected=(current_state == "standby")) + + finally: + if create_route: + self.del_route(dvs, "11.11.11.11/32") + self.del_neighbor(dvs, test_ip_v4) + self.del_neighbor(dvs, test_ip_v6) + config_db.delete_entry(self.CONFIG_MUX_CABLE, "Ethernet4") + dvs.runcmd("ip neigh flush all") + def test_Neighbor(self, dvs, dvs_route, setup_vlan, setup_mux_cable, testlog): """ test Neighbor entries and mux state change """ @@ -1482,13 +1725,16 @@ def test_Fdb(self, dvs, dvs_route, testlog): self.create_and_test_fdb(appdb, asicdb, dvs, dvs_route) - def test_Route(self, dvs, dvs_route, testlog): + def test_Route(self, dvs, intf_fdb_map, dvs_route, setup, setup_vlan, setup_peer_switch, setup_tunnel, setup_mux_cable, testlog): """ test Route entries and mux state change """ appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() + mac_Ethernet0 = intf_fdb_map["Ethernet0"] + mac_Ethernet4 = intf_fdb_map["Ethernet4"] - self.create_and_test_route(appdb, asicdb, dvs, dvs_route) + self.create_and_test_route(appdb, asicdb, dvs, dvs_route, mac_Ethernet0, mac_Ethernet4) + self.create_and_test_route_learned_before_neighbor(appdb, asicdb, dvs, dvs_route, mac_Ethernet0) def test_NH(self, dvs, dvs_route, intf_fdb_map, setup, setup_mux_cable, setup_peer_switch, setup_tunnel, testlog): @@ -1498,16 +1744,17 @@ def test_NH(self, dvs, dvs_route, intf_fdb_map, setup, setup_mux_cable, mac = intf_fdb_map["Ethernet0"] # get tunnel nexthop - self.check_tnl_nexthop_in_asic_db(asicdb, 5) + self.check_tnl_nexthop_in_asic_db(asicdb) self.create_and_test_NH_routes(appdb, asicdb, dvs, dvs_route, mac) - def test_multi_nexthop(self, dvs, dvs_route, intf_fdb_map, neighbor_cleanup, testlog): + def test_multi_nexthop(self, dvs, dvs_route, intf_fdb_map, neighbor_cleanup, testlog, setup): appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() - macs = [intf_fdb_map["Ethernet0"], intf_fdb_map["Ethernet4"], intf_fdb_map["Ethernet8"]] + macs = [intf_fdb_map["Ethernet0"], intf_fdb_map["Ethernet4"]] + new_mac = intf_fdb_map["Ethernet8"] - self.create_and_test_multi_nexthop_routes(dvs, dvs_route, appdb, macs, asicdb) + self.create_and_test_multi_nexthop_routes(dvs, dvs_route, appdb, macs, new_mac, asicdb) def test_acl(self, dvs, dvs_acl, testlog): """ test acl and mux state change """ @@ -1587,7 +1834,7 @@ def test_neighbor_miss_no_mux( def test_neighbor_miss_no_peer( self, dvs, dvs_route, setup_vlan, setup_mux_cable, setup_tunnel, - remove_peer_switch, neighbor_cleanup, testlog + neighbor_cleanup, testlog ): """ test neighbor miss with no peer switch configured @@ -1609,7 +1856,7 @@ def test_soc_ip(self, dvs, dvs_route, setup_vlan, setup_mux_cable, testlog): def test_warm_boot_mux_state( self, dvs, dvs_route, setup_vlan, setup_mux_cable, setup_tunnel, - remove_peer_switch, neighbor_cleanup, testlog + setup_peer_switch, neighbor_cleanup, testlog ): """ test mux initialization during warm boot. @@ -1625,6 +1872,7 @@ def test_warm_boot_mux_state( dvs.runcmd("config warm_restart enable swss") dvs.stop_swss() dvs.start_swss() + dvs.runcmd(['sh', '-c', 'supervisorctl start restore_neighbors']) time.sleep(5) @@ -1643,6 +1891,92 @@ def test_warm_boot_mux_state( if key == "state": assert fvs[key] == "standby", "Ethernet8 Mux state is not standby after warm boot, state: {}".format(fvs[key]) + def test_warm_boot_neighbor_restore( + self, dvs, dvs_route, setup, setup_vlan, setup_mux_cable, setup_tunnel, + setup_peer_switch, neighbor_cleanup, testlog + ): + """Test neighbors could be restored to correct state based on mux state after warm boot.""" + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + apdb = dvs.get_app_db() + asicdb = dvs.get_asic_db() + + self.set_mux_state(appdb, "Ethernet0", "active") + self.set_mux_state(appdb, "Ethernet4", "active") + self.set_mux_state(appdb, "Ethernet8", "standby") + + self.add_fdb(dvs, "Ethernet0", "00-00-00-00-00-01") + self.add_fdb(dvs, "Ethernet4", "00-00-00-00-00-02") + self.add_fdb(dvs, "Ethernet8", "00-00-00-00-00-03") + + self.add_neighbor(dvs, self.SERV1_IPV4, "00:00:00:00:00:01") + self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01") + self.add_neighbor(dvs, self.NEIGH1_IPV4, "00:00:00:00:00:01") + self.add_neighbor(dvs, self.NEIGH1_IPV6, "00:00:00:00:00:01") + self.add_neighbor(dvs, self.SERV2_IPV4, "00:00:00:00:00:02") + self.add_neighbor(dvs, self.SERV2_IPV6, "00:00:00:00:00:02") + self.add_neighbor(dvs, self.NEIGH2_IPV4, "00:00:00:00:00:02") + self.add_neighbor(dvs, self.NEIGH2_IPV6, "00:00:00:00:00:02") + self.add_neighbor(dvs, self.SERV3_IPV4, "00:00:00:00:00:03") + self.add_neighbor(dvs, self.SERV3_IPV6, "00:00:00:00:00:03") + self.add_neighbor(dvs, self.NEIGH3_IPV4, "00:00:00:00:00:03") + self.add_neighbor(dvs, self.NEIGH3_IPV6, "00:00:00:00:00:03") + + time.sleep(5) + + self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4) + self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6) + self.check_neigh_in_asic_db(asicdb, self.NEIGH1_IPV4) + self.check_neigh_in_asic_db(asicdb, self.NEIGH1_IPV6) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6) + self.check_neigh_in_asic_db(asicdb, self.NEIGH2_IPV4) + self.check_neigh_in_asic_db(asicdb, self.NEIGH2_IPV6) + dvs_route.check_asicdb_route_entries( + [ + self.SERV3_IPV4 + self.IPV4_MASK, + self.SERV3_IPV6 + self.IPV6_MASK, + self.NEIGH3_IPV4 + self.IPV4_MASK, + self.NEIGH3_IPV6 + self.IPV6_MASK + ] + ) + # Execute the warm reboot + dvs.runcmd("config warm_restart enable system") + dvs.stop_swss() + dvs.start_swss() + + time.sleep(5) + + fvs = apdb.get_entry(self.APP_MUX_CABLE, "Ethernet0") + for key in fvs: + if key == "state": + assert fvs[key] == "active", "Ethernet0 Mux state is not active after warm boot, state: {}".format(fvs[key]) + + fvs = apdb.get_entry(self.APP_MUX_CABLE, "Ethernet4") + for key in fvs: + if key == "state": + assert fvs[key] == "active", "Ethernet4 Mux state is not active after warm boot, state: {}".format(fvs[key]) + + fvs = apdb.get_entry(self.APP_MUX_CABLE, "Ethernet8") + for key in fvs: + if key == "state": + assert fvs[key] == "standby", "Ethernet8 Mux state is not standby after warm boot, state: {}".format(fvs[key]) + + self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4) + self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6) + self.check_neigh_in_asic_db(asicdb, self.NEIGH1_IPV4) + self.check_neigh_in_asic_db(asicdb, self.NEIGH1_IPV6) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6) + self.check_neigh_in_asic_db(asicdb, self.NEIGH2_IPV4) + self.check_neigh_in_asic_db(asicdb, self.NEIGH2_IPV6) + dvs_route.check_asicdb_route_entries( + [ + self.SERV3_IPV4 + self.IPV4_MASK, + self.SERV3_IPV6 + self.IPV6_MASK, + self.NEIGH3_IPV4 + self.IPV4_MASK, + self.NEIGH3_IPV6 + self.IPV6_MASK + ] + ) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_nhg.py b/tests/test_nhg.py index 6647a8d0dec..2b71fb0993a 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -84,6 +84,8 @@ def get_nhg_map_id(self, nhg_map_index): # Create a NHG fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1'), ('ifname', 'Ethernet0')]) + nhg_ps.set('_testnhg', fvs) + fvs = swsscommon.FieldValuePairs([('nexthop_group', '_testnhg')]) nhg_ps.set('testnhg', fvs) # Add a CBF NHG pointing to the given map @@ -98,6 +100,7 @@ def get_nhg_map_id(self, nhg_map_index): # Remove the added NHGs cbf_nhg_ps._del('testcbfnhg') nhg_ps._del('testnhg') + nhg_ps._del('_testnhg') self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, asic_nhgs_count) return None @@ -111,6 +114,7 @@ def get_nhg_map_id(self, nhg_map_index): # Remove the added NHGs cbf_nhg_ps._del('testcbfnhg') nhg_ps._del('testnhg') + nhg_ps._del('_testnhg') self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, asic_nhgs_count) return nhg_map_id @@ -121,22 +125,38 @@ def port_name(self, i): def port_ip(self, i): return "10.0.0." + str(i * 2) + def port_ipv6(self, i): + return "fc00::" + str(hex((i * 2)))[2:] + def port_ipprefix(self, i): return self.port_ip(i) + "/31" + def port_ipv6prefix(self, i): + return self.port_ipv6(i) + "/126" + def peer_ip(self, i): return "10.0.0." + str(i * 2 + 1) + def peer_ipv6(self, i): + return "fc00::" + str(hex((i * 2 + 1)))[2:] + def port_mac(self, i): return "00:00:00:00:00:0" + str(i + 1) - def config_intf(self, i): + def config_intf(self, i, is_ipv6_needed=False): fvs = {'NULL': 'NULL'} self.config_db.create_entry("INTERFACE", self.port_name(i), fvs) self.config_db.create_entry("INTERFACE", "{}|{}".format(self.port_name(i), self.port_ipprefix(i)), fvs) + if is_ipv6_needed: + self.config_db.create_entry("INTERFACE", "{}|{}".format(self.port_name(i), self.port_ipv6prefix(i)), fvs) + self.dvs.port_admin_set(self.port_name(i), "up") self.dvs.runcmd("arp -s {} {}".format(self.peer_ip(i), self.port_mac(i))) + if is_ipv6_needed: + command = "ip -6 neighbor replace {} lladdr {} dev {}".format(self.peer_ipv6(i), + self.port_mac(i), self.port_name(i)) + self.dvs.runcmd(command) assert self.dvs.servers[i].runcmd("ip link set down dev eth0") == 0 assert self.dvs.servers[i].runcmd("ip link set up dev eth0") == 0 @@ -184,7 +204,7 @@ def update_bfd_session_state(self, dvs, session, state): ntf.send("bfd_session_state_change", ntf_data, fvp) # BFD utilities for static route BFD and ecmp acceleration -- end - def init_test(self, dvs, num_intfs): + def init_test(self, dvs, num_intfs, is_ipv6_needed=False): self.dvs = dvs self.app_db = self.dvs.get_app_db() self.asic_db = self.dvs.get_asic_db() @@ -201,7 +221,7 @@ def init_test(self, dvs, num_intfs): self.dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_MAX_NUMBER_OF_FORWARDING_CLASSES', '63') for i in range(num_intfs): - self.config_intf(i) + self.config_intf(i, is_ipv6_needed) self.asic_nhgs_count = len(self.asic_db.get_keys(self.ASIC_NHG_STR)) self.asic_nhgms_count = len(self.asic_db.get_keys(self.ASIC_NHGM_STR)) @@ -1546,6 +1566,67 @@ def create_route_inexistent_nhg_test(): self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count) def test_nhgorch_nh_group(self, dvs, testlog): + # Test scenario: + # - create recursive nhg - rec_grp1 with two members - grp1 and grp2 only one of which exists + # - create singleton nhg grp2 and check if the rec_grp1 is updated with both the members + # - create a recursive nhg - rec_grp2 with another recursive nhg - rec_grp1 as member. Assert that the nhg is not created. + def create_recursive_nhg_test(): + # create next hop group in APPL DB + fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1'), ('ifname', 'Ethernet0')]) + self.nhg_ps.set("grp1", fvs) + + # create a recursive nexthop group with two members + fvs = swsscommon.FieldValuePairs([('nexthop_group', 'grp1,grp2')]) + self.nhg_ps.set("rec_grp1", fvs) + + # check if group was propagated to ASIC DB with the existing member + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1) + assert self.nhg_exists('rec_grp1') + + # check if the existing member was propagated to ASIC DB + self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 1) + assert len(self.get_nhgm_ids('rec_grp1')) == 1 + + # add another singleton nexthop group - grp2 + fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.3'), ('ifname', 'Ethernet4')]) + self.nhg_ps.set("grp2", fvs) + + # check if both the members were propagated to ASIC DB + self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2) + assert len(self.get_nhgm_ids('rec_grp1')) == 2 + + # update the recursive nexthop group with another member not yet existing + fvs = swsscommon.FieldValuePairs([('nexthop_group', 'grp1,grp2,grp3')]) + self.nhg_ps.set("rec_grp1", fvs) + + # check if only two members were propagated to ASIC DB + self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 2) + assert len(self.get_nhgm_ids('rec_grp1')) == 2 + + # add another singleton nexthop group - grp3 + fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.5'), ('ifname', 'Ethernet8')]) + self.nhg_ps.set("grp3", fvs) + + # check if all members were propagated to ASIC DB + self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 3) + assert len(self.get_nhgm_ids('rec_grp1')) == 3 + + # create a recursive nhg with another recursive nhg as member + fvs = swsscommon.FieldValuePairs([('nexthop_group', 'rec_grp1')]) + self.nhg_ps.set("rec_grp2", fvs) + + # check that the group was not propagated to ASIC DB + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 1) + assert not self.nhg_exists('rec_grp2') + + self.nhg_ps._del("rec_grp2") + self.nhg_ps._del("rec_grp1") + self.nhg_ps._del("grp1") + self.nhg_ps._del("grp2") + self.nhg_ps._del("grp3") + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count) + self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count) + # Test scenario: # - create NHG 'group1' and assert it is being added to ASIC DB along with its members def create_nhg_test(): @@ -1705,8 +1786,8 @@ def update_nhgm_count_test(): # Update the group to one NH only fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1'), ("ifname", "Ethernet0")]) self.nhg_ps.set("group1", fvs) - self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count + 1) - assert len(self.get_nhgm_ids('group1')) == 1 + self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count) + assert len(self.get_nhgm_ids('group1')) == 0 # Update the group to 2 NHs fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1,10.0.0.3'), ("ifname", "Ethernet0,Ethernet4")]) @@ -1716,6 +1797,7 @@ def update_nhgm_count_test(): self.init_test(dvs, 4) + create_recursive_nhg_test() create_nhg_test() create_route_nhg_test() link_flap_test() @@ -1759,6 +1841,252 @@ def test_nhgorch_label_route(self, dvs, testlog): self.nhg_ps._del("group1") self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count) + def test_route_fallback_to_default_bothv4v6(self, dvs, dvs_route, testlog): + self.init_test(dvs, 6, True) + rtprefix_v6 = "2603:10b0::1/120" + defaultprefix_v6 = "::/0" + nexthop_str_v6 = "fc00::1,fc00::3,fc00::5" + default_nexthop_str_v6 = "fc00::7,fc00::9,fc00::b" + rtprefix = "3.3.3.0/24" + defaultprefix = "0.0.0.0/0" + nexthop_str = "10.0.0.1,10.0.0.3,10.0.0.5" + default_nexthop_str = "10.0.0.7,10.0.0.9,10.0.0.11" + + dvs_route.check_asicdb_deleted_route_entries([rtprefix, rtprefix_v6]) + + try: + dvs.disable_fpmsyncd() + # Program Regular Rouute with fallback to default + fvs = swsscommon.FieldValuePairs([("nexthop",nexthop_str), + ("ifname", "Ethernet0,Ethernet4,Ethernet8"), + ("fallback_to_default_route", "true")]) + self.rt_ps.set(rtprefix, fvs) + time.sleep(1) + + fvs = swsscommon.FieldValuePairs([("nexthop",nexthop_str_v6), + ("ifname", "Ethernet0,Ethernet4,Ethernet8"), + ("fallback_to_default_route", "true")]) + self.rt_ps.set(rtprefix_v6, fvs) + time.sleep(1) + + # Program default route + fvs = swsscommon.FieldValuePairs([("nexthop", default_nexthop_str), + ("ifname", "Ethernet12,Ethernet16,Ethernet20")]) + + self.rt_ps.set(defaultprefix, fvs) + time.sleep(1) + + fvs = swsscommon.FieldValuePairs([("nexthop", default_nexthop_str_v6), + ("ifname", "Ethernet12,Ethernet16,Ethernet20")]) + + self.rt_ps.set(defaultprefix_v6, fvs) + time.sleep(1) + + # check if route was propagated to ASIC DB + rtkeys = dvs_route.check_asicdb_route_entries([rtprefix, rtprefix_v6]) + + # check if default route was propagated to ASIC DB + defaultrtkeys = dvs_route.check_asicdb_route_entries([defaultprefix, defaultprefix_v6]) + + default_nhgid = [] + default_nhops = set() + default_nhgmids = set() + default_nhopsids = set() + default_parentnhgid = set() + + rt_nhgid = [] + rt_nhops = set() + rt_nhopsids = set() + rt_nhgmids = set() + rt_parentnhgid = set() + + # assert the route points to next hop group + flat_list = [] + for x in rtkeys: + flat_list.append(x) + for x in defaultrtkeys: + flat_list.append(x) + for idx, rtkey in enumerate(flat_list): + fvs = self.asic_db.get_entry(self.ASIC_RT_STR, rtkey) + if idx in [0,1]: + rt_nhgid.append(fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"]) + else: + default_nhgid.append(fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"]) + + nhgid = fvs["SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID"] + + fvs = self.asic_db.get_entry(self.ASIC_NHG_STR, nhgid) + + assert bool(fvs) + + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + + assert len(keys) == 12 + + for k in keys: + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] in rt_nhgid: + rt_nhopsids.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"]) + rt_nhops.add(nh_fvs["SAI_NEXT_HOP_ATTR_IP"]) + rt_nhgmids.add(k) + rt_parentnhgid.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"]) + elif fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] in default_nhgid: + default_nhopsids.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"]) + default_nhops.add(nh_fvs["SAI_NEXT_HOP_ATTR_IP"]) + default_nhgmids.add(k) + default_parentnhgid.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"]) + + assert len(rt_parentnhgid) == 2 + assert len(rt_nhopsids) == 6 + assert len(rt_nhops) == 6 + assert len(rt_nhgmids) == 6 + + assert len(default_parentnhgid) == 2 + assert len(default_nhopsids) == 6 + assert len(default_nhops) == 6 + assert len(default_nhgmids) == 6 + + assert rt_nhops != default_nhops + assert rt_nhopsids != default_nhopsids + assert rt_parentnhgid != default_parentnhgid + assert rt_nhgmids != default_nhgmids + + # bring links down one-by-one + for i in [0, 1, 2]: + self.flap_intf(i, 'down') + time.sleep(1) + + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + + if i != 2: + assert len(keys) == 10 - (i * 2) + else: + # Last Link down so we will fallback to default eoute 3 members + assert len(keys) == 12 + rt_nhops.clear() + rt_nhgmids.clear() + rt_nhopsids.clear() + for k in keys: + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] in rt_nhgid: + rt_nhopsids.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"]) + rt_nhops.add(nh_fvs["SAI_NEXT_HOP_ATTR_IP"]) + rt_nhgmids.add(k) + rt_parentnhgid.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"]) + elif fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] in default_nhgid: + default_nhopsids.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"]) + default_nhops.add(nh_fvs["SAI_NEXT_HOP_ATTR_IP"]) + default_nhgmids.add(k) + default_parentnhgid.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"]) + + assert len(rt_parentnhgid) == 2 + assert len(rt_nhopsids) == 6 + assert len(rt_nhops) == 6 + assert len(rt_nhgmids) == 6 + + assert len(default_parentnhgid) == 2 + assert len(default_nhopsids) == 6 + assert len(default_nhops) == 6 + assert len(default_nhgmids) == 6 + + assert rt_nhops == default_nhops + assert rt_nhopsids == default_nhopsids + assert rt_nhgmids != default_nhgmids + + # bring links up one-by-one + # Bring link up in random order to verify sequence id is as per order + for i, val in enumerate([2,1,0]): + self.flap_intf(i, 'up') + time.sleep(1) + + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + + assert len(keys) == 12 + + for k in keys: + fvs = self.asic_db.get_entry(self.ASIC_NHGM_STR, k) + nhid = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + nh_fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nhid) + + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] in rt_nhgid: + rt_nhopsids.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"]) + rt_nhops.add(nh_fvs["SAI_NEXT_HOP_ATTR_IP"]) + rt_nhgmids.add(k) + rt_parentnhgid.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"]) + elif fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] in default_nhgid: + default_nhopsids.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"]) + default_nhops.add(nh_fvs["SAI_NEXT_HOP_ATTR_IP"]) + default_nhgmids.add(k) + default_parentnhgid.add(fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"]) + + assert len(rt_parentnhgid) == 2 + assert len(rt_nhopsids) == 6 + assert len(rt_nhops) == 6 + assert len(rt_nhgmids) == 6 + + assert len(default_parentnhgid) == 2 + assert len(default_nhopsids) == 6 + assert len(default_nhops) == 6 + assert len(default_nhgmids) == 6 + + assert rt_nhops == default_nhops + assert rt_nhopsids == default_nhopsids + assert rt_nhgmids != default_nhgmids + + # bring links up one-by-one + # Bring link up in random order to verify sequence id is as per order + for i, val in enumerate([2,1,0]): + self.flap_intf(i, 'down') + time.sleep(1) + + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + + assert len(keys) == 12 + + # bring links up one-by-one + # Bring link up in random order to verify sequence id is as per order + for i, val in enumerate([2,1,0]): + self.flap_intf(i, 'up') + time.sleep(1) + + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + + assert len(keys) == 12 + + # Remove route 2.2.2.0/24 + self.rt_ps._del(rtprefix) + time.sleep(1) + + self.rt_ps._del(rtprefix_v6) + time.sleep(1) + + # Wait for route 2.2.2.0/24 to be removed + dvs_route.check_asicdb_deleted_route_entries([rtprefix, rtprefix_v6]) + + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + + assert len(keys) == 6 + + # Remove route 0.0.0.0/0 + self.rt_ps._del(defaultprefix) + time.sleep(1) + + self.rt_ps._del(defaultprefix_v6) + time.sleep(1) + + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + + assert len(keys) == 0 + + finally: + dvs.start_fpmsyncd() + class TestCbfNextHopGroup(TestNextHopGroupBase): MAX_NHG_MAP_COUNT = 512 @@ -1838,9 +2166,11 @@ def data_validation_test(): # - update the CBF NHG reordering the members and assert the new details match def update_cbf_nhg_members_test(): # Create a NHG with a single next hop - fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1'), - ("ifname", "Ethernet0")]) + fvs = swsscommon.FieldValuePairs([('nexthop', '10.0.0.1'), ('ifname', 'Ethernet0')]) + self.nhg_ps.set("_group3", fvs) + fvs = swsscommon.FieldValuePairs([('nexthop_group','_group3')]) self.nhg_ps.set("group3", fvs) + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count + 3) # Create a CBF NHG @@ -2035,6 +2365,8 @@ def create_cbf_invalid_nhg_map_test(): self.cbf_nhg_ps._del('cbfgroup1') self.nhg_ps._del('group2') self.nhg_ps._del('group3') + self.nhg_ps._del('_group3') + self.asic_db.wait_for_n_keys(self.ASIC_NHG_STR, self.asic_nhgs_count) self.asic_db.wait_for_n_keys(self.ASIC_NHGM_STR, self.asic_nhgms_count) diff --git a/tests/test_pac.py b/tests/test_pac.py new file mode 100644 index 00000000000..a913fddfc90 --- /dev/null +++ b/tests/test_pac.py @@ -0,0 +1,209 @@ +import time + +from swsscommon import swsscommon + +def create_entry(tbl, key, pairs): + fvs = swsscommon.FieldValuePairs(pairs) + tbl.set(key, fvs) + + # FIXME: better to wait until DB create them + time.sleep(1) + +def remove_entry(tbl, key): + tbl._del(key) + time.sleep(1) + +def create_entry_tbl(db, table, key, pairs): + tbl = swsscommon.Table(db, table) + create_entry(tbl, key, pairs) + +def remove_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + remove_entry(tbl, key) + +def create_entry_pst(db, table, key, pairs): + tbl = swsscommon.ProducerStateTable(db, table) + create_entry(tbl, key, pairs) + +def how_many_entries_exist(db, table): + tbl = swsscommon.Table(db, table) + return len(tbl.getKeys()) + +def get_port_oid(db, port_name): + port_map_tbl = swsscommon.Table(db, 'COUNTERS_PORT_NAME_MAP') + for k in port_map_tbl.get('')[1]: + if k[0] == port_name: + return k[1] + return None + +def get_bridge_port_oid(db, port_oid): + tbl = swsscommon.Table(db, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + for key in tbl.getKeys(): + status, data = tbl.get(key) + assert status + values = dict(data) + if port_oid == values["SAI_BRIDGE_PORT_ATTR_PORT_ID"]: + return key + return None + +def check_learn_mode_in_asicdb(db, interface_oid, learn_mode): + # Get bridge port oid + bridge_port_oid = get_bridge_port_oid(db, interface_oid) + assert bridge_port_oid is not None + + tbl = swsscommon.Table(db, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + (status, fvs) = tbl.get(bridge_port_oid) + assert status == True + values = dict(fvs) + if values["SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE"] == learn_mode: + return True + else: + return False + +class TestPac(object): + def test_PacvlanMemberAndFDBAddRemove(self, dvs, testlog): + dvs.setup_db() + time.sleep(2) + + vlan_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + # create vlan + dvs.create_vlan("2") + time.sleep(1) + + # Get bvid from vlanid + ok, bvid = dvs.get_vlan_oid(dvs.adb, "2") + assert ok, bvid + + dvs.create_vlan("3") + time.sleep(1) + + # create vlan member + dvs.create_vlan_member("3", "Ethernet0") + time.sleep(1) + + # create a Vlan member entry in Oper State DB + create_entry_tbl( + dvs.sdb, + "OPER_VLAN_MEMBER", "Vlan2|Ethernet0", + [ + ("tagging_mode", "untagged"), + ] + ) + + # check that the vlan information was propagated + vlan_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + assert vlan_after - vlan_before == 2, "The Vlan2 wasn't created" + assert bp_after - bp_before == 1, "The bridge port wasn't created" + assert vm_after - vm_before == 1, "The vlan member wasn't added" + + # Add FDB entry in Oper State DB + create_entry_tbl( + dvs.sdb, + "OPER_FDB", "Vlan2|00:00:00:00:00:01", + [ + ("port", "Ethernet0"), + ("type", "dynamic"), + ("discard", "false"), + ] + ) + # Get mapping between interface name and its bridge port_id + iface_2_bridge_port_id = dvs.get_map_iface_bridge_port_id(dvs.adb) + + # check that the FDB entry was inserted into ASIC DB + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "00:00:00:00:00:01"), ("bvid", bvid)], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"), + ("SAI_FDB_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_FORWARD"), + ("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", iface_2_bridge_port_id["Ethernet0"])]) + + assert ok, str(extra) + + # Remove FDB entry in Oper State DB + remove_entry_tbl( + dvs.sdb, + "OPER_FDB", "Vlan2|00:00:00:00:00:01" + ) + + # check that the FDB entry was removed from ASIC DB + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "00:00:00:00:00:01"), ("bvid", bvid)], []) + assert ok == False, "The fdb entry still exists in ASIC" + + # remove Vlan member entry in Oper State DB + remove_entry_tbl( + dvs.sdb, + "OPER_VLAN_MEMBER", "Vlan2|Ethernet0" + ) + # check that the vlan information was propagated + vlan_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + assert vlan_after - vlan_before == 2, "The Vlan2 wasn't created" + assert bp_after - bp_before == 1, "The bridge port wasn't created" + assert vm_after - vm_before == 1, "The vlan member wasn't added" + + dvs.remove_vlan("2") + dvs.remove_vlan_member("3", "Ethernet0") + dvs.remove_vlan("3") + + vlan_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + bp_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") + vm_after = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") + + assert vlan_after - vlan_before == 0, "The Vlan2 wasn't removed" + assert bp_after - bp_before == 0, "The bridge port wasn't removed" + assert vm_after - vm_before == 0, "The vlan member wasn't removed" + + def test_PacPortLearnMode(self, dvs, testlog): + dvs.setup_db() + time.sleep(2) + + # create vlan + dvs.create_vlan("2") + time.sleep(1) + + # create vlan member + dvs.create_vlan_member("2", "Ethernet0") + time.sleep(1) + + cntdb = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) + # get port oid + port_oid = get_port_oid(cntdb, "Ethernet0") + assert port_oid is not None + + # check asicdb before setting mac learn mode; The default learn_mode value is SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW. + status = check_learn_mode_in_asicdb(dvs.adb, port_oid, "SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW") + assert status == True + + # Set port learn mode to CPU + create_entry_tbl( + dvs.sdb, + "OPER_PORT", "Ethernet0", + [ + ("learn_mode", "cpu_trap"), + ] + ) + status = check_learn_mode_in_asicdb(dvs.adb, port_oid, "SAI_BRIDGE_PORT_FDB_LEARNING_MODE_CPU_TRAP") + assert status == True + + # Set port learn mode back to default + remove_entry_tbl( + dvs.sdb, + "OPER_PORT", "Ethernet0" + ) + status = check_learn_mode_in_asicdb(dvs.adb, port_oid, "SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW") + assert status == True + dvs.remove_vlan_member("2", "Ethernet0") + dvs.remove_vlan("2") + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_pbh.py b/tests/test_pbh.py index 65401a3ea97..03f5791aeb8 100644 --- a/tests/test_pbh.py +++ b/tests/test_pbh.py @@ -257,6 +257,7 @@ def test_PbhRuleCreationDeletion(self, testlog): @pytest.mark.usefixtures("dvs_hash_manager") +@pytest.mark.xfail(reason="Failing after Bookworm/libnl 3.7.0 upgrade") class TestPbhBasicEditFlows: def test_PbhRuleUpdate(self, testlog): try: diff --git a/tests/test_pfcwd_shared_egress_acl_table.py b/tests/test_pfcwd_shared_egress_acl_table.py new file mode 100644 index 00000000000..d1686a629ff --- /dev/null +++ b/tests/test_pfcwd_shared_egress_acl_table.py @@ -0,0 +1,173 @@ +import time +import pytest +from dvslib.dvs_common import wait_for_result, PollingConfig + +# set ASIC_TYPE=broadcom-dnx to test broadcom-dnx specific implementation. +# set PFC_DLR_INIT_ENABLE=0 to test PfcWdAclHandler, instead of PfcWdDlrHandler. +DVS_ENV = ["ASIC_TYPE=broadcom-dnx", "PFC_DLR_INIT_ENABLE=0"] + +class TestPfcwdFunc(object): + @pytest.fixture + def select_lc(self, vct): + # find a LC to test PFCWD + self.dvs = None + for name in vct.dvss.keys(): + dvs = vct.dvss[name] + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "voq": + self.dvs = dvs + assert self.dvs + + @pytest.fixture + def setup_teardown_test(self, select_lc): + self.asic_db = self.dvs.get_asic_db() + self.config_db = self.dvs.get_config_db() + self.counters_db = self.dvs.get_counters_db() + + self.test_ports = ["Ethernet0"] + self.setup_test(self.dvs) + + self.port_oids = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "") + self.queue_oids = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") + + yield + + self.teardown_test(self.dvs) + + def setup_test(self, dvs): + # save original cable length and set new cabel length + fvs = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + self.orig_cable_len = dict() + for port in self.test_ports: + self.orig_cable_len[port] = fvs[port] + self.set_cable_len(port, "5m") + # startup port + dvs.port_admin_set(port, "up") + + # enable pfcwd + self.set_flex_counter_status("PFCWD", "enable") + # enable queue so that queue oids are generated + self.set_flex_counter_status("QUEUE", "enable") + + def teardown_test(self, dvs): + # disable pfcwd + self.set_flex_counter_status("PFCWD", "disable") + # disable queue + self.set_flex_counter_status("QUEUE", "disable") + + for port in self.test_ports: + if self.orig_cable_len: + self.set_cable_len(port, self.orig_cable_len[port]) + # shutdown port + dvs.port_admin_set(port, "down") + + def set_flex_counter_status(self, key, state): + fvs = {'FLEX_COUNTER_STATUS': state} + self.config_db.update_entry("FLEX_COUNTER_TABLE", key, fvs) + time.sleep(1) + + def set_ports_pfc(self, status='enable', pfc_queues=[3,4]): + keyname = 'pfcwd_sw_enable' + for port in self.test_ports: + if 'enable' in status: + queues = ",".join([str(q) for q in pfc_queues]) + fvs = {keyname: queues, 'pfc_enable': queues} + self.config_db.create_entry("PORT_QOS_MAP", port, fvs) + else: + self.config_db.delete_entry("PORT_QOS_MAP", port) + + def set_cable_len(self, port_name, cable_len): + fvs = {port_name: cable_len} + self.config_db.update_entry("CABLE_LEN", "AZURE", fvs) + + def start_pfcwd_on_ports(self, poll_interval="200", detection_time="200", restoration_time="200", action="drop"): + pfcwd_info = {"POLL_INTERVAL": poll_interval} + self.config_db.update_entry("PFC_WD", "GLOBAL", pfcwd_info) + + pfcwd_info = {"action": action, + "detection_time" : detection_time, + "restoration_time": restoration_time + } + for port in self.test_ports: + self.config_db.update_entry("PFC_WD", port, pfcwd_info) + + def stop_pfcwd_on_ports(self): + for port in self.test_ports: + self.config_db.delete_entry("PFC_WD", port) + + def set_storm_state(self, queues, state="enabled"): + fvs = {"DEBUG_STORM": state} + for port in self.test_ports: + for queue in queues: + queue_name = port + ":" + str(queue) + self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) + + def verify_egress_acls(self, expected_acls=None): + def do_verify_egress_acls(): + egress_acl_table_oids = [] + acl_table_name = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE" + for oid_key in self.asic_db.get_keys(acl_table_name): + entry = self.asic_db.get_entry(acl_table_name, oid_key) + # find egress ACL table by checking ACL table attributes + if (entry.get("SAI_ACL_TABLE_ATTR_ACL_STAGE") == "SAI_ACL_STAGE_EGRESS" and + entry.get("SAI_ACL_TABLE_ATTR_FIELD_TC") == "true" and + entry.get("SAI_ACL_TABLE_ATTR_FIELD_OUT_PORT") == "true" and + entry.get("SAI_ACL_TABLE_ATTR_ACL_BIND_POINT_TYPE_LIST") == "1:SAI_ACL_BIND_POINT_TYPE_SWITCH"): + egress_acl_table_oids.append(oid_key) + if len(egress_acl_table_oids) != 1: + return (False, None) + + # find installed ACL entries in egress ACL tables. + installed_acls = [] + acl_entry_table_name = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY" + for oid_key in self.asic_db.get_keys(acl_entry_table_name): + entry = self.asic_db.get_entry(acl_entry_table_name, oid_key) + if entry.get("SAI_ACL_ENTRY_ATTR_TABLE_ID") in egress_acl_table_oids: + port_oid = entry.get("SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORT") + tc = entry.get("SAI_ACL_ENTRY_ATTR_FIELD_TC") + tc = int(tc.replace("&mask:0xff", "")) + installed_acls.append((port_oid, tc)) + + # verify installed ACLs against expected ones + return (sorted(installed_acls) == sorted(expected_acls), None) + + max_poll = PollingConfig(polling_interval=5, timeout=600, strict=True) + wait_for_result(do_verify_egress_acls, polling_config=max_poll) + + def test_pfcwd_shared_egress_acl_table(self, setup_teardown_test): + try: + # enable PFC on queues + test_queues = [3, 4] + self.set_ports_pfc(pfc_queues=test_queues) + + # start PFCWD on ports and PFC storm + self.start_pfcwd_on_ports() + storm_queue = test_queues + self.set_storm_state(storm_queue) + + # verify egress ACLs in asic db + expected_acls = [] + for port in self.test_ports: + for queue in storm_queue: + expected_acls.append((self.port_oids[port], queue)) + self.verify_egress_acls(expected_acls) + + # stop storm and PFCWD on port. + self.set_storm_state(storm_queue, state="disabled") + self.stop_pfcwd_on_ports() + + # verify egress ACLs and table are deleted. + expected_acls = [] + self.verify_egress_acls(expected_acls) + + finally: + self.stop_pfcwd_on_ports() + + +# +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_port.py b/tests/test_port.py index 335b0a604bc..129cab981ce 100644 --- a/tests/test_port.py +++ b/tests/test_port.py @@ -286,9 +286,255 @@ def test_PortHostif(self, dvs): status, fvs = atbl.get(intf) assert status, "Error getting value for key" attributes = dict(fvs) + if attributes.get("SAI_HOSTIF_ATTR_TYPE") != "SAI_HOSTIF_TYPE_NETDEV": + continue hostif_queue = attributes.get("SAI_HOSTIF_ATTR_QUEUE") assert hostif_queue == "7" + def test_PortHostTxSignalSet(self, dvs, testlog): + adb = dvs.get_asic_db() + statedb = dvs.get_state_db() + + transceiver_info_tbl = swsscommon.Table(statedb.db_connection, "TRANSCEIVER_INFO") + fvs = swsscommon.FieldValuePairs([("supported_max_tx_power","N/A")]) + transceiver_info_tbl.set("Ethernet0", fvs) + + port_oid = adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE":"true"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + transceiver_info_tbl.hdel("Ethernet0", "supported_max_tx_power") + expected_fields = {"SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE":"false"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + def test_PortPathTracing(self, dvs, testlog): + pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + ctbl = swsscommon.Table(cdb, "PORT") + ptbl = swsscommon.Table(pdb, "PORT_TABLE") + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + + # get the number of ports before removal + num_of_ports = len(atbl.getKeys()) + + initial_entries = set(atbl.getKeys()) + + # read port info and save it + (status, ports_info) = ctbl.get("Ethernet124") + assert status + + # remove buffer pg cfg for the port (record the buffer pgs before removing them) + pgs = dvs.get_config_db().get_keys('BUFFER_PG') + buffer_pgs = {} + for key in pgs: + if "Ethernet124" in key: + buffer_pgs[key] = dvs.get_config_db().get_entry('BUFFER_PG', key) + dvs.get_config_db().delete_entry('BUFFER_PG', key) + dvs.get_app_db().wait_for_deleted_entry("BUFFER_PG_TABLE", key) + + # remove buffer queue cfg for the port + queues = dvs.get_config_db().get_keys('BUFFER_QUEUE') + buffer_queues = {} + for key in queues: + if "Ethernet124" in key: + buffer_queues[key] = dvs.get_config_db().get_entry('BUFFER_QUEUE', key) + dvs.get_config_db().delete_entry('BUFFER_QUEUE', key) + dvs.get_app_db().wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + # shutdown port + dvs.port_admin_set("Ethernet124", 'down') + + # remove this port + ctbl.delete("Ethernet124") + + # verify that the port has been removed + num = dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_ports - 1) + assert len(num) == num_of_ports - 1 + + # re-add the port with Path Tracing enabled + fvs = swsscommon.FieldValuePairs(ports_info + (("pt_interface_id", "129"), ("pt_timestamp_template", "template2"))) + ctbl.set("Ethernet124", fvs) + + # check application database + dvs.get_app_db().wait_for_entry('PORT_TABLE', "Ethernet124") + (status, fvs) = ptbl.get("Ethernet124") + assert status + for fv in fvs: + if fv[0] == "pt_interface_id": + assert fv[1] == "129" + if fv[0] == "pt_timestamp_template": + assert fv[1] == "template2" + + # verify that the port has been re-added + num = dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_ports) + assert len(num) == num_of_ports + + # check ASIC DB + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + # get PT Interface ID and validate it to be 129 + entries = set(atbl.getKeys()) + new_entries = list(entries - initial_entries) + assert len(new_entries) == 1, "Wrong number of created entries." + + (status, fvs) = atbl.get(new_entries[0]) + assert status + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_PATH_TRACING_INTF": + assert fv[1] == "129" + if fv[0] == "SAI_PORT_ATTR_PATH_TRACING_TIMESTAMP_TYPE": + assert fv[1] == "SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_12_19" + + # change Path Tracing Interface ID and Timestamp Template on the port + fvs = swsscommon.FieldValuePairs([("pt_interface_id", "130"), ("pt_timestamp_template", "template3")]) + ctbl.set("Ethernet124", fvs) + time.sleep(5) + + # check application database + (status, fvs) = ptbl.get("Ethernet124") + assert status + for fv in fvs: + if fv[0] == "pt_interface_id": + assert fv[1] == "130" + if fv[0] == "pt_timestamp_template": + assert fv[1] == "template3" + + time.sleep(5) + + # check ASIC DB + # get PT Interface ID and validate it to be 130 + (status, fvs) = atbl.get(new_entries[0]) + assert status + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_PATH_TRACING_INTF": + assert fv[1] == "130" + if fv[0] == "SAI_PORT_ATTR_PATH_TRACING_TIMESTAMP_TYPE": + assert fv[1] == "SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_16_23" + + # shutdown port + dvs.port_admin_set("Ethernet124", 'down') + + # remove the port + ctbl.delete("Ethernet124") + + # re-add the port with the original configuration + ctbl.set("Ethernet124", ports_info) + + # verify that the port has been re-added + num = dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_ports) + assert len(num) == num_of_ports + + # re-add buffer pg and queue cfg to the port + for key, pg in buffer_pgs.items(): + dvs.get_config_db().update_entry("BUFFER_PG", key, pg) + + for key, queue in buffer_queues.items(): + dvs.get_config_db().update_entry("BUFFER_QUEUE", key, queue) + + def test_PortLinkEventDamping(self, dvs, testlog): + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + + cfg_tbl = swsscommon.Table(cdb, "PORT") + app_tbl = swsscommon.Table(pdb, "PORT_TABLE") + port_name = "Ethernet0" + + # Set link event damping. + fvs = swsscommon.FieldValuePairs([("link_event_damping_algorithm", "aied"), + ("max_suppress_time", "54000"), + ("decay_half_life", "45000"), + ("suppress_threshold", "1650"), + ("reuse_threshold", "1500"), + ("flap_penalty", "1000") + ]) + cfg_tbl.set(port_name, fvs) + time.sleep(1) + + # Check application database. + (status, fvs) = app_tbl.get(port_name) + assert status == True + for fv in fvs: + if fv[0] == "link_event_damping_algorithm": + assert fv[1] == "aied" + elif fv[0] == "max_suppress_time": + assert fv[1] == "54000" + elif fv[0] == "decay_half_life": + assert fv[1] == "45000" + elif fv[0] == "suppress_threshold": + assert fv[1] == "1650" + elif fv[0] == "reuse_threshold": + assert fv[1] == "1500" + elif fv[0] == "flap_penalty": + assert fv[1] == "1000" + + # Disable link event damping. + fvs = swsscommon.FieldValuePairs([("link_event_damping_algorithm", "disabled")]) + cfg_tbl.set(port_name, fvs) + time.sleep(1) + + # Check application database. + (status, fvs) = app_tbl.get(port_name) + assert status == True + for fv in fvs: + if fv[0] == "link_event_damping_algorithm": + assert fv[1] == "disabled" + + def test_PortAdminRestore(self, dvs, testlog): + appdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + ptbl = swsscommon.ProducerStateTable(appdb, "PORT_TABLE") + atbl = swsscommon.Table(asicdb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + + # Initialize Ethernet0 (admin_status, fec) = (up, rs) + fvs = swsscommon.FieldValuePairs([("admin_status", "up"), + ("fec", "rs")]) + ptbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_FEC_MODE": + assert fv[1] == "SAI_PORT_FEC_MODE_RS" + if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": + assert fv[1] == "true" + + # Verify pCfg.admin_status.is_set false by (fec) = (none) + fvs = swsscommon.FieldValuePairs([("fec", "none")]) + ptbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_FEC_MODE": + assert fv[1] == "SAI_PORT_FEC_MODE_NONE" + if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": + assert fv[1] == "true" + + # Verify pCfg.admin_status.is_set true by (admin_status, fec) = (down, rs) + fvs = swsscommon.FieldValuePairs([("admin_status", "down"), + ("fec", "rs")]) + ptbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_FEC_MODE": + assert fv[1] == "SAI_PORT_FEC_MODE_RS" + if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": + assert fv[1] == "false" # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_port_add_remove.py b/tests/test_port_add_remove.py index 54cd6599c91..ab82c9b106d 100644 --- a/tests/test_port_add_remove.py +++ b/tests/test_port_add_remove.py @@ -19,9 +19,10 @@ def dynamic_buffer(dvs): buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) yield - buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + buffer_model.disable_dynamic_buffer(dvs) +@pytest.mark.skip(reason="Temporarily skip failing tests") @pytest.mark.usefixtures('dvs_port_manager') @pytest.mark.usefixtures("dynamic_buffer") class TestPortAddRemove(object): @@ -252,6 +253,7 @@ def test_add_remove_all_the_ports(self, dvs, testlog, scenario): dvs.remove_vlan("6") +@pytest.mark.skip(reason="Temporarily skip failing tests") @pytest.mark.usefixtures("dynamic_buffer") @pytest.mark.usefixtures("dvs_port_manager") class TestPortAddRemoveDup(object): @@ -328,6 +330,7 @@ def test_add_remove_with_dup_lanes(self, testlog, dvs): app_db.wait_for_entry("BUFFER_QUEUE_TABLE", key.replace(config_db.separator, app_db.separator)) +@pytest.mark.skip(reason="Temporarily skip failing tests") @pytest.mark.usefixtures("dvs_port_manager") class TestPortAddRemoveInvalidMandatoryParam(object): @pytest.mark.parametrize( @@ -355,6 +358,7 @@ def test_add_remove_neg(self, testlog, port, lanes, speed): self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) +@pytest.mark.skip(reason="Temporarily skip failing tests") @pytest.mark.usefixtures("dvs_port_manager") class TestPortAddRemoveInvalidSerdesParam(object): @pytest.fixture(scope="class") @@ -402,6 +406,7 @@ def test_add_remove_neg(self, testlog, port_attr, serdes): self.verify_add_remove(port_attr, qualifiers) +@pytest.mark.skip(reason="Temporarily skip failing tests") @pytest.mark.usefixtures("dvs_port_manager") class TestPortAddRemoveInvalidParam(object): def verify_add_remove(self, qualifiers): diff --git a/tests/test_port_unlos.py b/tests/test_port_unlos.py new file mode 100644 index 00000000000..7658105c310 --- /dev/null +++ b/tests/test_port_unlos.py @@ -0,0 +1,39 @@ +from swsscommon import swsscommon + + +class TestPortUnreliableLos(object): + def test_PortUnreliableLosForce(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + adb = dvs.get_asic_db() + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + fvs = swsscommon.FieldValuePairs([("unreliable_los","off")]) + tbl.set("Ethernet0", fvs) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + fvs = swsscommon.FieldValuePairs([("unreliable_los","on")]) + tbl.set("Ethernet4", fvs) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + fvs = swsscommon.FieldValuePairs([("unreliable_los","err")]) + tbl.set("Ethernet8", fvs) + + # validate if unreliable false is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet0"] + expected_fields = {'NULL': 'NULL', 'SAI_PORT_ATTR_ADMIN_STATE': 'false', 'SAI_PORT_ATTR_AUTO_NEG_MODE': 'true', 'SAI_PORT_ATTR_MTU': '9122', 'SAI_PORT_ATTR_SPEED': '100000', 'SAI_PORT_ATTR_UNRELIABLE_LOS': 'false'} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + # validate if unreliable true is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet4"] + expected_fields = {'NULL': 'NULL', 'SAI_PORT_ATTR_ADMIN_STATE': 'false', 'SAI_PORT_ATTR_AUTO_NEG_MODE': 'true', 'SAI_PORT_ATTR_MTU': '9122', 'SAI_PORT_ATTR_SPEED': '100000', 'SAI_PORT_ATTR_UNRELIABLE_LOS': 'true'} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + port_oid = adb.port_name_map["Ethernet8"] + expected_fields = {'NULL': 'NULL', 'SAI_PORT_ATTR_ADMIN_STATE': 'false', 'SAI_PORT_ATTR_AUTO_NEG_MODE': 'true', 'SAI_PORT_ATTR_MTU': '9122', 'SAI_PORT_ATTR_SPEED': '100000'} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_portchannel.py b/tests/test_portchannel.py index 0a922e6936e..b9f7a994ddd 100644 --- a/tests/test_portchannel.py +++ b/tests/test_portchannel.py @@ -452,6 +452,12 @@ def test_portchannel_member_netdev_oper_status(self, dvs, testlog): fvs = dict(fvs) assert fvs['netdev_oper_status'] == 'up' + # verify a PORT_TABLE entry containing the PortChannel is NOT created + # in APPDB (sonic-buildimage Issue #21688) + tbl = swsscommon.Table(app_db, "PORT_TABLE") + status, _ = tbl.get("PortChannel111") + assert status is False + # remove port-channel members tbl = swsscommon.Table(config_db, "PORTCHANNEL_MEMBER") tbl._del("PortChannel111|Ethernet0") diff --git a/tests/test_route.py b/tests/test_route.py index dfa6d04cc44..647e7f05a47 100644 --- a/tests/test_route.py +++ b/tests/test_route.py @@ -3,6 +3,7 @@ import time import json import pytest +import ipaddress from swsscommon import swsscommon from dvslib.dvs_common import wait_for_result @@ -65,20 +66,21 @@ def _access_function(): def check_route_state(self, prefix, value): found = False + fvs = {} + + for _ in range(5): # Try for up to ~5 seconds + route_entries = self.sdb.get_keys("ROUTE_TABLE") + for key in route_entries: + if key != prefix: + continue + found = True + fvs = self.sdb.get_entry("ROUTE_TABLE", key) + if fvs.get("state") == value: + return + time.sleep(1) - route_entries = self.sdb.get_keys("ROUTE_TABLE") - for key in route_entries: - if key != prefix: - continue - found = True - fvs = self.sdb.get_entry("ROUTE_TABLE", key) - - assert fvs != {} - - for f,v in fvs.items(): - if f == "state": - assert v == value assert found + assert fvs.get("state") == value, f"Expected state '{value}', but got '{fvs.get('state')}'" def get_asic_db_key(self, destination): route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") @@ -132,6 +134,7 @@ def clear_srv_config(self, dvs): class TestRoute(TestRouteBase): """ Functionality tests for route """ + @pytest.mark.skip(reason="Covered by mock test: RouteOrch_AddRemoveIPv4_And_DefaultRoute_State (GTest)") def test_RouteAddRemoveIpv4Route(self, dvs, testlog): self.setup_db(dvs) @@ -211,6 +214,7 @@ def test_RouteAddRemoveIpv4Route(self, dvs, testlog): dvs.servers[1].runcmd("ip route del default dev eth0") dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0") + @pytest.mark.skip(reason="Covered by mock test: RouteOrch_AddRemoveIPv6_And_DefaultRoute_State (GTest)") def test_RouteAddRemoveIpv6Route(self, dvs, testlog): self.setup_db(dvs) @@ -1073,7 +1077,7 @@ def is_offloaded(self, dvs, route): route_entry = json.loads(output) return bool(route_entry[route][0].get('offloaded')) - @pytest.mark.xfail(reason="Requires VS docker update in https://github.com/sonic-net/sonic-buildimage/pull/12853") + @pytest.mark.xfail(reason="BGP suppress FIB disabled on master/202405 - https://github.com/sonic-net/sonic-buildimage/issues/19092") @pytest.mark.parametrize("suppress_state", ["enabled", "disabled"]) def test_offload(self, suppress_state, setup, dvs): route = "1.1.1.0/24" @@ -1113,6 +1117,150 @@ def check_offloaded(): # make sure route suppression is disabled dvs.runcmd("config suppress-fib-pending disabled") + +class TestSubnetDecapVipRoute(TestRouteBase): + VLAN_ID = "1000" + VLAN_INTF = "Vlan1000" + SERV_IPV4 = "192.168.0.100" + SERV_IPV6 = "fc02:1000::100" + CFG_SUBNET_DECAP_TABLE_NAME = "SUBNET_DECAP" + APP_TUNNEL_DECAP_TERM_TABLE_NAME = "TUNNEL_DECAP_TERM_TABLE" + + def add_neighbor(self, dvs, ip, mac): + if ipaddress.ip_address(ip).version == 6: + dvs.runcmd("ip -6 neigh replace " + ip + " lladdr " + mac + " dev " + self.VLAN_INTF) + else: + dvs.runcmd("ip -4 neigh replace " + ip + " lladdr " + mac + " dev " + self.VLAN_INTF) + + def add_route(self, dvs, ip_prefix): + if ipaddress.ip_network(ip_prefix).version == 4: + dvs.runcmd( + 'vtysh -c "configure terminal" -c "ip route %s %s"' % (ip_prefix, self.SERV_IPV4) + ) + else: + dvs.runcmd( + 'vtysh -c "configure terminal" -c "ipv6 route %s %s"' % (ip_prefix, self.SERV_IPV6) + ) + + def remove_route(self, dvs, ip_prefix): + if ipaddress.ip_network(ip_prefix).version == 4: + dvs.runcmd( + 'vtysh -c "configure terminal" -c "no ip route %s %s"' % (ip_prefix, self.SERV_IPV4) + ) + else: + dvs.runcmd( + 'vtysh -c "configure terminal" -c "no ipv6 route %s %s"' % (ip_prefix, self.SERV_IPV6) + ) + + def validate_subnet_decap_term(self, dvs, ip_prefix_list): + tunnel_decap_term_app_table = swsscommon.Table(dvs.pdb, self.APP_TUNNEL_DECAP_TERM_TABLE_NAME) + decap_term_list = tunnel_decap_term_app_table.getKeys() + decap_term_prefix_list = [decap_term.split(":", 1)[1] for decap_term in decap_term_list] + + assert len(ip_prefix_list) == len(decap_term_prefix_list) + for ip_prefix in ip_prefix_list: + assert ip_prefix in decap_term_prefix_list + + for decap_term in decap_term_list: + _, fvs = tunnel_decap_term_app_table.get(decap_term) + decap_term_attrs = dict(fvs) + assert decap_term_attrs["term_type"] == "MP2MP" + assert decap_term_attrs["subnet_type"] == "vip" + + @pytest.fixture(scope="class", autouse=True) + def setup_vlan(self, dvs, dvs_vlan_manager, setup_subnet_decap): + dvs.setup_db() + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + + vlan = self.VLAN_ID + vlan_intf = self.VLAN_INTF + self.dvs_vlan.create_vlan(vlan) + vlan_oid = self.dvs_vlan.get_and_verify_vlan_ids(1)[0] + self.dvs_vlan.verify_vlan(vlan_oid, vlan) + + dvs.port_admin_set("Ethernet0", "up") + self.dvs_vlan.create_vlan_member(vlan, "Ethernet0") + self.dvs_vlan.verify_vlan_member(vlan_oid, "Ethernet0") + + dvs.add_ip_address(vlan_intf, "192.168.0.1/24") + dvs.add_ip_address(vlan_intf, "fc02:1000::1/64") + + yield + + dvs.remove_ip_address(vlan_intf, "192.168.0.1/24") + dvs.remove_ip_address(vlan_intf, "fc02:1000::1/64") + + self.dvs_vlan.remove_vlan_member(vlan, "Ethernet0") + self.dvs_vlan.get_and_verify_vlan_member_ids(0) + + time.sleep(2) + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + + @pytest.fixture(scope="class", autouse=True) + def setup_server_neighbor(self, dvs, setup_vlan): + self.add_neighbor(dvs, self.SERV_IPV4, "00:00:00:00:00:01") + self.add_neighbor(dvs, self.SERV_IPV6, "00:00:00:00:00:01") + + @pytest.fixture(scope="class") + def setup_subnet_decap(self, dvs): + + def _apply_subnet_decap_config(subnet_decap_config): + """Apply subnet decap config to CONFIG_DB.""" + fvs = swsscommon.FieldValuePairs(list(subnet_decap_config.items())) + subnet_decap_tbl.set("AZURE", fvs) + + def _cleanup_subnet_decap_config(): + """Cleanup subnet decap config in CONFIG_DB.""" + for key in subnet_decap_tbl.getKeys(): + subnet_decap_tbl._del(key) + + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + _cleanup_subnet_decap_config() + + yield _apply_subnet_decap_config + + _cleanup_subnet_decap_config() + + def test_vip_route(self, dvs, setup_subnet_decap): + self.add_route(dvs, "10.10.20.0/24") + self.add_route(dvs, "2001:506:28:9d::/64") + + time.sleep(2) + self.validate_subnet_decap_term(dvs, ["10.10.20.0/24", "2001:506:28:9d::/64"]) + + self.remove_route(dvs, "10.10.20.0/24") + self.remove_route(dvs, "2001:506:28:9d::/64") + + time.sleep(2) + self.validate_subnet_decap_term(dvs, []) + + subnet_decap_config = { + "status": "disable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + + self.add_route(dvs, "10.10.20.0/24") + self.add_route(dvs, "2001:506:28:9d::/64") + + time.sleep(2) + self.validate_subnet_decap_term(dvs, []) + + self.remove_route(dvs, "10.10.20.0/24") + self.remove_route(dvs, "2001:506:28:9d::/64") + + time.sleep(2) + self.validate_subnet_decap_term(dvs, []) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_soft_bfd.py b/tests/test_soft_bfd.py new file mode 100644 index 00000000000..eb67cd1b142 --- /dev/null +++ b/tests/test_soft_bfd.py @@ -0,0 +1,185 @@ +from swsscommon import swsscommon + +#Replace with swsscommon.SOFTWARE_BFD_SESSION_STATE_TABLE once available in azure pipeline +#SOFT_BFD_STATE_TABLE = swsscommon.STATE_BFD_SOFTWARE_SESSION_TABLE_NAME +SOFT_BFD_STATE_TABLE = "BFD_SOFTWARE_SESSION_TABLE" + +DVS_ENV = ["BFDOFFLOAD=false"] + +class TestSoftBfd(object): + def setup_db(self, dvs): + dvs.setup_db() + self.pdb = dvs.get_app_db() + self.sdb = dvs.get_state_db() + self.cdb = dvs.get_config_db() + + #Restart swss to pick up new switch type + dvs.stop_swss() + dvs.start_swss() + + def get_exist_bfd_session(self): + return set(self.sdb.get_keys(SOFT_BFD_STATE_TABLE)) + + def create_bfd_session(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "BFD_SESSION_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) + + def remove_bfd_session(self, key): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "BFD_SESSION_TABLE") + tbl._del(key) + + def check_state_bfd_session_value(self, key, expected_values): + #Key format is different in STATE_DB compared to APP_DB + key = key.replace(":", "|", 2) + fvs = self.sdb.get_entry(SOFT_BFD_STATE_TABLE, key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def test_addRemoveBfdSession(self, dvs): + self.setup_db(dvs) + bfd_session_key = "default:default:10.0.0.2" + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "tos": "64", "multiplier": "5", "tx_interval": "300", + "rx_interval": "500"} + self.create_bfd_session(bfd_session_key, fieldValues) + self.sdb.wait_for_n_keys(SOFT_BFD_STATE_TABLE, len(bfdSessions) + 1) + + # Check created BFD session in STATE_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + + # Check STATE_DB entry related to the BFD session + self.check_state_bfd_session_value(bfd_session_key, fieldValues) + + # Remove the BFD session + self.remove_bfd_session(bfd_session_key) + self.sdb.wait_for_deleted_entry(SOFT_BFD_STATE_TABLE, session) + + def test_addRemoveBfdSession_ipv6(self, dvs): + self.setup_db(dvs) + bfd_session_key = "default:default:2000::2" + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "2000::1", "multihop": "true", "multiplier": "3", "tx_interval": "400", + "rx_interval": "200"} + self.create_bfd_session(bfd_session_key, fieldValues) + self.sdb.wait_for_n_keys(SOFT_BFD_STATE_TABLE, len(bfdSessions) + 1) + + # Check created BFD session in STATE_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + + # Check STATE_DB entry related to the BFD session + self.check_state_bfd_session_value(bfd_session_key, fieldValues) + + # Remove the BFD session + self.remove_bfd_session(bfd_session_key) + self.sdb.wait_for_deleted_entry(SOFT_BFD_STATE_TABLE, session) + + def test_addRemoveBfdSession_interface(self, dvs): + self.setup_db(dvs) + bfd_session_key = "default:Ethernet0:10.0.0.2" + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "dst_mac": "00:02:03:04:05:06", "type": "async_passive"} + self.create_bfd_session("default:Ethernet0:10.0.0.2", fieldValues) + self.sdb.wait_for_n_keys(SOFT_BFD_STATE_TABLE, len(bfdSessions) + 1) + + # Check created BFD session in STATE_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session = createdSessions.pop() + + # Check STATE_DB entry related to the BFD session + self.check_state_bfd_session_value(bfd_session_key, fieldValues) + + # Remove the BFD session + self.remove_bfd_session(bfd_session_key) + self.sdb.wait_for_deleted_entry(SOFT_BFD_STATE_TABLE, session) + + def test_multipleBfdSessions(self, dvs): + self.setup_db(dvs) + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session 1 + key1 = "default:default:10.0.0.2" + fieldValues = {"local_addr": "10.0.0.1"} + self.create_bfd_session(key1, fieldValues) + self.sdb.wait_for_n_keys(SOFT_BFD_STATE_TABLE, len(bfdSessions) + 1) + + # Checked BFD session 1 in STATE_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + bfdSessions = self.get_exist_bfd_session() + assert len(createdSessions) == 1 + + session1 = createdSessions.pop() + + # Check STATE_DB entry related to the BFD session + self.check_state_bfd_session_value(key1, fieldValues) + + # Create BFD session 2 + key2 = "default:default:10.0.1.2" + fieldValues = {"local_addr": "10.0.0.1", "tx_interval": "300", "rx_interval": "500"} + self.create_bfd_session(key2, fieldValues) + self.sdb.wait_for_n_keys(SOFT_BFD_STATE_TABLE, len(bfdSessions) + 1) + + # Check BFD session 2 in STATE_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + bfdSessions = self.get_exist_bfd_session() + assert len(createdSessions) == 1 + + session2 = createdSessions.pop() + + # Check STATE_DB entry related to the BFD session + self.check_state_bfd_session_value(key2, fieldValues) + + # Create BFD session 3 + key3 = "default:default:2000::2" + fieldValues = {"local_addr": "2000::1", "type": "demand_active"} + self.create_bfd_session(key3, fieldValues) + self.sdb.wait_for_n_keys(SOFT_BFD_STATE_TABLE, len(bfdSessions) + 1) + + # Check BFD session 3 in STATE_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + bfdSessions = self.get_exist_bfd_session() + assert len(createdSessions) == 1 + + session3 = createdSessions.pop() + + # Check STATE_DB entry related to the BFD session + self.check_state_bfd_session_value(key3, fieldValues) + + # Create BFD session 4 + key4 = "default:default:3000::2" + fieldValues = {"local_addr": "3000::1"} + self.create_bfd_session(key4, fieldValues) + self.sdb.wait_for_n_keys(SOFT_BFD_STATE_TABLE, len(bfdSessions) + 1) + + # Check BFD session 3 in STATE_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + + session4 = createdSessions.pop() + + # Check STATE_DB entry related to the BFD session + self.check_state_bfd_session_value(key4, fieldValues) + + # Remove the BFD sessions + self.remove_bfd_session(key1) + self.sdb.wait_for_deleted_entry(SOFT_BFD_STATE_TABLE, session1) + self.remove_bfd_session(key2) + self.sdb.wait_for_deleted_entry(SOFT_BFD_STATE_TABLE, session2) + self.remove_bfd_session(key3) + self.sdb.wait_for_deleted_entry(SOFT_BFD_STATE_TABLE, session3) + self.remove_bfd_session(key4) + self.sdb.wait_for_deleted_entry(SOFT_BFD_STATE_TABLE, session4) diff --git a/tests/test_srv6.py b/tests/test_srv6.py index 3ce19421b02..92174e5dda8 100644 --- a/tests/test_srv6.py +++ b/tests/test_srv6.py @@ -3,8 +3,11 @@ import time import json import pytest +import distro +import platform from swsscommon import swsscommon +from distutils.version import LooseVersion from dvslib.dvs_common import wait_for_result def get_exist_entries(db, table): @@ -18,6 +21,13 @@ def get_created_entry(db, table, existed_entries): assert len(new_entries) == 1, "Wrong number of created entries." return new_entries[0] +def get_created_entries(db, table, existed_entries, number): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) == number, "Wrong number of created entries." + return new_entries + class TestSrv6Mysid(object): def setup_db(self, dvs): self.pdb = dvs.get_app_db() @@ -310,7 +320,7 @@ def test_mysid(self, dvs, testlog): if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": assert fv[1] == next_hop_ipv4_id if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": - assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4" + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX4" elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" @@ -328,7 +338,7 @@ def test_mysid(self, dvs, testlog): if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": assert fv[1] == next_hop_ipv6_id if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": - assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6" + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDX6" elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" @@ -475,7 +485,7 @@ def create_srv6_route(self, routeip,segname,segsrc): table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" existed_entries = get_exist_entries(self.adb.db_connection, table) - fvs=swsscommon.FieldValuePairs([('seg_src',segsrc),('segment',segname)]) + fvs=swsscommon.FieldValuePairs([('seg_src',segsrc), ('segment',segname)]) routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") routetbl.set(routeip,fvs) @@ -627,6 +637,1624 @@ def test_srv6(self, dvs, testlog): assert nexthop_entries == get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") assert route_entries == get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + +class TestSrv6MySidFpmsyncd(object): + """ Functionality tests for Srv6 MySid handling in fpmsyncd """ + + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + + def create_vrf(self, vrf_name): + table = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + self.cdb.create_entry("VRF", vrf_name, {"empty": "empty"}) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_vrf(self, vrf_name): + self.cdb.delete_entry("VRF", vrf_name) + + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + def add_neighbor(self, interface, ip, mac, family): + table = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + fvs = swsscommon.FieldValuePairs([("neigh", mac), + ("family", family)]) + tbl.set(interface + ":" + ip, fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_neighbor(self, interface, ip): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + tbl._del(interface + ":" + ip) + time.sleep(1) + + def create_l3_intf(self, interface, vrf_name): + table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_l3_intf(self, interface): + self.cdb.delete_entry("INTERFACE", interface) + + def get_nexthop_id(self, ip_address): + next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for next_hop_entry in next_hop_entries: + (status, fvs) = tbl.get(next_hop_entry) + + assert status == True + assert len(fvs) == 3 + + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_IP" and fv[1] == ip_address: + return next_hop_entry + + return None + + def set_interface_status(self, dvs, interface, admin_status): + tbl_name = "PORT" + tbl = swsscommon.Table(self.cdb.db_connection, tbl_name) + fvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + tbl.set(interface, fvs) + time.sleep(1) + + def setup_srv6(self, dvs): + self.setup_db(dvs) + + dvs.runcmd("sysctl -w net.vrf.strict_mode=1") + + # create interface + self.create_l3_intf("Ethernet104", "") + + # assign IP to interface + self.add_ip_address("Ethernet104", "2001::2/126") + self.add_ip_address("Ethernet104", "192.0.2.2/30") + + time.sleep(3) + + # bring up Ethernet104 + self.set_interface_status(dvs, "Ethernet104", "up") + + time.sleep(3) + + # save the initial number of entries in MySID table + self.initial_my_sid_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + + # save the initial number of entries in Nexthop table + self.initial_next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + + # now, let's create the IPv6 neighbor + self.add_neighbor("Ethernet104", "2001::1", "00:00:00:01:02:04", "IPv6") + + # verify that the nexthop is created in the ASIC (i.e., we have the previous number of next hop entries + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(self.initial_next_hop_entries) + 1) + + # get the new nexthop and nexthop ID, which will be used later to verify the MySID entry + next_hop_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", self.initial_next_hop_entries) + assert next_hop_entry is not None + self.next_hop_ipv6_id = self.get_nexthop_id("2001::1") + assert self.next_hop_ipv6_id is not None + + # save the number of entries in Nexthop table, after adding the ipv6 neighbor + updated_next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + + # now, let's create the IPv4 neighbor + self.add_neighbor("Ethernet104", "192.0.2.1", "00:00:00:01:02:05", "IPv4") + + # verify that the nexthop is created in the ASIC (i.e., we have the previous number of next hop entries + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(updated_next_hop_entries) + 1) + + # get the new nexthop and nexthop ID, which will be used later to verify the MySID entry + next_hop_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", updated_next_hop_entries) + assert next_hop_entry is not None + self.next_hop_ipv4_id = self.get_nexthop_id("192.0.2.1") + assert self.next_hop_ipv4_id is not None + + # create vrf + initial_vrf_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) + self.create_vrf("Vrf10") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER", len(initial_vrf_entries) + 1) + current_vrf_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) + self.vrf_id = list(current_vrf_entries - initial_vrf_entries)[0] + _, vrf_info = dvs.runcmd("ip --json -d link show Vrf10") + vrf_info_json = json.loads(vrf_info) + self.vrf_table_id = str(vrf_info_json[0]["linkinfo"]["info_data"]["table"]) + + # create dummy interface sr0 + dvs.runcmd("ip link add sr0 type dummy") + dvs.runcmd("ip link set sr0 up") + + def teardown_srv6(self, dvs): + # remove dummy interface sr0 + dvs.runcmd("ip link del sr0 type dummy") + + # remove vrf + initial_vrf_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) + self.remove_vrf("Vrf10") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER", len(initial_vrf_entries) - 1) + + # remove the IPv4 neighbor + initial_neighbor_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP")) + self.remove_neighbor("Ethernet104", "192.0.2.1") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(initial_neighbor_entries) - 1) + + # remove the IPv6 neighbor + initial_neighbor_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP")) + self.remove_neighbor("Ethernet104", "2001::1") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(initial_neighbor_entries) - 1) + + time.sleep(3) + + # put Ethernet104 down + self.set_interface_status(dvs, "Ethernet104", "down") + + time.sleep(3) + + # remove IP from interface + self.remove_ip_address("Ethernet104", "2001::2/126") + self.remove_ip_address("Ethernet104", "192.0.2.2/30") + + # remove interface + initial_interface_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE")) + self.remove_l3_intf("Ethernet104") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE", len(initial_interface_entries) - 1) + + def test_AddRemoveSrv6MySidEnd(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # configure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") + + # create srv6 mysid end behavior + dvs.runcmd("ip -6 route add fc00:0:1:64::/128 encap seg6local action End dev sr0") + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:64::") + expected_fields = {"action": "end"} + self.pdb.wait_for_field_match("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:64::", expected_fields) + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_E" + + # remove srv6 mysid end behavior + dvs.runcmd("ip -6 route del fc00:0:1:64::/128 encap seg6local action End dev sr0") + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:64::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + def test_AddRemoveSrv6MySidEndX(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # configure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") + + # create srv6 mysid end.x behavior + dvs.runcmd("ip -6 route add fc00:0:1:65::/128 encap seg6local action End.X nh6 2001::1 dev sr0") + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:65::") + expected_fields = {"action": "end.x", "adj": "2001::1"} + self.pdb.wait_for_field_match("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:65::", expected_fields) + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == self.next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # remove srv6 mysid end.x behavior + dvs.runcmd("ip -6 route del fc00:0:1:65::/128 encap seg6local action End.X nh6 2001::1 dev sr0") + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:65::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + @pytest.mark.skipif(LooseVersion(platform.release()) < LooseVersion('5.11'), + reason="This test requires Linux kernel 5.11 or higher") + def test_AddRemoveSrv6MySidEndDT4(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # enable VRF strict mode + dvs.runcmd("sysctl -w net.vrf.strict_mode=1") + + # configure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") + + # create srv6 mysid end.dt4 behavior + dvs.runcmd("ip -6 route add fc00:0:1:6b::/128 encap seg6local action End.DT4 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:6b::") + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert fv[1] == self.vrf_id + + # remove srv6 mysid end.dt4 behavior + dvs.runcmd("ip -6 route del fc00:0:1:6b::/128 encap seg6local action End.DT4 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:6b::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + def test_AddRemoveSrv6MySidEndDT6(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # configure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") + + # create srv6 mysid end.dt6 behavior + dvs.runcmd("ip -6 route add fc00:0:1:6b::/128 encap seg6local action End.DT6 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:6b::") + expected_fields = {"action": "end.dt6", "vrf": "Vrf10"} + self.pdb.wait_for_field_match("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:6b::", expected_fields) + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert fv[1] == self.vrf_id + + # remove srv6 mysid end.dt6 behavior + dvs.runcmd("ip -6 route del fc00:0:1:6b::/128 encap seg6local action End.DT6 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:6b::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + @pytest.mark.skipif(LooseVersion(platform.release()) < LooseVersion('5.14'), + reason="This test requires Linux kernel 5.14 or higher") + def test_AddRemoveSrv6MySidEndDT46(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # enable VRF strict mode + dvs.runcmd("sysctl -w net.vrf.strict_mode=1") + + # configure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16\"") + + # create srv6 mysid end.dt46 behavior + dvs.runcmd("ip -6 route add fc00:0:1:6b::/128 encap seg6local action End.DT46 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:6b::") + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert fv[1] == self.vrf_id + + # remove srv6 mysid end.dt46 behavior + dvs.runcmd("ip -6 route del fc00:0:1:6b::/128 encap seg6local action End.DT46 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:1:6b::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + @pytest.mark.skipif(LooseVersion(platform.release()) < LooseVersion('6.1'), + reason="This test requires Linux kernel 6.1 or higher") + def test_AddRemoveSrv6MySidUN(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # configure srv6 usid locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16\" -c \"behavior usid\"") + + # create srv6 mysid un behavior + dvs.runcmd("ip -6 route add fc00:0:2::/48 encap seg6local action End dev sr0") + # dvs.runcmd("ip -6 route add fc00:0:2::/48 encap seg6local action End flavors next-csid lblen 32 nflen 16 dev sr0") + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2::") + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN" + + # remove srv6 mysid un behavior + dvs.runcmd("ip -6 route del fc00:0:2::/48 encap seg6local action End dev sr0".format(self.vrf_table_id)) + # dvs.runcmd("ip -6 route del fc00:0:2::/48 encap seg6local action End flavors next-csid lblen 32 nflen 16 dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + @pytest.mark.skipif(LooseVersion(platform.release()) < LooseVersion('6.6'), + reason="This test requires Linux kernel 6.6 or higher") + def test_AddRemoveSrv6MySidUA(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # configure srv6 usid locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16\" -c \"behavior usid\"") + + # create srv6 mysid ua behavior + dvs.runcmd("ip -6 route add fc00:0:2:ff00::/64 encap seg6local action End.X nh6 2001::1 dev sr0") + # dvs.runcmd("ip -6 route add fc00:0:2:ff00::/64 encap seg6local action End.X nh6 2001::1 flavors next-csid lblen 32 nflen 16 dev sr0") + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff00::") + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == self.next_hop_ipv6_id + + # remove srv6 mysid ua behavior + dvs.runcmd("ip -6 route del fc00:0:2:ff00::/64 encap seg6local action End.DT6 nh6 2001::1 dev sr0") + # dvs.runcmd("ip -6 route del fc00:0:2:ff00::/64 encap seg6local action End.DT6 nh6 2001::1 flavors next-csid lblen 32 nflen 16 dev sr0") + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff00::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + @pytest.mark.skipif(LooseVersion(platform.release()) < LooseVersion('5.11'), + reason="This test requires Linux kernel 5.11 or higher") + def test_AddRemoveSrv6MySidUDT4(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # enable VRF strict mode + dvs.runcmd("sysctl -w net.vrf.strict_mode=1") + + # configure srv6 usid locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16\" -c \"behavior usid\"") + + # create srv6 mysid udt4 behavior + dvs.runcmd("ip -6 route add fc00:0:2:ff05::/128 encap seg6local action End.DT4 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff05::") + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT4" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert fv[1] == self.vrf_id + + # remove srv6 mysid udt4 behavior + dvs.runcmd("ip -6 route del fc00:0:2:ff05::/128 encap seg6local action End.DT4 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff05::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + def test_AddRemoveSrv6MySidUDT6(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # configure srv6 usid locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16\" -c \"behavior usid\"") + + # create srv6 mysid udt6 behavior + dvs.runcmd("ip -6 route add fc00:0:2:ff05::/128 encap seg6local action End.DT6 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff05::") + expected_fields = {"action": "udt6", "vrf": "Vrf10"} + self.pdb.wait_for_field_match("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff05::", expected_fields) + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT6" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert fv[1] == self.vrf_id + + # remove srv6 mysid udt6 behavior + dvs.runcmd("ip -6 route del fc00:0:2:ff05::/128 encap seg6local action End.DT6 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff05::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + @pytest.mark.skipif(LooseVersion(platform.release()) < LooseVersion('5.14'), + reason="This test requires Linux kernel 5.14 or higher") + def test_AddRemoveSrv6MySidUDT46(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + # enable VRF strict mode + dvs.runcmd("sysctl -w net.vrf.strict_mode=1") + + # configure srv6 usid locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16\" -c \"behavior usid\"") + + # create srv6 mysid udt46 behavior + dvs.runcmd("ip -6 route add fc00:0:2:ff05::/128 encap seg6local action End.DT46 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff05::") + + # verify that the mysid has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + + # check ASIC SAI_OBJECT_TYPE_MY_SID_ENTRY database + my_sid = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UDT46" + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert fv[1] == self.vrf_id + + # remove srv6 mysid udt46 behavior + dvs.runcmd("ip -6 route del fc00:0:2:ff05::/128 encap seg6local action End.DT46 vrftable {} dev sr0".format(self.vrf_table_id)) + + # check application database + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", "32:16:16:0:fc00:0:2:ff05::") + + # verify that the mysid has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + def verify_attribute_value(self, table, key, attribute, expected_value): + (status, fvs) = table.get(key) + assert status == True + for fv in fvs: + if fv[0] == attribute: + assert fv[1] == expected_value + + def add_mysid_cfgdb(self, locator, addr, prefix="", dscp_mode="uniform", func_len=0): + if not prefix: + prefix = addr + self.cdb.create_entry("SRV6_MY_LOCATORS", locator, {"prefix": prefix, "block_len": "32", "node_len": "16", "func_len": str(func_len), "arg_len": "0"}) + self.cdb.create_entry("SRV6_MY_SIDS", f'{locator}|{addr}/{48 + func_len}', {"decap_dscp_mode": dscp_mode}) + + def remove_mysid_cfgdb(self, locator, addr, func_len=0): + self.cdb.delete_entry("SRV6_MY_SIDS", f"{locator}|{addr}/{48 + func_len}") + self.cdb.delete_entry("SRV6_MY_LOCATORS", locator) + + def add_mysid_vtysh(self, dvs, locator, addr, prefix="", func_len=0): + if not prefix: + prefix = addr + loc_cmd = f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "locator {locator}" -c "prefix {prefix}/48 block-len 32 node-len 16 func-bits {func_len}" -c "behavior usid"' + sid_cmd = f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "sid {addr}/{48 + func_len} locator {locator} behavior uN"' + dvs.runcmd(loc_cmd) + dvs.runcmd(sid_cmd) + + def remove_mysid_vtysh(self, dvs, locator, addr, func_len=0): + sid_cmd = f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "no sid {addr}/{48 + func_len} locator {locator} behavior uN"' + loc_cmd = f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "no locator {locator}"' + dvs.runcmd(sid_cmd) + dvs.runcmd(loc_cmd) + + def test_Srv6MySidUNTunnelDscpMode(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + my_sid_table = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + tunnel_table = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + tunnel_term_table = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY") + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + tunnel_term_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY") + + mysid1 = "fc00:0:1::" + mysid2 = "fd00:0:1::" + mysid3 = "fe00:0:1:aabb::" + + # Confiure the dcsp_mode in config db + self.add_mysid_cfgdb("loc1", mysid1, dscp_mode="uniform") + self.add_mysid_cfgdb("loc2", mysid2, dscp_mode="pipe") + self.add_mysid_cfgdb("loc3", mysid3, "fe00:0:1::", dscp_mode="pipe", func_len=16) + + # Create MySID entry with dscp_mode uniformß + self.add_mysid_vtysh(dvs, "loc1", mysid1) + + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", f"32:16:0:0:{mysid1}") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY", len(tunnel_term_entries) + 1) + my_sid_uniform = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries) + tunnel_uniform = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries) + mysid_uniform_term_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY", tunnel_term_entries) + + # Create MySID entry with dscp_mode pipe + self.add_mysid_vtysh(dvs, "loc2", mysid2) + + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", f"32:16:0:0:{mysid2}") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 2) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries) + 2) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY", len(tunnel_term_entries) + 2) + my_sid_pipe = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries | set([my_sid_uniform])) + tunnel_pipe = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries | set([tunnel_uniform])) + mysid_pipe_term_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY", tunnel_term_entries | set([mysid_uniform_term_entry])) + + # Validate tunnel DSCP mode configuration + self.verify_attribute_value(my_sid_table, my_sid_uniform, "SAI_MY_SID_ENTRY_ATTR_TUNNEL_ID", tunnel_uniform) + self.verify_attribute_value(my_sid_table, my_sid_pipe, "SAI_MY_SID_ENTRY_ATTR_TUNNEL_ID", tunnel_pipe) + self.verify_attribute_value(tunnel_table, tunnel_uniform, "SAI_TUNNEL_ATTR_DECAP_DSCP_MODE", "SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL") + self.verify_attribute_value(tunnel_table, tunnel_pipe, "SAI_TUNNEL_ATTR_DECAP_DSCP_MODE", "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL") + self.verify_attribute_value(tunnel_table, tunnel_uniform, "SAI_TUNNEL_ATTR_DECAP_TTL_MODE", "SAI_TUNNEL_TTL_MODE_PIPE_MODEL") + self.verify_attribute_value(tunnel_table, tunnel_pipe, "SAI_TUNNEL_ATTR_DECAP_TTL_MODE", "SAI_TUNNEL_TTL_MODE_PIPE_MODEL") + self.verify_attribute_value(my_sid_table, my_sid_uniform, "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR", "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_USD") + self.verify_attribute_value(my_sid_table, my_sid_pipe, "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR", "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_USD") + + # Validate tunnel term configuration + self.verify_attribute_value(tunnel_term_table, mysid_uniform_term_entry, "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID", tunnel_uniform) + self.verify_attribute_value(tunnel_term_table, mysid_uniform_term_entry, "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP", mysid1) + self.verify_attribute_value(tunnel_term_table, mysid_pipe_term_entry, "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID", tunnel_pipe) + self.verify_attribute_value(tunnel_term_table, mysid_pipe_term_entry, "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP", mysid2) + + # Add another MySID entry with dscp_mode pipe + self.add_mysid_vtysh(dvs, "loc3", mysid3, prefix="fe00:0:1::", func_len=16) + + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", f"32:16:16:0:{mysid3}") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 3) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY", len(tunnel_term_entries) + 3) + + # Verify that the tunnel is reused + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries) + 2) + my_sid_pipe2 = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", self.initial_my_sid_entries | set([my_sid_uniform, my_sid_pipe])) + self.verify_attribute_value(my_sid_table, my_sid_pipe2, "SAI_MY_SID_ENTRY_ATTR_TUNNEL_ID", tunnel_pipe) + + # Remove MySID entries + self.remove_mysid_vtysh(dvs, "loc1", mysid1) + self.remove_mysid_vtysh(dvs, "loc2", mysid2) + self.remove_mysid_vtysh(dvs, "loc3", mysid3, func_len=16) + + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", f"32:16:0:0:{mysid1}") + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", f"32:16:0:0:{mysid2}") + self.pdb.wait_for_deleted_entry("SRV6_MY_SID_TABLE", f"32:16:16:0:{mysid3}") + + self.remove_mysid_cfgdb("loc1", mysid1) + self.remove_mysid_cfgdb("loc2", mysid2) + self.remove_mysid_cfgdb("loc3", mysid3, func_len=16) + + # Verify that the MySID and tunnel configuration is removed + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries)) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries)) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(tunnel_term_entries)) + + # Unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + def test_Srv6MySidUNTunnelDscpModeAmbiguity(self, dvs, testlog): + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + tunnel_table = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + + loc1_prefix = "aaaa:bbbb:1::" + loc2_prefix = "aaaa:bbbb:1:2::" + sid_addr = "aaaa:bbbb:1:2::/64" + + # Add locator 1 + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "locator loc1" -c "prefix {loc1_prefix}/48 block-len 32 node-len 16 func-bits 16" -c "behavior usid"') + self.cdb.create_entry("SRV6_MY_LOCATORS", "loc1", {"prefix": loc1_prefix, "block_len": "32", "node_len": "16", "func_len": "16", "arg_len": "0"}) + + # Add locator 2 + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "locator loc2" -c "prefix {loc2_prefix}/64 block-len 32 node-len 32 func-bits 0" -c "behavior usid"') + self.cdb.create_entry("SRV6_MY_LOCATORS", "loc2", {"prefix": loc2_prefix, "block_len": "32", "node_len": "32", "func_len": "0", "arg_len": "0"}) + + # Add SIDs CONIFG_DB + self.cdb.create_entry("SRV6_MY_SIDS", f'loc1|{sid_addr}', {"decap_dscp_mode": "uniform"}) + self.cdb.create_entry("SRV6_MY_SIDS", f'loc2|{sid_addr}', {"decap_dscp_mode": "pipe"}) + + # Add first SID + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "sid {sid_addr} locator loc1 behavior uN"') + + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", f"32:16:16:0:aaaa:bbbb:1:2::") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries) + 1) + tunnel_uniform = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries) + + # Add second SID + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "sid {sid_addr} locator loc2 behavior uN"') + + self.pdb.wait_for_entry("SRV6_MY_SID_TABLE", f"32:32:0:0:aaaa:bbbb:1:2::") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(self.initial_my_sid_entries) + 2) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries) + 2) + tunnel_pipe = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries | set([tunnel_uniform])) + + self.verify_attribute_value(tunnel_table, tunnel_uniform, "SAI_TUNNEL_ATTR_DECAP_DSCP_MODE", "SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL") + self.verify_attribute_value(tunnel_table, tunnel_pipe, "SAI_TUNNEL_ATTR_DECAP_DSCP_MODE", "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL") + self.verify_attribute_value(tunnel_table, tunnel_uniform, "SAI_TUNNEL_ATTR_DECAP_TTL_MODE", "SAI_TUNNEL_TTL_MODE_PIPE_MODEL") + self.verify_attribute_value(tunnel_table, tunnel_pipe, "SAI_TUNNEL_ATTR_DECAP_TTL_MODE", "SAI_TUNNEL_TTL_MODE_PIPE_MODEL") + + # Cleanup + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "no sid {sid_addr} locator loc1 behavior uN"') + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "static-sids" -c "no sid {sid_addr} locator loc2 behavior uN"') + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "no locator loc1"') + dvs.runcmd(f'vtysh -c "configure terminal" -c "segment-routing" -c "srv6" -c "locators" -c "no locator loc2"') + + self.cdb.delete_entry("SRV6_MY_SIDS", f'loc1|{sid_addr}') + self.cdb.delete_entry("SRV6_MY_SIDS", f'loc2|{sid_addr}') + self.cdb.delete_entry("SRV6_MY_LOCATORS", "loc1") + self.cdb.delete_entry("SRV6_MY_LOCATORS", "loc2") + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + self.teardown_srv6(dvs) + +class TestSrv6VpnFpmsyncd(object): + """ Functionality tests for SRv6 VPN handling in fpmsyncd """ + + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + + def create_vrf(self, vrf_name): + table = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + self.cdb.create_entry("VRF", vrf_name, {"empty": "empty"}) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_vrf(self, vrf_name): + self.cdb.delete_entry("VRF", vrf_name) + + def setup_srv6(self, dvs): + self.setup_db(dvs) + + # create vrf + initial_vrf_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) + self.create_vrf("Vrf13") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER", len(initial_vrf_entries) + 1) + + # create dummy interface sr0 + dvs.runcmd("ip link add sr0 type dummy") + dvs.runcmd("ip link set sr0 up") + + def teardown_srv6(self, dvs): + # remove dummy interface sr0 + dvs.runcmd("ip link del sr0 type dummy") + + # remove vrf + initial_vrf_entries = set(self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) + self.remove_vrf("Vrf13") + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER", len(initial_vrf_entries) - 1) + + def test_AddRemoveSrv6SteeringRouteIpv4(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"interface lo\" -c \"ip address fc00:0:2::1/128\"") + + # configure srv6 usid locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16\" -c \"behavior usid\"") + + # save exist asic db entries + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + route_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + sidlist_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + + # create v4 route with vpn sid + dvs.runcmd("ip route add 192.0.2.0/24 encap seg6 mode encap segs fc00:0:1:e000:: dev sr0 vrf Vrf13") + + time.sleep(3) + + # check application database + self.pdb.wait_for_entry("ROUTE_TABLE", "Vrf13:192.0.2.0/24") + expected_fields = {"segment": "fc00:0:1:e000::", "seg_src": "fc00:0:2::1"} + self.pdb.wait_for_field_match("ROUTE_TABLE", "Vrf13:192.0.2.0/24", expected_fields) + + self.pdb.wait_for_entry("SRV6_SID_LIST_TABLE", "fc00:0:1:e000::") + expected_fields = {"path": "fc00:0:1:e000::"} + self.pdb.wait_for_field_match("SRV6_SID_LIST_TABLE", "fc00:0:1:e000::", expected_fields) + + # verify that the route has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(nexthop_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST", len(sidlist_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", len(route_entries) + 1) + + # get created entries + route_key = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", route_entries) + nexthop_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries) + tunnel_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries) + sidlist_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST", sidlist_entries) + + # check ASIC SAI_OBJECT_TYPE_SRV6_SIDLIST database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + (status, fvs) = tbl.get(sidlist_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST": + assert fv[1] == "1:fc00:0:1:e000::" + elif fv[0] == "SAI_SRV6_SIDLIST_ATTR_TYPE": + assert fv[1] == "SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED" + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == nexthop_id + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + (status, fvs) = tbl.get(nexthop_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_TYPE": + assert fv[1] == "SAI_NEXT_HOP_TYPE_SRV6_SIDLIST" + if fv[0] == "SAI_NEXT_HOP_ATTR_SRV6_SIDLIST_ID": + assert fv[1] == sidlist_id + elif fv[0] == "SAI_NEXT_HOP_ATTR_TUNNEL_ID": + assert fv[1] == tunnel_id + + # check ASIC SAI_OBJECT_TYPE_TUNNEL database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + (status, fvs) = tbl.get(tunnel_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_ATTR_TYPE": + assert fv[1] == "SAI_TUNNEL_TYPE_SRV6" + elif fv[0] == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": + assert fv[1] == "fc00:0:2::1" + + # remove v4 route with vpn sid + dvs.runcmd("ip route del 192.0.2.0/24 encap seg6 mode encap segs fc00:0:1:e000:: dev sr0 vrf Vrf13") + + time.sleep(3) + + # check application database + self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "Vrf13:192.0.2.0/24") + self.pdb.wait_for_deleted_entry("SRV6_SID_LIST_TABLE", "fc00:0:1:e000::") + + # verify that the route has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(nexthop_entries)) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries)) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", len(route_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + + def test_AddRemoveSrv6SteeringRouteIpv6(self, dvs, testlog): + + _, output = dvs.runcmd(f"vtysh -c 'show zebra dplane providers'") + if 'dplane_fpm_sonic' not in output: + pytest.skip("'dplane_fpm_sonic' required for this test is not available, skipping", allow_module_level=True) + + self.setup_srv6(dvs) + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"interface lo\" -c \"ip address fc00:0:2::1/128\"") + + # configure srv6 usid locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"srv6\" -c \"locators\" -c \"locator loc1\" -c \"prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16\" -c \"behavior usid\"") + + # save exist asic db entries + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + route_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + sidlist_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + + # create v6 route with vpn sid + dvs.runcmd("ip -6 route add 2001:db8:1:1::/64 encap seg6 mode encap segs fc00:0:1:e000:: dev sr0 vrf Vrf13") + + time.sleep(3) + + # check application database + self.pdb.wait_for_entry("ROUTE_TABLE", "Vrf13:2001:db8:1:1::/64") + expected_fields = {"segment": "fc00:0:1:e000::", "seg_src": "fc00:0:2::1"} + self.pdb.wait_for_field_match("ROUTE_TABLE", "Vrf13:2001:db8:1:1::/64", expected_fields) + + self.pdb.wait_for_entry("SRV6_SID_LIST_TABLE", "fc00:0:1:e000::") + expected_fields = {"path": "fc00:0:1:e000::"} + self.pdb.wait_for_field_match("SRV6_SID_LIST_TABLE", "fc00:0:1:e000::", expected_fields) + + # verify that the route has been programmed into the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(nexthop_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST", len(sidlist_entries) + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", len(route_entries) + 1) + + # get created entries + route_key = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", route_entries) + nexthop_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries) + tunnel_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries) + sidlist_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST", sidlist_entries) + + # check ASIC SAI_OBJECT_TYPE_SRV6_SIDLIST database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + (status, fvs) = tbl.get(sidlist_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST": + assert fv[1] == "1:fc00:0:1:e000::" + elif fv[0] == "SAI_SRV6_SIDLIST_ATTR_TYPE": + assert fv[1] == "SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED" + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == nexthop_id + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + (status, fvs) = tbl.get(nexthop_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_TYPE": + assert fv[1] == "SAI_NEXT_HOP_TYPE_SRV6_SIDLIST" + if fv[0] == "SAI_NEXT_HOP_ATTR_SRV6_SIDLIST_ID": + assert fv[1] == sidlist_id + elif fv[0] == "SAI_NEXT_HOP_ATTR_TUNNEL_ID": + assert fv[1] == tunnel_id + + # check ASIC SAI_OBJECT_TYPE_TUNNEL database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + (status, fvs) = tbl.get(tunnel_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_ATTR_TYPE": + assert fv[1] == "SAI_TUNNEL_TYPE_SRV6" + elif fv[0] == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": + assert fv[1] == "fc00:0:2::1" + + # remove v4 route with vpn sid + dvs.runcmd("ip route del 2001:db8:1:1::/64 encap seg6 mode encap segs fc00:0:1:e000:: dev sr0 vrf Vrf13") + + time.sleep(3) + + # check application database + self.pdb.wait_for_deleted_entry("ROUTE_TABLE", "Vrf13:2001:db8:1:1::/64") + self.pdb.wait_for_deleted_entry("SRV6_SID_LIST_TABLE", "fc00:0:1:e000::") + + # verify that the route has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(nexthop_entries)) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", len(tunnel_entries)) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", len(route_entries)) + + # unconfigure srv6 locator + dvs.runcmd("vtysh -c \"configure terminal\" -c \"segment-routing\" -c \"no srv6\"") + + self.teardown_srv6(dvs) + +class TestSrv6Vpn(object): + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + + def create_srv6_vpn_route(self, routeip, nexthop, segsrc, vpn_sid, ifname): + table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('seg_src', segsrc), ('nexthop', nexthop), ('vpn_sid', vpn_sid), ('ifname', ifname)]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def create_srv6_vpn_route_with_nhg(self, routeip, nhg_index, pic_ctx_index): + table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('nexthop_group', nhg_index), ('pic_context_id', pic_ctx_index)]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def update_srv6_vpn_route_attribute_with_nhg(self, routeip, nhg_index, pic_ctx_index): + fvs=swsscommon.FieldValuePairs([('nexthop_group', nhg_index), ('pic_context_id', pic_ctx_index)]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + return True + + def update_srv6_vpn_route_attribute(self, routeip, nexthops, segsrc_list, vpn_list, ifname_list): + fvs=swsscommon.FieldValuePairs([('seg_src', ",".join(segsrc_list)), ('nexthop', ",".join(nexthops)), ('vpn_sid', ",".join(vpn_list)), ('ifname', ",".join(ifname_list))]) + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl.set(routeip,fvs) + return True + + def remove_srv6_route(self, routeip): + routetbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + routetbl._del(routeip) + + def create_nhg(self, nhg_index, nexthops, segsrc_list, ifname_list): + table = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('seg_src', ",".join(segsrc_list)), ('nexthop', ",".join(nexthops)), ('ifname', ",".join(ifname_list))]) + nhgtbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEXTHOP_GROUP_TABLE") + nhgtbl.set(nhg_index,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + len(nexthops)) + return get_created_entries(self.adb.db_connection, table, existed_entries, len(nexthops)) + + def remove_nhg(self, nhg_index): + nhgtbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEXTHOP_GROUP_TABLE") + nhgtbl._del(nhg_index) + + def create_pic_context(self, pic_ctx_id, nexthops, vpn_list): + table = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + fvs=swsscommon.FieldValuePairs([('nexthop', ",".join(nexthops)), ('vpn_sid', ",".join(vpn_list))]) + pictbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "PIC_CONTEXT_TABLE") + pictbl.set(pic_ctx_id,fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + len(vpn_list)) + return get_created_entries(self.adb.db_connection, table, existed_entries, len(vpn_list)) + + def remove_pic_context(self, pic_ctx_id): + pictbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "PIC_CONTEXT_TABLE") + pictbl._del(pic_ctx_id) + + def check_deleted_route_entries(self, destinations): + def _access_function(): + route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + route_destinations = [json.loads(route_entry)["dest"] for route_entry in route_entries] + return (all(destination not in route_destinations for destination in destinations), None) + + wait_for_result(_access_function) + + def test_srv6_vpn_with_single_nh(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + # save exist asic db entries + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + map_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + + # create v4 route with vpn sid + route_key = self.create_srv6_vpn_route('5000::/64', '2001::1', '1001:2000::1', '3000::1', 'unknown') + nexthop_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries) + tunnel_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries) + map_entry_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries) + map_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP", map_entries) + prefix_agg_id = "1" + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert prefix_agg_id == fv[1] + + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + (status, fvs) = tbl.get(map_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ATTR_TYPE": + assert fv[1] == "SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID" + + # check ASIC SAI_OBJECT_TYPE_TUNNEL database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + (status, fvs) = tbl.get(tunnel_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_ATTR_PEER_MODE": + assert fv[1] == "SAI_TUNNEL_PEER_MODE_P2P" + + # check vpn sid value in SRv6 route is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + (status, fvs) = tbl.get(map_entry_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_SRV6_VPN_SID_VALUE": + assert fv[1] == "3000::1" + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + assert fv[1] == prefix_agg_id + + # check sid list value in ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + (status, fvs) = tbl.get(nexthop_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_TYPE": + assert fv[1] == "SAI_NEXT_HOP_TYPE_SRV6_SIDLIST" + + self.remove_srv6_route('5000::/64') + self.check_deleted_route_entries('5000::/64') + time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + (status, fvs) = tbl.get(map_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_TUNNEL is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + (status, fvs) = tbl.get(tunnel_id) + assert status == False + + # check vpn sid value in SRv6 route is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + (status, fvs) = tbl.get(map_entry_id) + assert status == False + + # check nexthop id in SRv6 route is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + (status, fvs) = tbl.get(nexthop_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == False + + def test_pic(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + segsrc_list = [] + nexthop_list = [] + ifname_list = [] + vpn_list = [] + nhg_index = '100' + pic_ctx_index = '200' + + # save exist asic db entries + tunnel_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + map_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + nexthop_group_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + nexthop_group_member_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + + segsrc_list.append('1001:2000::1') + segsrc_list.append('1001:2000::1') + + nexthop_list.append('2000::1') + nexthop_list.append('2000::2') + + ifname_list.append('unknown') + ifname_list.append('unknown') + + vpn_list.append('3000::1') + vpn_list.append('3000::2') + + self.create_nhg(nhg_index, nexthop_list, segsrc_list, ifname_list) + tunnel_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL", tunnel_entries, 2) + nh_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries, 2) + nhg_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nexthop_group_entries) + nhg_mem = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", nexthop_group_member_entries, 2) + map_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP", map_entries, 2) + + nh_ids = sorted(nh_ids) + nhg_mem = sorted(nhg_mem) + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[0]) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[0] + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[1]) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[1] + + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + for map_id in map_ids: + (status, fvs) = tbl.get(map_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ATTR_TYPE": + assert fv[1] == "SAI_TUNNEL_MAP_TYPE_PREFIX_AGG_ID_TO_SRV6_VPN_SID" + + # check ASIC SAI_OBJECT_TYPE_TUNNEL database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + for tunnel_id in tunnel_ids: + (status, fvs) = tbl.get(tunnel_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_ATTR_PEER_MODE": + assert fv[1] == "SAI_TUNNEL_PEER_MODE_P2P" + + # check sid list value in ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for nh_id in nh_ids: + (status, fvs) = tbl.get(nh_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_TYPE": + assert fv[1] == "SAI_NEXT_HOP_TYPE_SRV6_SIDLIST" + + self.create_pic_context(pic_ctx_index, nexthop_list, vpn_list) + map_entry_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries, 2) + prefix_agg_id = "1" + + # check vpn sid value in SRv6 route is created + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + for map_entry_id in map_entry_ids: + (status, fvs) = tbl.get(map_entry_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + assert fv[1] == prefix_agg_id + + # remove nhg and pic_context + self.remove_nhg(nhg_index) + self.remove_pic_context(pic_ctx_index) + + time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + (status, fvs) = tbl.get(nhg_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + for nhg_mem_id in nhg_mem: + (status, fvs) = tbl.get(nhg_mem_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_TUNNEL_MAP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP") + for map_id in map_ids: + (status, fvs) = tbl.get(map_id) + assert status == False + + # check ASIC SAI_OBJECT_TYPE_TUNNEL is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL") + for tunnel_id in tunnel_ids: + (status, fvs) = tbl.get(tunnel_id) + assert status == False + + # check next hop in ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for nh_id in nh_ids: + (status, fvs) = tbl.get(nh_id) + assert status == False + + # check vpn sid value in SRv6 route is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + for map_entry_id in map_entry_ids: + (status, fvs) = tbl.get(map_entry_id) + assert status == False + + def test_srv6_vpn_with_nhg(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + segsrc_list = [] + nexthop_list = [] + vpn_list = [] + ifname_list = [] + nhg_index = '100' + pic_ctx_index = '200' + + # save exist asic db entries + nexthop_group_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + + segsrc_list.append('1001:2000::1') + segsrc_list.append('1001:2000::1') + + nexthop_list.append('2000::1') + nexthop_list.append('2000::2') + + vpn_list.append('3000::1') + vpn_list.append('3000::2') + + ifname_list.append('unknown') + ifname_list.append('unknown') + + self.create_nhg(nhg_index, nexthop_list, segsrc_list, ifname_list) + self.create_pic_context(pic_ctx_index, nexthop_list, vpn_list) + route_key = self.create_srv6_vpn_route_with_nhg('5000::/64', nhg_index, pic_ctx_index) + + nhg_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nexthop_group_entries) + prefix_agg_id = "1" + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == nhg_id + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert fv[1] == prefix_agg_id + + route_key_new = self.create_srv6_vpn_route_with_nhg('5001::/64', nhg_index, pic_ctx_index) + + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key_new) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == nhg_id + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert fv[1] == prefix_agg_id + + # remove routes + self.remove_srv6_route('5001::/64') + self.check_deleted_route_entries('5001::/64') + + time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key_new) + assert status == False + + # remove routes + self.remove_srv6_route('5000::/64') + self.check_deleted_route_entries('5000::/64') + + time.sleep(5) + # check ASIC SAI_OBJECT_TYPE_ROUTE_ENTRY is removed + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == False + + # remove nhg and pic_context + self.remove_nhg(nhg_index) + self.remove_pic_context(pic_ctx_index) + + def test_srv6_vpn_nh_update(self, dvs, testlog): + self.setup_db(dvs) + dvs.setup_db() + + segsrc_list = [] + nexthop_list = [] + vpn_list = [] + ifname_list = [] + + # save exist asic db entries + nexthop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + + nexthop_group_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + nexthop_group_member_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + map_entry_prefix_agg_id = "1" + route_entry_prefix_agg_id = "1" + route_entry_next_hop_id = "1" + + # create v4 route with vpn sid + route_key = self.create_srv6_vpn_route('5000::/64', '2000::1', '1001:2000::1', '3000::1', 'unknown') + map_entry_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries) + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + (status, fvs) = tbl.get(map_entry_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + map_entry_prefix_agg_id = fv[1] + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + route_entry_next_hop_id = fv[1] + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + route_entry_prefix_agg_id = fv[1] + + segsrc_list.append('1001:2000::1') + segsrc_list.append('1001:2000::1') + + nexthop_list.append('2000::1') + nexthop_list.append('2000::2') + + vpn_list.append('3000::1') + vpn_list.append('3000::2') + + ifname_list.append('unknown') + ifname_list.append('unknown') + + nhg_index = '100' + pic_ctx_index = '200' + + map_entry_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + + self.create_nhg(nhg_index, nexthop_list, segsrc_list, ifname_list) + self.create_pic_context(pic_ctx_index, nexthop_list, vpn_list) + self.update_srv6_vpn_route_attribute_with_nhg('5000::/64', nhg_index, pic_ctx_index) + + time.sleep(5) + nh_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_entries, 2) + nhg_id = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP", nexthop_group_entries) + nhg_mem = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", nexthop_group_member_entries, 2) + + map_entry_ids = get_created_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY", map_entry_entries, 2) + map_entry_id_group = "1" + + for map_id in map_entry_ids: + if map_id != map_entry_id: + map_entry_id_group = map_id + break + + nh_ids = sorted(nh_ids) + nhg_mem = sorted(nhg_mem) + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY") + (status, fvs) = tbl.get(map_entry_id_group) + assert status == True + for fv in fvs: + if fv[0] == "SAI_TUNNEL_MAP_ENTRY_ATTR_PREFIX_AGG_ID_KEY": + assert fv[1] != map_entry_prefix_agg_id + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP") + (status, fvs) = tbl.get(nhg_id) + assert status == True + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[0]) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[0] + + # check ASIC SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + (status, fvs) = tbl.get(nhg_mem[1]) + assert status == True + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": + assert fv[1] == nhg_id + elif fv[0] == "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID": + assert fv[1] == nh_ids[1] + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + (status, fvs) = tbl.get(route_key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] != route_entry_next_hop_id + if fv[0] == "SAI_ROUTE_ENTRY_ATTR_PREFIX_AGG_ID": + assert fv[1] != route_entry_prefix_agg_id + + # remove routes + self.remove_srv6_route('5000::/64') + self.check_deleted_route_entries('5000::/64') + time.sleep(5) + + # remove nhg and pic_context + self.remove_nhg(nhg_index) + self.remove_pic_context(pic_ctx_index) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_sub_port_intf.py b/tests/test_sub_port_intf.py index ec76ec13bbe..9ff8ad63e6a 100644 --- a/tests/test_sub_port_intf.py +++ b/tests/test_sub_port_intf.py @@ -393,7 +393,7 @@ def get_default_vrf_oid(self): assert len(oids) == 1, "Wrong # of default vrfs: %d, expected #: 1." % (len(oids)) return oids[0] - def get_ip_prefix_nhg_oid(self, ip_prefix, vrf_oid=None): + def get_ip_prefix_nhg_oid(self, ip_prefix, vrf_oid=None, prefix_present=True): if vrf_oid is None: vrf_oid = self.default_vrf_oid @@ -407,18 +407,24 @@ def _access_function(): route_entry_found = True assert route_entry_key["vr"] == vrf_oid break - - return (route_entry_found, raw_route_entry_key) + if prefix_present: + return (route_entry_found, raw_route_entry_key) + else: + return (not route_entry_found, None) (route_entry_found, raw_route_entry_key) = wait_for_result(_access_function) - fvs = self.asic_db.get_entry(ASIC_ROUTE_ENTRY_TABLE, raw_route_entry_key) + if not prefix_present: + assert raw_route_entry_key == None + return None + else: + fvs = self.asic_db.get_entry(ASIC_ROUTE_ENTRY_TABLE, raw_route_entry_key) - nhg_oid = fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "") - assert nhg_oid != "" - assert nhg_oid != "oid:0x0" + nhg_oid = fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "") + assert nhg_oid != "" + assert nhg_oid != "oid:0x0" - return nhg_oid + return nhg_oid def check_sub_port_intf_key_existence(self, db, table_name, key): db.wait_for_matching_keys(table_name, [key]) @@ -582,6 +588,99 @@ def _test_sub_port_intf_creation(self, dvs, sub_port_intf_name, vrf_name=None): self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) + def _test_sub_port_intf_creation_add_lag_member(self, dvs, sub_port_intf_name, vrf_name=None): + substrs = sub_port_intf_name.split(VLAN_SUB_INTERFACE_SEPARATOR) + parent_port = self.get_parent_port(sub_port_intf_name) + vlan_id = substrs[1] + + assert parent_port.startswith(SUBINTF_LAG_PREFIX) + state_tbl_name = STATE_LAG_TABLE_NAME + phy_ports = self.LAG_MEMBERS_UNDER_TEST + old_lag_oids = self.get_oids(ASIC_LAG_TABLE) + + vrf_oid = self.default_vrf_oid + old_rif_oids = self.get_oids(ASIC_RIF_TABLE) + + self.set_parent_port_admin_status(dvs, parent_port, "up") + + parent_port_oid = self.get_newly_created_oid(ASIC_LAG_TABLE, old_lag_oids) + # Add lag members to test physical port host interface vlan tag attribute + self.add_lag_members(parent_port, self.LAG_MEMBERS_UNDER_TEST[1:]) + self.asic_db.wait_for_n_keys(ASIC_LAG_MEMBER_TABLE, len(self.LAG_MEMBERS_UNDER_TEST[1:])) + if vrf_name: + self.create_vrf(vrf_name) + vrf_oid = self.get_newly_created_oid(ASIC_VIRTUAL_ROUTER_TABLE, [vrf_oid]) + self.create_sub_port_intf_profile(sub_port_intf_name, vrf_name) + self.add_lag_members(parent_port, self.LAG_MEMBERS_UNDER_TEST[:1]) + self.asic_db.wait_for_n_keys(ASIC_LAG_MEMBER_TABLE, len(self.LAG_MEMBERS_UNDER_TEST)) + + # Verify that sub port interface state ok is pushed to STATE_DB by Intfmgrd + fv_dict = { + "state": "ok", + } + self.check_sub_port_intf_fvs(self.state_db, state_tbl_name, sub_port_intf_name, fv_dict) + + # Verify vrf name sub port interface bound to in STATE_DB INTERFACE_TABLE + fv_dict = { + "vrf": vrf_name if vrf_name else "", + } + self.check_sub_port_intf_fvs(self.state_db, STATE_INTERFACE_TABLE_NAME, sub_port_intf_name, fv_dict) + + # If bound to non-default vrf, verify sub port interface vrf binding in linux kernel, + # and parent port not bound to vrf + if vrf_name: + self.check_sub_port_intf_vrf_bind_kernel(dvs, sub_port_intf_name, vrf_name) + self.check_sub_port_intf_vrf_nobind_kernel(dvs, parent_port, vrf_name) + + # Verify that sub port interface configuration is synced to APPL_DB INTF_TABLE by Intfmgrd + fv_dict = { + ADMIN_STATUS: "up", + } + if vrf_name: + fv_dict[VRF_NAME if vrf_name.startswith(VRF_PREFIX) else VNET_NAME] = vrf_name + self.check_sub_port_intf_fvs(self.app_db, APP_INTF_TABLE_NAME, sub_port_intf_name, fv_dict) + + # Verify that a sub port router interface entry is created in ASIC_DB + fv_dict = { + "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_SUB_PORT", + "SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID": "{}".format(vlan_id), + "SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE": "true", + "SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE": "true", + "SAI_ROUTER_INTERFACE_ATTR_MTU": DEFAULT_MTU, + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": vrf_oid, + "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": parent_port_oid, + } + rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids) + self.check_sub_port_intf_fvs(self.asic_db, ASIC_RIF_TABLE, rif_oid, fv_dict) + + # Verify physical port host interface vlan tag attribute + fv_dict = { + "SAI_HOSTIF_ATTR_VLAN_TAG": "SAI_HOSTIF_VLAN_TAG_KEEP", + } + for phy_port in phy_ports: + hostif_oid = dvs.asicdb.hostifnamemap[phy_port] + self.check_sub_port_intf_fvs(self.asic_db, ASIC_HOSTIF_TABLE, hostif_oid, fv_dict) + + # Remove a sub port interface + self.remove_sub_port_intf_profile(sub_port_intf_name) + self.check_sub_port_intf_profile_removal(rif_oid) + + # Remove vrf if created + if vrf_name: + self.remove_vrf(vrf_name) + self.check_vrf_removal(vrf_oid) + if vrf_name.startswith(VNET_PREFIX): + self.remove_vxlan_tunnel(self.TUNNEL_UNDER_TEST) + self.app_db.wait_for_n_keys(ASIC_TUNNEL_TABLE, 0) + + # Remove lag members from lag parent port + self.remove_lag_members(parent_port, self.LAG_MEMBERS_UNDER_TEST) + self.asic_db.wait_for_n_keys(ASIC_LAG_MEMBER_TABLE, 0) + + # Remove lag + self.remove_lag(parent_port) + self.check_lag_removal(parent_port_oid) + def test_sub_port_intf_creation(self, dvs): self.connect_dbs(dvs) @@ -594,6 +693,8 @@ def test_sub_port_intf_creation(self, dvs): self._test_sub_port_intf_creation(dvs, self.SUB_PORT_INTERFACE_UNDER_TEST, self.VNET_UNDER_TEST) self._test_sub_port_intf_creation(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, self.VNET_UNDER_TEST) + self._test_sub_port_intf_creation_add_lag_member(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST) + def _test_sub_port_intf_add_ip_addrs(self, dvs, sub_port_intf_name, vrf_name=None): substrs = sub_port_intf_name.split(VLAN_SUB_INTERFACE_SEPARATOR) parent_port = self.get_parent_port(sub_port_intf_name) @@ -1543,21 +1644,26 @@ def _test_sub_port_intf_oper_down_with_pending_neigh_route_tasks(self, dvs, sub_ self.add_route_appl_db(ip_prefix, nhop_ips, ifnames, vrf_name) # Verify route entry created in ASIC_DB and get next hop group oid - nhg_oid = self.get_ip_prefix_nhg_oid(ip_prefix, vrf_oid) + nhg_oid = self.get_ip_prefix_nhg_oid(ip_prefix, vrf_oid, prefix_present = i < (nhop_num - 1)) - # Verify next hop group of the specified oid created in ASIC_DB - self.check_sub_port_intf_key_existence(self.asic_db, ASIC_NEXT_HOP_GROUP_TABLE, nhg_oid) + if i < (nhop_num - 1): + # Verify next hop group of the specified oid created in ASIC_DB + self.check_sub_port_intf_key_existence(self.asic_db, ASIC_NEXT_HOP_GROUP_TABLE, nhg_oid) - # Verify next hop group member # created in ASIC_DB - nhg_member_oids = self.asic_db.wait_for_n_keys(ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, - (nhop_num - 1) - i if create_intf_on_parent_port == False else ((nhop_num - 1) - i) * 2) - - # Verify that next hop group members all belong to the next hop group of the specified oid - fv_dict = { - "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": nhg_oid, - } - for nhg_member_oid in nhg_member_oids: - self.check_sub_port_intf_fvs(self.asic_db, ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, nhg_member_oid, fv_dict) + # Verify next hop group member # created in ASIC_DB + nhg_member_oids = self.asic_db.wait_for_n_keys(ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, + (nhop_num - 1) - i if create_intf_on_parent_port == False \ + else ((nhop_num - 1) - i) * 2) + + # Verify that next hop group members all belong to the next hop group of the specified oid + fv_dict = { + "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID": nhg_oid, + } + for nhg_member_oid in nhg_member_oids: + self.check_sub_port_intf_fvs(self.asic_db, ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, nhg_member_oid, fv_dict) + else: + assert nhg_oid == None + self.asic_db.wait_for_n_keys(ASIC_NEXT_HOP_GROUP_MEMBER_TABLE, 0) nhop_cnt = len(self.asic_db.get_keys(ASIC_NEXT_HOP_TABLE)) # Remove next hop objects on sub port interfaces diff --git a/tests/test_switch.py b/tests/test_switch.py index 93cfd1389c3..652fa465736 100644 --- a/tests/test_switch.py +++ b/tests/test_switch.py @@ -65,6 +65,27 @@ def vxlan_switch_test(dvs, oid, port, mac, mask, sport): ) +def ecmp_lag_hash_offset_test(dvs, oid, lag_offset, ecmp_offset): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ("ecmp_hash_offset", ecmp_offset), + ("lag_hash_offset", lag_offset) + ], + ) + time.sleep(2) + + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + check_object(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH", oid, + { + 'SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_OFFSET': ecmp_offset, + 'SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_OFFSET': lag_offset, + } + ) + + class TestSwitch(object): ''' Test- Check switch attributes @@ -75,6 +96,8 @@ def test_switch_attribute(self, dvs, testlog): vxlan_switch_test(dvs, switch_oid, "56789", "00:0A:0B:0C:0D:0E", "15", "56789") + ecmp_lag_hash_offset_test(dvs, switch_oid, "10", "10") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_trimming.py b/tests/test_trimming.py new file mode 100644 index 00000000000..72f2358893b --- /dev/null +++ b/tests/test_trimming.py @@ -0,0 +1,1611 @@ +import pytest +import time +import logging + +from typing import NamedTuple +from swsscommon import swsscommon + +import buffer_model + + +logging.basicConfig(level=logging.INFO) +trimlogger = logging.getLogger(__name__) + + +SAI_DSCP_MODE_DICT = { + "dscp-value": "SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_DSCP_VALUE", + "from-tc": "SAI_PACKET_TRIM_DSCP_RESOLUTION_MODE_FROM_TC" +} +SAI_QUEUE_MODE_DICT = { + "static": "SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_STATIC", + "dynamic": "SAI_PACKET_TRIM_QUEUE_RESOLUTION_MODE_DYNAMIC" +} +SAI_BUFFER_PROFILE_MODE_DICT = { + "drop": "SAI_BUFFER_PROFILE_PACKET_ADMISSION_FAIL_ACTION_DROP", + "trim": "SAI_BUFFER_PROFILE_PACKET_ADMISSION_FAIL_ACTION_DROP_AND_TRIM" +} +SAI_BUFFER_PROFILE_LIST_DICT = { + "ingress": "SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST", + "egress": "SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST" +} + + +class TrimmingTuple(NamedTuple): + """Config DB trimming attribute container""" + size: str + dscp: str + tc: str + queue: str + + +class TrimmingTupleSai(NamedTuple): + """ASIC DB trimming attribute container""" + size: str + dscpMode: str + dscp: str + tc: str + queueMode: str + queue: str + + +@pytest.fixture(scope="class") +def dynamicModel(dvs): + trimlogger.info("Enable dynamic buffer model") + buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + yield + buffer_model.disable_dynamic_buffer(dvs) + trimlogger.info("Disable dynamic buffer model") + + +@pytest.fixture(scope="class") +def switchCounters(request, dvs_flex_counter_manager): + trimlogger.info("Initialize switch counters") + + request.cls.dvs_flex_counter.set_interval("SWITCH", "1000") + request.cls.dvs_flex_counter.set_status("SWITCH", "enable") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "enable", + swsscommon.POLL_INTERVAL_FIELD: "1000", + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="SWITCH_STAT_COUNTER", + qualifiers=attr_dict + ) + + yield + + request.cls.dvs_flex_counter.set_status("SWITCH", "disable") + request.cls.dvs_flex_counter.set_interval("SWITCH", "60000") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "disable", + swsscommon.POLL_INTERVAL_FIELD: "60000", + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="SWITCH_STAT_COUNTER", + qualifiers=attr_dict + ) + + trimlogger.info("Deinitialize switch counters") + + +@pytest.fixture(scope="class") +def portCounters(request, dvs_flex_counter_manager): + trimlogger.info("Initialize port counters") + + request.cls.dvs_flex_counter.set_status("PORT", "enable") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "enable" + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="PORT_STAT_COUNTER", + qualifiers=attr_dict + ) + + yield + + request.cls.dvs_flex_counter.set_status("PORT", "disable") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "disable", + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="PORT_STAT_COUNTER", + qualifiers=attr_dict + ) + + trimlogger.info("Deinitialize port counters") + + +@pytest.fixture(scope="class") +def pgCounters(request, dvs_flex_counter_manager): + trimlogger.info("Initialize priority group counters") + + request.cls.dvs_flex_counter.set_interval("PG_WATERMARK", "1000") + request.cls.dvs_flex_counter.set_status("PG_WATERMARK", "enable") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "enable", + swsscommon.POLL_INTERVAL_FIELD: "1000", + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="PG_WATERMARK_STAT_COUNTER", + qualifiers=attr_dict + ) + + yield + + request.cls.dvs_flex_counter.set_status("PG_WATERMARK", "disable") + request.cls.dvs_flex_counter.set_interval("PG_WATERMARK", "60000") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "disable", + swsscommon.POLL_INTERVAL_FIELD: "60000", + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="PG_WATERMARK_STAT_COUNTER", + qualifiers=attr_dict + ) + + trimlogger.info("Deinitialize priority group counters") + + +@pytest.fixture(scope="class") +def queueCounters(request, dvs_flex_counter_manager): + trimlogger.info("Initialize queue counters") + + request.cls.dvs_flex_counter.set_interval("QUEUE", "1000") + request.cls.dvs_flex_counter.set_status("QUEUE", "enable") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "enable", + swsscommon.POLL_INTERVAL_FIELD: "1000", + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="QUEUE_STAT_COUNTER", + qualifiers=attr_dict + ) + + yield + + request.cls.dvs_flex_counter.set_status("QUEUE", "disable") + request.cls.dvs_flex_counter.set_interval("QUEUE", "10000") + + attr_dict = { + swsscommon.FLEX_COUNTER_STATUS_FIELD: "disable", + swsscommon.POLL_INTERVAL_FIELD: "10000", + } + + request.cls.dvs_flex_counter.verify_flex_counter( + stat_name="QUEUE_STAT_COUNTER", + qualifiers=attr_dict + ) + + trimlogger.info("Deinitialize queue counters") + + +@pytest.mark.usefixtures("dvs_switch_manager") +@pytest.mark.usefixtures("testlog") +class TestTrimmingFlows: + @pytest.fixture(scope="class") + def switchData(self): + trimlogger.info("Initialize switch data") + + trimlogger.info("Verify switch count") + self.dvs_switch.verify_switch_count(0) + + trimlogger.info("Get switch id") + switchIdList = self.dvs_switch.get_switch_ids() + + # Assumption: VS has only one switch object + meta_dict = { + "id": switchIdList[0] + } + + yield meta_dict + + trimlogger.info("Deinitialize switch data") + + +class TestTrimmingBasicFlows(TestTrimmingFlows): + @pytest.mark.parametrize( + "attrDict,saiAttrDict", [ + pytest.param( + TrimmingTuple(size="100", dscp="10", tc=None, queue="1"), + TrimmingTupleSai( + size="100", + dscpMode=SAI_DSCP_MODE_DICT["dscp-value"], + dscp="10", + tc=None, + queueMode=SAI_QUEUE_MODE_DICT["static"], + queue="1" + ), + id="symmetric-dscp-static-queue-index" + ), + pytest.param( + TrimmingTuple(size="200", dscp="20", tc=None, queue="dynamic"), + TrimmingTupleSai( + size="200", + dscpMode=SAI_DSCP_MODE_DICT["dscp-value"], + dscp="20", + tc=None, + queueMode=SAI_QUEUE_MODE_DICT["dynamic"], + queue="1" + ), + id="symmetric-dscp-dynamic-queue-index" + ), + pytest.param( + TrimmingTuple(size="100", dscp="from-tc", tc="1", queue="1"), + TrimmingTupleSai( + size="100", + dscpMode=SAI_DSCP_MODE_DICT["from-tc"], + dscp="20", + tc="1", + queueMode=SAI_QUEUE_MODE_DICT["static"], + queue="1" + ), + id="asymmetric-dscp-static-queue-index" + ), + pytest.param( + TrimmingTuple(size="200", dscp="from-tc", tc="2", queue="dynamic"), + TrimmingTupleSai( + size="200", + dscpMode=SAI_DSCP_MODE_DICT["from-tc"], + dscp="20", + tc="2", + queueMode=SAI_QUEUE_MODE_DICT["dynamic"], + queue="1" + ), + id="asymmetric-dscp-dynamic-queue-index" + ) + ] + ) + def test_TrimSwitchGlobalConfiguration(self, switchData, attrDict, saiAttrDict): + attr_dict = { + "size": attrDict.size, + "dscp_value": attrDict.dscp, + "queue_index": attrDict.queue + } + + if attrDict.tc is not None: + attr_dict["tc_value"] = attrDict.tc + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + switchId = switchData["id"] + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": saiAttrDict.size, + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": saiAttrDict.dscpMode, + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE": saiAttrDict.dscp, + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": saiAttrDict.queueMode, + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": saiAttrDict.queue + } + + if saiAttrDict.tc is not None: + sai_attr_dict["SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE"] = saiAttrDict.tc + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + +class TestTrimmingAdvancedFlows(TestTrimmingFlows): + def test_TrimAsymToSymMigration(self, switchData): + switchId = switchData["id"] + + # Configure Asymmetric DSCP mode + attr_dict = { + "size": "200", + "dscp_value": "from-tc", + "tc_value": "2", + "queue_index": "2" + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": "200", + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["from-tc"], + "SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE": "2", + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": SAI_QUEUE_MODE_DICT["static"], + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": "2" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + # Configure Symmetric DSCP mode + attr_dict = { + "size": "100", + "dscp_value": "10", + "queue_index": "1" + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": "100", + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["dscp-value"], + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE": "10", + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": SAI_QUEUE_MODE_DICT["static"], + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": "1" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + # Update TC value + attr_dict = { + "tc_value": "5" + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE": "2" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + # Configure Asymmetric DSCP mode + attr_dict = { + "size": "200", + "dscp_value": "from-tc", + "tc_value": "2", + "queue_index": "2" + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": "200", + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["from-tc"], + "SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE": "2", + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": SAI_QUEUE_MODE_DICT["static"], + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": "2" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + +@pytest.mark.usefixtures("genericConfig") +@pytest.mark.usefixtures("restoreConfig") +class TestTrimmingNegativeFlows(TestTrimmingFlows): + @pytest.fixture(scope="class") + def genericConfig(self, switchData): + trimlogger.info("Add generic configuration") + + switchId = switchData["id"] + + # Asymmetric DSCP mode + + attr_dict = { + "size": "100", + "dscp_value": "from-tc", + "tc_value": "1", + "queue_index": "1" + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": "100", + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["from-tc"], + "SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE": "1", + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": SAI_QUEUE_MODE_DICT["static"], + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": "1" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + # Symmetric DSCP mode + + attr_dict = { + "size": "100", + "dscp_value": "10", + "queue_index": "1" + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": "100", + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["dscp-value"], + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE": "10", + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": SAI_QUEUE_MODE_DICT["static"], + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": "1" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + yield + + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": "100", + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["dscp-value"], + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE": "10", + "SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE": "1", + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": SAI_QUEUE_MODE_DICT["static"], + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": "1" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + trimlogger.info("Verify generic configuration") + + @pytest.fixture(scope="function") + def restoreConfig(self, switchData, request): + switchId = switchData["id"] + + attrDict = request.getfixturevalue("attrDict") + saiAttrDict = request.getfixturevalue("saiAttrDict") + + yield + + attr_dict = {} + + if attrDict.size is not None: + attr_dict = { + "size": "100" + } + + if attrDict.dscp is not None: + attr_dict = { + "dscp_value": "10" + } + + if attrDict.tc is not None: + attr_dict = { + "tc_value": "1" + } + + if attrDict.queue is not None: + attr_dict = { + "queue_index": "1" + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = {} + + if saiAttrDict.size is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": "100" + } + + if saiAttrDict.dscp is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["dscp-value"], + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE": "10" + } + + if saiAttrDict.tc is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": SAI_DSCP_MODE_DICT["dscp-value"], + "SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE": "1" + } + + + if saiAttrDict.queue is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": SAI_QUEUE_MODE_DICT["static"], + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": "1" + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + trimlogger.info("Verify configuration rollback: {}".format(str(attrDict))) + + @pytest.mark.parametrize( + "attrDict,saiAttrDict", [ + pytest.param( + TrimmingTuple(size="", dscp=None, tc=None, queue=None), + TrimmingTupleSai(size="100", dscpMode=None, dscp=None, tc=None, queueMode=None, queue=None), + id="size-empty" + ), + pytest.param( + TrimmingTuple(size="-1", dscp=None, tc=None, queue=None), + TrimmingTupleSai(size="100", dscpMode=None, dscp=None, tc=None, queueMode=None, queue=None), + id="size-min-1" + ), + pytest.param( + TrimmingTuple(size="4294967296", dscp=None, tc=None, queue=None), + TrimmingTupleSai(size="100", dscpMode=None, dscp=None, tc=None, queueMode=None, queue=None), + id="size-max+1" + ), + pytest.param( + TrimmingTuple(size=None, dscp="", tc=None, queue=None), + TrimmingTupleSai( + size=None, tc=None, queueMode=None, queue=None, + dscpMode=SAI_DSCP_MODE_DICT["dscp-value"], dscp="10" + ), + id="dscp-empty" + ), + pytest.param( + TrimmingTuple(size=None, dscp="-1", tc=None, queue=None), + TrimmingTupleSai( + size=None, tc=None, queueMode=None, queue=None, + dscpMode=SAI_DSCP_MODE_DICT["dscp-value"], dscp="10" + ), + id="dscp-min-1" + ), + pytest.param( + TrimmingTuple(size=None, dscp="64", tc=None, queue=None), + TrimmingTupleSai( + size=None, tc=None, queueMode=None, queue=None, + dscpMode=SAI_DSCP_MODE_DICT["dscp-value"], dscp="10" + ), + id="dscp-max+1" + ), + pytest.param( + TrimmingTuple(size=None, dscp=None, tc="", queue=None), + TrimmingTupleSai(size=None, dscpMode=None, dscp=None, tc="1", queueMode=None, queue=None), + id="tc-empty" + ), + pytest.param( + TrimmingTuple(size=None, dscp=None, tc="-1", queue=None), + TrimmingTupleSai(size=None, dscpMode=None, dscp=None, tc="1", queueMode=None, queue=None), + id="tc-min-1" + ), + pytest.param( + TrimmingTuple(size=None, dscp=None, tc="256", queue=None), + TrimmingTupleSai(size=None, dscpMode=None, dscp=None, tc="1", queueMode=None, queue=None), + id="tc-max+1" + ), + pytest.param( + TrimmingTuple(size=None, dscp=None, tc=None, queue=""), + TrimmingTupleSai( + size=None, dscpMode=None, dscp=None, tc=None, + queueMode=SAI_QUEUE_MODE_DICT["static"], queue="1" + ), + id="queue-empty" + ), + pytest.param( + TrimmingTuple(size=None, dscp=None, tc=None, queue="-1"), + TrimmingTupleSai( + size=None, dscpMode=None, dscp=None, tc=None, + queueMode=SAI_QUEUE_MODE_DICT["static"], queue="1" + ), + id="queue-min-1" + ), + pytest.param( + TrimmingTuple(size=None, dscp=None, tc=None, queue="256"), + TrimmingTupleSai( + size=None, dscpMode=None, dscp=None, tc=None, + queueMode=SAI_QUEUE_MODE_DICT["static"], queue="1" + ), + id="queue-max+1" + ) + ] + ) + def test_TrimNegValueOutOfBound(self, switchData, attrDict, saiAttrDict): + switchId = switchData["id"] + + attr_dict = {} + + if attrDict.size is not None: + attr_dict = { + "size": attrDict.size + } + + if attrDict.dscp is not None: + attr_dict = { + "dscp_value": attrDict.dscp + } + + if attrDict.tc is not None: + attr_dict = { + "tc_value": attrDict.tc + } + + if attrDict.queue is not None: + attr_dict = { + "queue_index": attrDict.queue + } + + trimlogger.info("Update trimming global") + self.dvs_switch.update_switch_trimming( + qualifiers=attr_dict + ) + + sai_attr_dict = {} + + if saiAttrDict.size is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_SIZE": saiAttrDict.size + } + + if saiAttrDict.dscp is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_RESOLUTION_MODE": saiAttrDict.dscpMode, + "SAI_SWITCH_ATTR_PACKET_TRIM_DSCP_VALUE": saiAttrDict.dscp + } + + if saiAttrDict.tc is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_TC_VALUE": saiAttrDict.tc + } + + if saiAttrDict.queue is not None: + sai_attr_dict = { + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_RESOLUTION_MODE": saiAttrDict.queueMode, + "SAI_SWITCH_ATTR_PACKET_TRIM_QUEUE_INDEX": saiAttrDict.queue + } + + trimlogger.info("Validate trimming global") + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + +@pytest.mark.usefixtures("dvs_buffer_manager") +@pytest.mark.usefixtures("testlog") +class TrimmingBufferModel: + PORT = "Ethernet0" + MMU = "12766208" + + @pytest.fixture(scope="class") + def dynamicBuffer(self, dvs, dynamicModel): + trimlogger.info("Add dynamic buffer configuration") + + # W/A: Enable dynamic buffer model on VS platform + trimlogger.info("Configure buffer MMU: size={}".format(self.MMU)) + self.dvs_buffer.update_buffer_mmu(self.MMU) + + trimlogger.info("Set interface admin state to UP: port={}".format(self.PORT)) + dvs.port_admin_set(self.PORT, "up") + + yield + + trimlogger.info("Set interface admin state to DOWN: port={}".format(self.PORT)) + dvs.port_admin_set(self.PORT, "down") + + trimlogger.info("Remove buffer MMU") + self.dvs_buffer.remove_buffer_mmu() + + trimlogger.info("Remove dynamic buffer configuration") + + +@pytest.mark.usefixtures("dvs_queue_manager") +class TrimmingRegularBufferModel(TrimmingBufferModel): + QUEUE = "0" + + @pytest.fixture(scope="class") + def bufferData(self, queueCounters): + trimlogger.info("Initialize buffer data") + + trimlogger.info("Verify buffer profiles are loaded") + self.dvs_buffer.wait_for_buffer_profiles() + + trimlogger.info("Get buffer profile name: port={}, queue={}".format(self.PORT, self.QUEUE)) + bufferProfileName = self.dvs_queue.get_queue_buffer_profile_name(self.PORT, self.QUEUE) + + trimlogger.info("Get buffer profile id: port={}, queue={}".format(self.PORT, self.QUEUE)) + bufferProfileId = self.dvs_queue.get_queue_buffer_profile_id(self.PORT, self.QUEUE) + + meta_dict = { + "name": bufferProfileName, + "id": bufferProfileId + } + + yield meta_dict + + attr_dict = { + "packet_discard_action": "drop" + } + + trimlogger.info("Reset buffer profile trimming configuration: {}".format(bufferProfileName)) + self.dvs_buffer.update_buffer_profile(bufferProfileName, attr_dict) + + trimlogger.info("Deinitialize buffer data") + + def verifyBufferProfileConfiguration(self, bufferData, action): + attr_dict = { + "packet_discard_action": action + } + + trimlogger.info("Update buffer profile: {}".format(bufferData["name"])) + self.dvs_buffer.update_buffer_profile( + buffer_profile_name=bufferData["name"], + qualifiers=attr_dict + ) + + bufferProfileId = bufferData["id"] + sai_attr_dict = { + "SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION": SAI_BUFFER_PROFILE_MODE_DICT[action] + } + + trimlogger.info("Validate buffer profile: {}".format(bufferData["name"])) + self.dvs_buffer.verify_buffer_profile( + sai_buffer_profile_id=bufferProfileId, + sai_qualifiers=sai_attr_dict + ) + + +class TestTrimmingTraditionalBufferModel(TrimmingRegularBufferModel): + @pytest.mark.parametrize( + "action", [ + pytest.param("drop", id="drop-packet"), + pytest.param("trim", id="trim-packet") + ] + ) + def test_TrimStaticBufferProfileConfiguration(self, bufferData, action): + self.verifyBufferProfileConfiguration(bufferData, action) + + +@pytest.mark.usefixtures("dynamicBuffer") +class TestTrimmingDynamicBufferModel(TrimmingRegularBufferModel): + @pytest.mark.parametrize( + "action", [ + pytest.param("drop", id="drop-packet"), + pytest.param("trim", id="trim-packet") + ] + ) + def test_TrimDynamicBufferProfileConfiguration(self, bufferData, action): + self.verifyBufferProfileConfiguration(bufferData, action) + + +@pytest.mark.usefixtures("dvs_port_manager") +class TrimmingNegativeBufferModel(TrimmingBufferModel): + INGRESS_TRIM_PROFILE = "ingress_trim_profile" + EGRESS_TRIM_PROFILE = "egress_trim_profile" + + INGRESS_DEFAULT_PROFILE = "ingress_default_profile" + EGRESS_DEFAULT_PROFILE = "egress_default_profile" + + PG = "0" + + def createBufferProfile(self, profileName, attrDict): + bufferProfileIds = self.dvs_buffer.get_buffer_profile_ids() + + self.dvs_buffer.create_buffer_profile( + buffer_profile_name=profileName, + qualifiers=attrDict + ) + + bufferProfileIdsExt = self.dvs_buffer.get_buffer_profile_ids(len(bufferProfileIds)+1) + + return (set(bufferProfileIdsExt) - set(bufferProfileIds)).pop() + + def getPgBufferKeyValuePair(self, portName, pgIdx): + keyList = self.dvs_buffer.get_buffer_pg_keys(portName, pgIdx) + assert len(keyList) <= 1, "Invalid BUFFER_PG table" + + if keyList: + key = keyList[0] + value = self.dvs_buffer.get_buffer_pg_value(key) + + return key, value + + return None, None + + @pytest.fixture(scope="class") + def portData(self, portCounters): + trimlogger.info("Initialize port data") + + trimlogger.info("Get port id: port={}".format(self.PORT)) + portId = self.dvs_port.get_port_id(self.PORT) + + meta_dict = { + "id": portId + } + + yield meta_dict + + trimlogger.info("Deinitialize port data") + + @pytest.fixture(scope="class") + def pgData(self, pgCounters): + trimlogger.info("Initialize priority group data") + + trimlogger.info("Get priority group id: port={}, pg={}".format(self.PORT, self.PG)) + pgId = self.dvs_buffer.get_priority_group_id(self.PORT, self.PG) + + trimlogger.info("Get priority group buffer profile: port={}, pg={}".format(self.PORT, self.PG)) + pgBufferKey, pgBufferProfile = self.getPgBufferKeyValuePair(self.PORT, self.PG) + + if pgBufferKey is not None: + trimlogger.info("Remove priority group buffer profile: key={}".format(pgBufferKey)) + self.dvs_buffer.remove_buffer_pg(pgBufferKey) + + meta_dict = { + "id": pgId + } + + yield meta_dict + + if pgBufferKey is not None: + trimlogger.info( + "Restore priority group buffer profile: key={}, profile={}".format( + pgBufferKey, pgBufferProfile + ) + ) + self.dvs_buffer.update_buffer_pg( + pg_buffer_key=pgBufferKey, + pg_buffer_profile=pgBufferProfile + ) + else: + if self.dvs_buffer.is_priority_group_exists(self.PORT, self.PG): + trimlogger.info("Remove priority group buffer profile: port={}, pg={}".format(self.PORT, self.PG)) + self.dvs_buffer.remove_priority_group(self.PORT, self.PG) + + trimlogger.info("Deinitialize priority group data") + + @pytest.fixture(scope="class") + def bufferData(self): + trimlogger.info("Initialize buffer data") + + trimlogger.info("Verify buffer profiles are loaded") + self.dvs_buffer.wait_for_buffer_profiles() + + attr_dict = { + "dynamic_th": "3", + "size": "0", + "pool": "ingress_lossless_pool" + } + + trimlogger.info("Create buffer profile: {}".format(self.INGRESS_TRIM_PROFILE)) + iBufferProfileTrimId = self.createBufferProfile(self.INGRESS_TRIM_PROFILE, attr_dict) + + trimlogger.info("Create buffer profile: {}".format(self.INGRESS_DEFAULT_PROFILE)) + iBufferProfileDefId = self.createBufferProfile(self.INGRESS_DEFAULT_PROFILE, attr_dict) + + attr_dict = { + "dynamic_th": "3", + "size": "1518", + "pool": "egress_lossy_pool" + } + + trimlogger.info("Create buffer profile: {}".format(self.EGRESS_TRIM_PROFILE)) + eBufferProfileTrimId = self.createBufferProfile(self.EGRESS_TRIM_PROFILE, attr_dict) + + trimlogger.info("Create buffer profile: {}".format(self.EGRESS_DEFAULT_PROFILE)) + eBufferProfileDefId = self.createBufferProfile(self.EGRESS_DEFAULT_PROFILE, attr_dict) + + iBufferProfileListOld = None + eBufferProfileListOld = None + + if self.dvs_port.is_buffer_profile_list_exists(self.PORT): + trimlogger.info("Get ingress buffer profile list: port={}".format(self.PORT)) + iBufferProfileListOld = self.dvs_port.get_buffer_profile_list(self.PORT) + + if self.dvs_port.is_buffer_profile_list_exists(self.PORT, False): + trimlogger.info("Get egress buffer profile list: port={}".format(self.PORT)) + eBufferProfileListOld = self.dvs_port.get_buffer_profile_list(self.PORT, False) + + meta_dict = { + "id": { + "profile": { + "trim": { + "ingress": iBufferProfileTrimId, + "egress": eBufferProfileTrimId + }, + "default": { + "ingress": iBufferProfileDefId, + "egress": eBufferProfileDefId + } + } + } + } + + yield meta_dict + + if iBufferProfileListOld is not None: + trimlogger.info( + "Restore ingress buffer profile list: port={}, profile_list={}".format( + self.PORT, ",".join(iBufferProfileListOld) + ) + ) + self.dvs_port.update_buffer_profile_list( + port_name=self.PORT, + profile_list=",".join(iBufferProfileListOld) + ) + else: + if self.dvs_port.is_buffer_profile_list_exists(self.PORT): + trimlogger.info("Remove ingress buffer profile list: port={}".format(self.PORT)) + self.dvs_port.remove_buffer_profile_list(self.PORT) + + if eBufferProfileListOld is not None: + trimlogger.info( + "Restore egress buffer profile list: port={}, profile_list={}".format( + self.PORT, ",".join(eBufferProfileListOld) + ) + ) + self.dvs_port.update_buffer_profile_list( + port_name=self.PORT, + profile_list=",".join(eBufferProfileListOld), + ingress=False + ) + else: + if self.dvs_port.is_buffer_profile_list_exists(self.PORT, False): + trimlogger.info("Remove egress buffer profile list: port={}".format(self.PORT)) + self.dvs_port.remove_buffer_profile_list(self.PORT, False) + + trimlogger.info("Remove buffer profile: {}".format(self.INGRESS_TRIM_PROFILE)) + self.dvs_buffer.remove_buffer_profile(self.INGRESS_TRIM_PROFILE) + + trimlogger.info("Remove buffer profile: {}".format(self.INGRESS_DEFAULT_PROFILE)) + self.dvs_buffer.remove_buffer_profile(self.INGRESS_DEFAULT_PROFILE) + + trimlogger.info("Remove buffer profile: {}".format(self.EGRESS_TRIM_PROFILE)) + self.dvs_buffer.remove_buffer_profile(self.EGRESS_TRIM_PROFILE) + + trimlogger.info("Remove buffer profile: {}".format(self.EGRESS_DEFAULT_PROFILE)) + self.dvs_buffer.remove_buffer_profile(self.EGRESS_DEFAULT_PROFILE) + + trimlogger.info("Deinitialize buffer data") + + def verifyPriorityGroupBufferAttachConfiguration(self, bufferData, pgData): + trimProfile = self.INGRESS_TRIM_PROFILE + defaultProfile = self.INGRESS_DEFAULT_PROFILE + + pgId = pgData["id"] + trimProfileId = bufferData["id"]["profile"]["trim"]["ingress"] + defaultProfileId = bufferData["id"]["profile"]["default"]["ingress"] + + # Update priority group with the default buffer profile + + trimlogger.info( + "Update priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, defaultProfile) + ) + self.dvs_buffer.update_priority_group( + port_name=self.PORT, + pg_index=self.PG, + buffer_profile_name=defaultProfile + ) + + sai_attr_dict = { + "SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE": defaultProfileId + } + + trimlogger.info( + "Validate priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, defaultProfile) + ) + self.dvs_buffer.verify_priority_group( + sai_priority_group_id=pgId, + sai_qualifiers=sai_attr_dict + ) + + # Set buffer profile trimming eligibility + + attr_dict = { + "packet_discard_action": "trim" + } + + trimlogger.info("Update buffer profile: {}".format(trimProfile)) + self.dvs_buffer.update_buffer_profile( + buffer_profile_name=trimProfile, + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION": SAI_BUFFER_PROFILE_MODE_DICT["trim"] + } + + trimlogger.info("Validate buffer profile: {}".format(trimProfile)) + self.dvs_buffer.verify_buffer_profile( + sai_buffer_profile_id=trimProfileId, + sai_qualifiers=sai_attr_dict + ) + + # Update priority group with the trimming buffer profile + # and verify no update is done to ASIC DB + + trimlogger.info( + "Update priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, trimProfile) + ) + self.dvs_buffer.update_priority_group( + port_name=self.PORT, + pg_index=self.PG, + buffer_profile_name=trimProfile + ) + time.sleep(1) + + sai_attr_dict = { + "SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE": defaultProfileId + } + + trimlogger.info( + "Validate priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, defaultProfile) + ) + self.dvs_buffer.verify_priority_group( + sai_priority_group_id=pgId, + sai_qualifiers=sai_attr_dict + ) + + def verifyPriorityGroupBufferEditConfiguration(self, bufferData, pgData): + trimProfile = self.INGRESS_TRIM_PROFILE + defaultProfile = self.INGRESS_DEFAULT_PROFILE + + pgId = pgData["id"] + trimProfileId = bufferData["id"]["profile"]["trim"]["ingress"] + defaultProfileId = bufferData["id"]["profile"]["default"]["ingress"] + + # Reset buffer profile trimming eligibility + + attr_dict = { + "packet_discard_action": "drop" + } + + trimlogger.info("Update buffer profile: {}".format(trimProfile)) + self.dvs_buffer.update_buffer_profile( + buffer_profile_name=trimProfile, + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION": SAI_BUFFER_PROFILE_MODE_DICT["drop"] + } + + trimlogger.info("Validate buffer profile: {}".format(trimProfile)) + self.dvs_buffer.verify_buffer_profile( + sai_buffer_profile_id=trimProfileId, + sai_qualifiers=sai_attr_dict + ) + + # Update priority group with the trimming buffer profile + + trimlogger.info( + "Update priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, trimProfile) + ) + self.dvs_buffer.update_priority_group( + port_name=self.PORT, + pg_index=self.PG, + buffer_profile_name=trimProfile + ) + + sai_attr_dict = { + "SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE": trimProfileId + } + + trimlogger.info( + "Validate priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, trimProfile) + ) + self.dvs_buffer.verify_priority_group( + sai_priority_group_id=pgId, + sai_qualifiers=sai_attr_dict + ) + + # Set buffer profile trimming eligibility + # and verify no update is done to ASIC DB + + attr_dict = { + "packet_discard_action": "trim" + } + + trimlogger.info("Update buffer profile: {}".format(trimProfile)) + self.dvs_buffer.update_buffer_profile( + buffer_profile_name=trimProfile, + qualifiers=attr_dict + ) + time.sleep(1) + + sai_attr_dict = { + "SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION": SAI_BUFFER_PROFILE_MODE_DICT["drop"] + } + + trimlogger.info("Validate buffer profile: {}".format(trimProfile)) + self.dvs_buffer.verify_buffer_profile( + sai_buffer_profile_id=trimProfileId, + sai_qualifiers=sai_attr_dict + ) + + # Update priority group with the default buffer profile + + trimlogger.info( + "Update priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, defaultProfile) + ) + self.dvs_buffer.update_priority_group( + port_name=self.PORT, + pg_index=self.PG, + buffer_profile_name=defaultProfile + ) + + sai_attr_dict = { + "SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE": defaultProfileId + } + + trimlogger.info( + "Validate priority group: port={}, pg={}, profile={}".format(self.PORT, self.PG, defaultProfile) + ) + self.dvs_buffer.verify_priority_group( + sai_priority_group_id=pgId, + sai_qualifiers=sai_attr_dict + ) + + def verifyProfileListBufferAttachConfiguration(self, portData, bufferData, ingress): + trimProfile = self.INGRESS_TRIM_PROFILE if ingress else self.EGRESS_TRIM_PROFILE + defaultProfile = self.INGRESS_DEFAULT_PROFILE if ingress else self.EGRESS_DEFAULT_PROFILE + + direction = "ingress" if ingress else "egress" + + portId = portData["id"] + trimProfileId = bufferData["id"]["profile"]["trim"][direction] + defaultProfileId = bufferData["id"]["profile"]["default"][direction] + + # Update port ingress/egress buffer profile list with the default buffer profile + + trimlogger.info( + "Update {} buffer profile list: port={}, profile={}".format(direction, self.PORT, defaultProfile) + ) + self.dvs_port.update_buffer_profile_list( + port_name=self.PORT, + profile_list=defaultProfile, + ingress=ingress + ) + + sai_attr_dict = { + SAI_BUFFER_PROFILE_LIST_DICT[direction]: [ defaultProfileId ] + } + + trimlogger.info( + "Validate {} buffer profile list: port={}, profile={}".format(direction, self.PORT, defaultProfile) + ) + self.dvs_port.verify_port( + sai_port_id=portId, + sai_qualifiers=sai_attr_dict + ) + + # Set buffer profile trimming eligibility + + attr_dict = { + "packet_discard_action": "trim" + } + + trimlogger.info("Update buffer profile: {}".format(trimProfile)) + self.dvs_buffer.update_buffer_profile( + buffer_profile_name=trimProfile, + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION": SAI_BUFFER_PROFILE_MODE_DICT["trim"] + } + + trimlogger.info("Validate buffer profile: {}".format(trimProfile)) + self.dvs_buffer.verify_buffer_profile( + sai_buffer_profile_id=trimProfileId, + sai_qualifiers=sai_attr_dict + ) + + # Update port ingress/egress buffer profile list with the trimming buffer profile + # and verify no update is done to ASIC DB + + trimlogger.info( + "Update {} buffer profile list: port={}, profile={}".format(direction, self.PORT, trimProfile) + ) + self.dvs_port.update_buffer_profile_list( + port_name=self.PORT, + profile_list=trimProfile, + ingress=ingress + ) + time.sleep(1) + + sai_attr_dict = { + SAI_BUFFER_PROFILE_LIST_DICT[direction]: [ defaultProfileId ] + } + + trimlogger.info( + "Validate {} buffer profile list: port={}, profile={}".format(direction, self.PORT, trimProfile) + ) + self.dvs_port.verify_port( + sai_port_id=portId, + sai_qualifiers=sai_attr_dict + ) + + # Update port ingress/egress buffer profile list with the default buffer profile + + trimlogger.info( + "Update {} buffer profile list: port={}, profile={}".format(direction, self.PORT, defaultProfile) + ) + self.dvs_port.update_buffer_profile_list( + port_name=self.PORT, + profile_list=defaultProfile, + ingress=ingress + ) + + sai_attr_dict = { + SAI_BUFFER_PROFILE_LIST_DICT[direction]: [ defaultProfileId ] + } + + trimlogger.info( + "Validate {} buffer profile list: port={}, profile={}".format(direction, self.PORT, defaultProfile) + ) + self.dvs_port.verify_port( + sai_port_id=portId, + sai_qualifiers=sai_attr_dict + ) + + def verifyProfileListBufferEditConfiguration(self, portData, bufferData, ingress): + trimProfile = self.INGRESS_TRIM_PROFILE if ingress else self.EGRESS_TRIM_PROFILE + defaultProfile = self.INGRESS_DEFAULT_PROFILE if ingress else self.EGRESS_DEFAULT_PROFILE + + direction = "ingress" if ingress else "egress" + + portId = portData["id"] + trimProfileId = bufferData["id"]["profile"]["trim"][direction] + defaultProfileId = bufferData["id"]["profile"]["default"][direction] + + # Reset buffer profile trimming eligibility + + attr_dict = { + "packet_discard_action": "drop" + } + + trimlogger.info("Update buffer profile: {}".format(trimProfile)) + self.dvs_buffer.update_buffer_profile( + buffer_profile_name=trimProfile, + qualifiers=attr_dict + ) + + sai_attr_dict = { + "SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION": SAI_BUFFER_PROFILE_MODE_DICT["drop"] + } + + trimlogger.info("Validate buffer profile: {}".format(trimProfile)) + self.dvs_buffer.verify_buffer_profile( + sai_buffer_profile_id=trimProfileId, + sai_qualifiers=sai_attr_dict + ) + + # Update port ingress/egress buffer profile list with the trimming buffer profile + + trimlogger.info( + "Update {} buffer profile list: port={}, profile={}".format(direction, self.PORT, trimProfile) + ) + self.dvs_port.update_buffer_profile_list( + port_name=self.PORT, + profile_list=trimProfile, + ingress=ingress + ) + + sai_attr_dict = { + SAI_BUFFER_PROFILE_LIST_DICT[direction]: [ trimProfileId ] + } + + trimlogger.info( + "Validate {} buffer profile list: port={}, profile={}".format(direction, self.PORT, trimProfile) + ) + self.dvs_port.verify_port( + sai_port_id=portId, + sai_qualifiers=sai_attr_dict + ) + + # Set buffer profile trimming eligibility + # and verify no update is done to ASIC DB + + attr_dict = { + "packet_discard_action": "trim" + } + + trimlogger.info("Update buffer profile: {}".format(trimProfile)) + self.dvs_buffer.update_buffer_profile( + buffer_profile_name=trimProfile, + qualifiers=attr_dict + ) + time.sleep(1) + + sai_attr_dict = { + "SAI_BUFFER_PROFILE_ATTR_PACKET_ADMISSION_FAIL_ACTION": SAI_BUFFER_PROFILE_MODE_DICT["drop"] + } + + trimlogger.info("Validate buffer profile: {}".format(trimProfile)) + self.dvs_buffer.verify_buffer_profile( + sai_buffer_profile_id=trimProfileId, + sai_qualifiers=sai_attr_dict + ) + + # Update port ingress/egress buffer profile list with the default buffer profile + + trimlogger.info( + "Update {} buffer profile list: port={}, profile={}".format(direction, self.PORT, defaultProfile) + ) + self.dvs_port.update_buffer_profile_list( + port_name=self.PORT, + profile_list=defaultProfile, + ingress=ingress + ) + + sai_attr_dict = { + SAI_BUFFER_PROFILE_LIST_DICT[direction]: [ defaultProfileId ] + } + + trimlogger.info( + "Validate {} buffer profile list: port={}, profile={}".format(direction, self.PORT, defaultProfile) + ) + self.dvs_port.verify_port( + sai_port_id=portId, + sai_qualifiers=sai_attr_dict + ) + + +class TestTrimmingNegativeTraditionalBufferModel(TrimmingNegativeBufferModel): + @pytest.mark.parametrize( + "target", [ + pytest.param("pg", id="priority-group"), + pytest.param("ibuf", id="ingress-buffer-profile-list"), + pytest.param("ebuf", id="egress-buffer-profile-list") + ] + ) + def test_TrimNegStaticBufferProfileAttach(self, bufferData, portData, pgData, target): + if target == "pg": + self.verifyPriorityGroupBufferAttachConfiguration(bufferData, pgData) + elif target == "ibuf": + self.verifyProfileListBufferAttachConfiguration(portData, bufferData, True) + elif target == "ebuf": + self.verifyProfileListBufferAttachConfiguration(portData, bufferData, False) + + @pytest.mark.parametrize( + "target", [ + pytest.param("pg", id="priority-group"), + pytest.param("ibuf", id="ingress-buffer-profile-list"), + pytest.param("ebuf", id="egress-buffer-profile-list") + ] + ) + def test_TrimNegStaticBufferProfileEdit(self, bufferData, portData, pgData, target): + if target == "pg": + self.verifyPriorityGroupBufferEditConfiguration(bufferData, pgData) + elif target == "ibuf": + self.verifyProfileListBufferEditConfiguration(portData, bufferData, True) + elif target == "ebuf": + self.verifyProfileListBufferEditConfiguration(portData, bufferData, False) + + +@pytest.mark.usefixtures("dynamicBuffer") +class TestTrimmingNegativeDynamicBufferModel(TrimmingNegativeBufferModel): + @pytest.mark.parametrize( + "target", [ + pytest.param("pg", id="priority-group"), + pytest.param("ibuf", id="ingress-buffer-profile-list"), + pytest.param("ebuf", id="egress-buffer-profile-list") + ] + ) + def test_TrimNegDynamicBufferProfileAttach(self, bufferData, portData, pgData, target): + if target == "pg": + self.verifyPriorityGroupBufferAttachConfiguration(bufferData, pgData) + elif target == "ibuf": + self.verifyProfileListBufferAttachConfiguration(portData, bufferData, True) + elif target == "ebuf": + self.verifyProfileListBufferAttachConfiguration(portData, bufferData, False) + + @pytest.mark.parametrize( + "target", [ + pytest.param("pg", id="priority-group"), + pytest.param("ibuf", id="ingress-buffer-profile-list"), + pytest.param("ebuf", id="egress-buffer-profile-list") + ] + ) + def test_TrimNegDynamicBufferProfileEdit(self, bufferData, portData, pgData, target): + if target == "pg": + self.verifyPriorityGroupBufferEditConfiguration(bufferData, pgData) + elif target == "ibuf": + self.verifyProfileListBufferEditConfiguration(portData, bufferData, True) + elif target == "ebuf": + self.verifyProfileListBufferEditConfiguration(portData, bufferData, False) + + +@pytest.mark.usefixtures("dvs_switch_manager") +@pytest.mark.usefixtures("dvs_port_manager") +@pytest.mark.usefixtures("dvs_queue_manager") +@pytest.mark.usefixtures("testlog") +class TestTrimmingStats: + PORT = "Ethernet4" + QUEUE = "1" + + @pytest.fixture(scope="class") + def switchData(self, switchCounters): + trimlogger.info("Initialize switch data") + + trimlogger.info("Verify switch count") + self.dvs_switch.verify_switch_count(0) + + trimlogger.info("Get switch id") + switchIdList = self.dvs_switch.get_switch_ids() + + # Assumption: VS has only one switch object + meta_dict = { + "id": switchIdList[0] + } + + yield meta_dict + + sai_attr_dict = { + "SAI_SWITCH_STAT_TX_TRIM_PACKETS": "0", + "SAI_SWITCH_STAT_DROPPED_TRIM_PACKETS": "0", + } + + trimlogger.info("Reset switch trimming counters") + self.dvs_switch.set_switch_counter(switchIdList[0], sai_attr_dict) + + trimlogger.info("Deinitialize switch data") + + @pytest.fixture(scope="class") + def portData(self, portCounters): + trimlogger.info("Initialize port data") + + trimlogger.info("Get port id: port={}".format(self.PORT)) + portId = self.dvs_port.get_port_id(self.PORT) + + meta_dict = { + "id": portId + } + + yield meta_dict + + sai_attr_dict = { + "SAI_PORT_STAT_TRIM_PACKETS": "0", + "SAI_PORT_STAT_TX_TRIM_PACKETS": "0", + "SAI_PORT_STAT_DROPPED_TRIM_PACKETS": "0" + } + + trimlogger.info("Reset port trimming counters: port={}".format(self.PORT)) + self.dvs_port.set_port_counter(portId, sai_attr_dict) + + trimlogger.info("Deinitialize port data") + + @pytest.fixture(scope="class") + def queueData(self, queueCounters): + trimlogger.info("Initialize queue data") + + trimlogger.info("Get queue id: port={}, queue={}".format(self.PORT, self.QUEUE)) + queueId = self.dvs_queue.get_queue_id(self.PORT, self.QUEUE) + + meta_dict = { + "id": queueId + } + + yield meta_dict + + sai_attr_dict = { + "SAI_QUEUE_STAT_TRIM_PACKETS": "0", + "SAI_QUEUE_STAT_TX_TRIM_PACKETS": "0", + "SAI_QUEUE_STAT_DROPPED_TRIM_PACKETS": "0" + } + + trimlogger.info("Reset queue trimming counters: port={}, queue={}".format(self.PORT, self.QUEUE)) + self.dvs_queue.set_queue_counter(queueId, sai_attr_dict) + + trimlogger.info("Deinitialize queue data") + + @pytest.mark.parametrize( + "attr, value", [ + pytest.param("SAI_SWITCH_STAT_TX_TRIM_PACKETS", "1000", id="tx-packet"), + pytest.param("SAI_SWITCH_STAT_DROPPED_TRIM_PACKETS", "2000", id="drop-packet") + ] + ) + def test_TrimSwitchStats(self, switchData, attr, value): + sai_attr_dict = { + attr: value + } + + trimlogger.info("Update switch counter") + self.dvs_switch.set_switch_counter( + sai_switch_id=switchData["id"], + sai_qualifiers=sai_attr_dict + ) + + trimlogger.info("Validate switch counter") + self.dvs_switch.verify_switch_counter( + sai_switch_id=switchData["id"], + sai_qualifiers=sai_attr_dict + ) + + @pytest.mark.parametrize( + "attr, value", [ + pytest.param("SAI_PORT_STAT_TRIM_PACKETS", "1000", id="trim-packet"), + pytest.param("SAI_PORT_STAT_TX_TRIM_PACKETS", "2000", id="tx-packet"), + pytest.param("SAI_PORT_STAT_DROPPED_TRIM_PACKETS", "3000", id="drop-packet") + ] + ) + def test_TrimPortStats(self, portData, attr, value): + sai_attr_dict = { + attr: value + } + + trimlogger.info("Update port counters: port={}".format(self.PORT)) + self.dvs_port.set_port_counter( + sai_port_id=portData["id"], + sai_qualifiers=sai_attr_dict + ) + + trimlogger.info("Validate port counters: port={}".format(self.PORT)) + self.dvs_port.verify_port_counter( + sai_port_id=portData["id"], + sai_qualifiers=sai_attr_dict + ) + + @pytest.mark.parametrize( + "attr, value", [ + pytest.param("SAI_QUEUE_STAT_TRIM_PACKETS", "1000", id="trim-packet"), + pytest.param("SAI_QUEUE_STAT_TX_TRIM_PACKETS", "2000", id="tx-packet"), + pytest.param("SAI_QUEUE_STAT_DROPPED_TRIM_PACKETS", "3000", id="drop-packet") + ] + ) + def test_TrimQueueStats(self, queueData, attr, value): + sai_attr_dict = { + attr: value + } + + trimlogger.info("Update queue counters: port={}, queue={}".format(self.PORT, self.QUEUE)) + self.dvs_queue.set_queue_counter( + sai_queue_id=queueData["id"], + sai_qualifiers=sai_attr_dict + ) + + trimlogger.info("Validate queue counters: port={}, queue={}".format(self.PORT, self.QUEUE)) + self.dvs_queue.verify_queue_counter( + sai_queue_id=queueData["id"], + sai_qualifiers=sai_attr_dict + ) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_tunnel.py b/tests/test_tunnel.py index 4b96eb5060e..37e86d0e731 100644 --- a/tests/test_tunnel.py +++ b/tests/test_tunnel.py @@ -1,3 +1,4 @@ +import ipaddress import time import pytest @@ -7,15 +8,34 @@ def create_fvs(**kwargs): return swsscommon.FieldValuePairs(list(kwargs.items())) + +def convert_fvs_to_dict(fvs): + return {f:v for f,v in fvs} + + class TestTunnelBase(object): - APP_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" - ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" - ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" - ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" - ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" - ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" - TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" - CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" + APPL_DB_SEPARATOR = ":" + STATE_DB_SEPARATOR = "|" + APP_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" + APP_TUNNEL_DECAP_TERM_TABLE_NAME = "TUNNEL_DECAP_TERM_TABLE" + STATE_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" + STATE_TUNNEL_DECAP_TERM_TABLE_NAME = "TUNNEL_DECAP_TERM_TABLE" + CFG_SUBNET_DECAP_TABLE_NAME = "SUBNET_DECAP" + ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" + ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" + ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" + ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" + CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" + SAI_NULL_OBJECT_ID = 0 + + decap_term_type_map = { + "P2P" : "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P", + "P2MP" : "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP", + "MP2P" : "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2P", + "MP2MP" : "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2MP" + } ecn_modes_map = { "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", @@ -46,54 +66,162 @@ def check_vr_exists_in_asicdb(self, asicdb, sai_oid): status, fvs = vfr_table.get(sai_oid) return status - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): + def create_and_test_tunnel_decap_terms(self, db, asicdb, statedb, tunnel_name, tunnel_sai_oid, + decap_term_attr_list, skip_decap_term_creation=False, + is_decap_terms_existed=True, subnet_decap_config=None): + """Create decap terms and verify all needed entries in ASIC DB exists""" + if not skip_decap_term_creation: + ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TERM_TABLE_NAME) + for decap_term_attrs in decap_term_attr_list: + dst_ip = decap_term_attrs["dst_ip"] + fvs = create_fvs(**{k:v for k,v in decap_term_attrs.items() if k != "dst_ip"}) + ps.set(tunnel_name + self.APPL_DB_SEPARATOR + dst_ip, fvs) + + time.sleep(1) + tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES) tunnel_term_entries = tunnel_term_table.getKeys() - assert len(tunnel_term_entries) == len(dst_ips) + if not is_decap_terms_existed: + assert len(tunnel_term_entries) == 0 + return + assert len(tunnel_term_entries) == len(decap_term_attr_list) + + decap_terms = {} + for decap_term_attrs in decap_term_attr_list: + dst_ip = ipaddress.ip_network(decap_term_attrs["dst_ip"]) + decap_terms[(str(dst_ip.network_address), str(dst_ip.netmask))] = decap_term_attrs - expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" - expected_len = 6 if src_ip else 5 for term_entry in tunnel_term_entries: status, fvs = tunnel_term_table.get(term_entry) + term_attrs = convert_fvs_to_dict(fvs) + dst_ip = term_attrs["SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP"] + if "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK" in term_attrs: + dst_ip_mask = term_attrs["SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK"] + else: + if ipaddress.ip_address(dst_ip).version == 4: + dst_ip_mask = "255.255.255.255" + else: + dst_ip_mask = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" + + decap_term = decap_terms[(dst_ip, dst_ip_mask)] + dst_ip_str = decap_term["dst_ip"] + + decap_term_type_str = decap_term.get("term_type", "P2MP") + if decap_term_type_str == "P2MP": + expected_len = 5 + elif decap_term_type_str == "P2P": + expected_len = 6 + elif decap_term_type_str == "MP2P": + expected_len = 7 + elif decap_term_type_str == "MP2MP": + expected_len = 8 assert status == True assert len(fvs) == expected_len - for field, value in fvs: + decap_term_type = self.decap_term_type_map[decap_term_type_str] + for field, value in term_attrs.items(): if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": assert self.check_vr_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE": - assert value == expected_term_type + assert value == decap_term_type elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID": assert value == tunnel_sai_oid elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP": - assert value in dst_ips - elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP" and src_ip: - assert value == src_ip + assert value == str(ipaddress.ip_network(dst_ip_str).network_address) + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK": + assert value == str(ipaddress.ip_network(dst_ip_str).netmask) + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP": + if subnet_decap_config: + src_ip_asic = ipaddress.ip_address(value) + if src_ip_asic.version == 4: + expected_value = str(ipaddress.ip_network(subnet_decap_config["src_ip"]).network_address) + else: + expected_value = str(ipaddress.ip_network(subnet_decap_config["src_ip_v6"]).network_address) + else: + expected_value = str(ipaddress.ip_network(decap_term["src_ip"]).network_address) + assert value == expected_value + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP_MASK": + if subnet_decap_config: + src_ip_mask_asic = ipaddress.ip_address(value) + if src_ip_mask_asic.version == 4: + expected_value = str(ipaddress.ip_network(subnet_decap_config["src_ip"]).netmask) + else: + expected_value = str(ipaddress.ip_network(subnet_decap_config["src_ip_v6"]).netmask) + else: + expected_value = str(ipaddress.ip_network(decap_term["src_ip"]).netmask) + assert value == expected_value + else: + assert False, "Field %s is not tested" % field + + tunnel_decap_term_state_table = swsscommon.Table(statedb, self.STATE_TUNNEL_DECAP_TERM_TABLE_NAME) + + tunnel_term_state_entries = tunnel_decap_term_state_table.getKeys() + for term_entry in tunnel_term_state_entries: + status, fvs = tunnel_decap_term_state_table.get(term_entry) + tunnel_name, dst_ip_str = term_entry.split(self.STATE_DB_SEPARATOR) + dst_ip = ipaddress.ip_network(dst_ip_str) + + assert (str(dst_ip.network_address), str(dst_ip.netmask)) in decap_terms + assert status == True + + decap_term = decap_terms[(str(dst_ip.network_address), str(dst_ip.netmask))] + assert dst_ip_str == decap_term["dst_ip"] + for field, value in fvs: + if field == "src_ip": + if subnet_decap_config: + if dst_ip.version == 4: + expected_value = subnet_decap_config["src_ip"] + else: + expected_value = subnet_decap_config["src_ip_v6"] + else: + expected_value = decap_term["src_ip"] + assert value == expected_value + elif field == "term_type": + assert value == decap_term.get("term_type", "P2MP") + elif field == "subnet_type": + assert value == decap_term["subnet_type"] else: assert False, "Field %s is not tested" % field - def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): - """ Create tunnel and verify all needed enties in ASIC DB exists """ + def remove_and_test_tunnel_decap_terms(self, db, asicdb, statedb, tunnel_name, decap_term_attr_list): + tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES) + tunnel_decap_term_state_table = swsscommon.Table(statedb, self.STATE_TUNNEL_DECAP_TERM_TABLE_NAME) + + dst_ips = {decap_term_attrs["dst_ip"] for decap_term_attrs in decap_term_attr_list} + ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TERM_TABLE_NAME) + for dst_ip in dst_ips: + ps._del(tunnel_name + self.APPL_DB_SEPARATOR + dst_ip) + + time.sleep(1) + + assert len(tunnel_term_table.getKeys()) == 0 + assert len(tunnel_decap_term_state_table.getKeys()) == 0 + + def create_and_test_tunnel(self, db, asicdb, statedb, tunnel_name, **kwargs): + """ Create tunnel and verify all needed entries in ASIC DB exists """ is_symmetric_tunnel = "src_ip" in kwargs - + decap_dscp_to_tc_map_oid = None decap_tc_to_pg_map_oid = None skip_tunnel_creation = False + is_tunnel_existed = True if "decap_dscp_to_tc_map_oid" in kwargs: decap_dscp_to_tc_map_oid = kwargs.pop("decap_dscp_to_tc_map_oid") if "decap_tc_to_pg_map_oid" in kwargs: decap_tc_to_pg_map_oid = kwargs.pop("decap_tc_to_pg_map_oid") - + if "skip_tunnel_creation" in kwargs: skip_tunnel_creation = kwargs.pop("skip_tunnel_creation") - + if "is_tunnel_existed" in kwargs: + is_tunnel_existed = kwargs.pop("is_tunnel_existed") + if not skip_tunnel_creation: fvs = create_fvs(**kwargs) # create tunnel entry in DB @@ -107,6 +235,10 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE) tunnels = tunnel_table.getKeys() + if not is_tunnel_existed: + assert len(tunnels) == 0 + return self.SAI_NULL_OBJECT_ID + assert len(tunnels) == 1 tunnel_sai_obj = tunnels[0] @@ -121,12 +253,12 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] - + if decap_dscp_to_tc_map_oid: expected_len += 1 if decap_tc_to_pg_map_oid: expected_len += 1 - + assert len(fvs) == expected_len for field, value in fvs: @@ -150,15 +282,38 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert value == decap_tc_to_pg_map_oid else: assert False, "Field %s is not tested" % field - src_ip = kwargs["src_ip"] if "src_ip" in kwargs else None - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) - def remove_and_test_tunnel(self, db, asicdb, tunnel_name): + tunnel_state_table = swsscommon.Table(statedb, self.STATE_TUNNEL_DECAP_TABLE_NAME) + + tunnels = tunnel_state_table.getKeys() + for tunnel in tunnels: + status, fvs = tunnel_state_table.get(tunnel) + assert status == True + + for field, value in fvs: + if field == "tunnel_type": + assert value == "IPINIP" + elif field == "dscp_mode": + assert value == kwargs["dscp_mode"] + elif field == "ecn_mode": + assert value == kwargs["ecn_mode"] + elif field == "ttl_mode": + assert value == kwargs["ttl_mode"] + elif field == "encap_ecn_mode": + assert value == kwargs["encap_ecn_mode"] + else: + assert False, "Field %s is not tested" % field + + return tunnel_sai_obj + + def remove_and_test_tunnel(self, db, asicdb, statedb, tunnel_name, skip_validation=False): """ Removes tunnel and checks that ASIC db is clear""" tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE) tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES) - tunnel_app_table = swsscommon.Table(asicdb, self.APP_TUNNEL_DECAP_TABLE_NAME) + tunnel_app_table = swsscommon.Table(db, self.APP_TUNNEL_DECAP_TABLE_NAME) + tunnel_state_table = swsscommon.Table(statedb, self.STATE_TUNNEL_DECAP_TABLE_NAME) + tunnel_decap_term_state_table = swsscommon.Table(statedb, self.STATE_TUNNEL_DECAP_TERM_TABLE_NAME) tunnels = tunnel_table.getKeys() tunnel_sai_obj = tunnels[0] @@ -169,7 +324,10 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): overlay_infs_id = {f:v for f,v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - ps.set(tunnel_name, create_fvs(), 'DEL') + ps._del(tunnel_name) + + if skip_validation: + return # wait till config will be applied time.sleep(1) @@ -177,6 +335,8 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_table.getKeys()) == 0 assert len(tunnel_term_table.getKeys()) == 0 assert len(tunnel_app_table.getKeys()) == 0 + assert len(tunnel_state_table.getKeys()) == 0 + assert len(tunnel_decap_term_state_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): @@ -189,18 +349,18 @@ def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map fvs = swsscommon.FieldValuePairs(list(qos_map.items())) table.set(qos_map_name, fvs) time.sleep(1) - + diff = set(qos_table.getKeys()) - set(current_oids) assert len(diff) == 1 oid = diff.pop() return oid - + def remove_qos_map(self, configdb, qos_map_type_name, qos_map_name): """ Remove the testing qos map""" table = swsscommon.Table(configdb, qos_map_type_name) table._del(qos_map_name) - def cleanup_left_over(self, db, asicdb): + def cleanup_left_over(self, db, statedb, asicdb): """ Cleanup APP and ASIC tables """ tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE) @@ -211,10 +371,22 @@ def cleanup_left_over(self, db, asicdb): for key in tunnel_term_table.getKeys(): tunnel_term_table._del(key) + tunnel_decap_term_app_table = swsscommon.Table(db, self.APP_TUNNEL_DECAP_TERM_TABLE_NAME) + for key in tunnel_decap_term_app_table.getKeys(): + tunnel_decap_term_app_table._del(key) + tunnel_app_table = swsscommon.Table(db, self.APP_TUNNEL_DECAP_TABLE_NAME) for key in tunnel_app_table.getKeys(): tunnel_app_table._del(key) + tunnel_state_table = swsscommon.Table(statedb, self.STATE_TUNNEL_DECAP_TABLE_NAME) + for key in tunnel_state_table.getKeys(): + tunnel_state_table._del(key) + + tunnel_decap_term_state_table = swsscommon.Table(statedb, self.STATE_TUNNEL_DECAP_TERM_TABLE_NAME) + for key in tunnel_decap_term_state_table.getKeys(): + tunnel_decap_term_state_table._del(key) + class TestDecapTunnel(TestTunnelBase): """ Tests for decap tunnel creation and removal """ @@ -223,37 +395,178 @@ def test_TunnelDecap_v4(self, dvs, testlog): db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - self.cleanup_left_over(db, asicdb) + self.cleanup_left_over(db, statedb, asicdb) + decap_terms = [ + {"dst_ip": "2.2.2.2", "term_type": "P2MP"}, + {"dst_ip": "3.3.3.3", "term_type": "P2MP"}, + {"dst_ip": "4.4.4.4", "src_ip": "5.5.5.5", "term_type": "P2P"}, + {"dst_ip": "192.168.0.0/24", "src_ip": "10.10.10.0/24", "term_type": "MP2MP"} + ] # create tunnel IPv4 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="IPINIPv4Decap", tunnel_type="IPINIP", - dst_ip="2.2.2.2,3.3.3.3", dscp_mode="uniform", - ecn_mode="standard", ttl_mode="pipe") - self.remove_and_test_tunnel(db, asicdb, "IPINIPv4Decap") + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIPv4Decap", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", + tunnel_sai_oid, decap_terms + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIPv4Decap") def test_TunnelDecap_v6(self, dvs, testlog): """ test IPv6 tunnel creation """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - self.cleanup_left_over(db, asicdb) + self.cleanup_left_over(db, statedb, asicdb) + decap_terms = [ + {"dst_ip": "2::2", "term_type": "P2MP"}, + {"dst_ip": "3::3", "term_type": "P2MP"}, + {"dst_ip": "4::4", "src_ip": "5::5", "term_type": "P2P"}, + {"dst_ip": "2001:db8::/32", "src_ip": "2002:db8::/32", "term_type": "MP2MP"} + ] # create tunnel IPv6 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="IPINIPv6Decap", tunnel_type="IPINIP", - dst_ip="2::2,3::3", dscp_mode="pipe", - ecn_mode="copy_from_outer", ttl_mode="uniform") - self.remove_and_test_tunnel(db, asicdb,"IPINIPv6Decap") - + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIPv6Decap", tunnel_type="IPINIP", + dscp_mode="pipe", ecn_mode="copy_from_outer", ttl_mode="uniform" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv6Decap", + tunnel_sai_oid, decap_terms + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv6Decap", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIPv6Decap") + + def test_TunnelDecap_Invalid_Decap_Term_Attribute(self, dvs, testlog): + """ test IPv4 tunnel creation """ + + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + decap_terms = [ + {"dst_ip": "3.3.3.3", "term_type": "P2P"}, + {"dst_ip": "4.4.4.4", "term_type": "MP2MP"} + ] + # create tunnel IPv4 tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIPv4Decap", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", + tunnel_sai_oid, decap_terms, + is_decap_terms_existed=False + ) + + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIPv4Decap") + + def test_TunnelDecap_Remove_Tunnel_First(self, dvs, testlog): + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + decap_terms = [ + {"dst_ip": "2.2.2.2", "term_type": "P2MP"}, + {"dst_ip": "3.3.3.3", "term_type": "P2MP"}, + {"dst_ip": "4.4.4.4", "src_ip": "5.5.5.5", "term_type": "P2P"}, + {"dst_ip": "192.168.0.0/24", "src_ip": "10.10.10.0/24", "term_type": "MP2MP"} + ] + # create tunnel IPv4 tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIPv4Decap", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", + tunnel_sai_oid, decap_terms + ) + + # the removal of tunnel with decap terms will fail + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIPv4Decap", skip_validation=True) + # validate the tunnel and decap terms are still existed + self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIPv4Decap", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe", + skip_tunnel_creation=True + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", + tunnel_sai_oid, decap_terms, + skip_decap_term_creation=True + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", decap_terms + ) + tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE) + tunnels = tunnel_table.getKeys() + assert len(tunnels) == 0 + + def test_TunnelDecap_Add_Decap_Term_First(self, dvs, testlog): + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + decap_terms = [ + {"dst_ip": "2.2.2.2", "term_type": "P2MP"}, + {"dst_ip": "3.3.3.3", "term_type": "P2MP"}, + {"dst_ip": "4.4.4.4", "src_ip": "5.5.5.5", "term_type": "P2P"}, + {"dst_ip": "192.168.0.0/24", "src_ip": "10.10.10.0/24", "term_type": "MP2MP"} + ] + # create decap terms of not-existed tunnel + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", + self.SAI_NULL_OBJECT_ID, decap_terms, is_decap_terms_existed=False + ) + # remove decap terms of not-existed tunnel + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", decap_terms[:1] + ) + # create tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIPv4Decap", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + # verify the decap terms are created + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", + tunnel_sai_oid, decap_terms[1:], skip_decap_term_creation=True + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Decap", decap_terms[1:] + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIPv4Decap") + def test_TunnelDecap_MuxTunnel(self, dvs, testlog): """ Test MuxTunnel creation. """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) - self.cleanup_left_over(db, asicdb) - dscp_to_tc_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) tc_to_pg_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) @@ -261,7 +574,6 @@ def test_TunnelDecap_MuxTunnel(self, dvs, testlog): params = { "tunnel_type": "IPINIP", "src_ip": "1.1.1.1", - "dst_ip": "1.1.1.2", "dscp_mode": "pipe", "ecn_mode": "copy_from_outer", "ttl_mode": "uniform", @@ -270,10 +582,17 @@ def test_TunnelDecap_MuxTunnel(self, dvs, testlog): "decap_tc_to_pg_map": "AZURE_TUNNEL", "decap_tc_to_pg_map_oid": tc_to_pg_map_oid } - self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) - + decap_terms = [{"dst_ip": "1.1.1.2", "src_ip": "1.1.1.1", "term_type": "P2P"}] + tunnel_sai_oid = self.create_and_test_tunnel(db, asicdb, statedb, tunnel_name="MuxTunnel0", **params) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "MuxTunnel0", + tunnel_sai_oid, decap_terms + ) # Remove Tunnel first - self.remove_and_test_tunnel(db, asicdb,"MuxTunnel0") + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "MuxTunnel0", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "MuxTunnel0") self.remove_qos_map(configdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME) self.remove_qos_map(configdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME) @@ -283,31 +602,30 @@ def test_TunnelDecap_MuxTunnel_with_retry(self, dvs, testlog): db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - self.cleanup_left_over(db, asicdb) + self.cleanup_left_over(db, statedb, asicdb) # Create MuxTunnel0 with QoS remapping attributes params = { "tunnel_type": "IPINIP", "src_ip": "1.1.1.1", - "dst_ip": "1.1.1.2", "dscp_mode": "pipe", "ecn_mode": "copy_from_outer", "ttl_mode": "uniform", "decap_dscp_to_tc_map": "AZURE_TUNNEL", - "decap_tc_to_pg_map": "AZURE_TUNNEL", + "decap_tc_to_pg_map": "AZURE_TUNNEL", } + decap_terms = [ + {"dst_ip": "1.1.1.2", "src_ip": "1.1.1.1", "term_type": "P2P"}, + ] # Verify tunnel is not created when decap_dscp_to_tc_map/decap_tc_to_pg_map is specified while oid is not ready in qosorch - fvs = create_fvs(**params) - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - ps.set("MuxTunnel0", fvs) - - time.sleep(1) - # check asic db table - tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE) - tunnels = tunnel_table.getKeys() - assert len(tunnels) == 0 + self.create_and_test_tunnel(db, asicdb, statedb, tunnel_name="MuxTunnel0", is_tunnel_existed=False, **params) + # create decap term entry in DB + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "MuxTunnel0", + self.SAI_NULL_OBJECT_ID, decap_terms, is_decap_terms_existed=False + ) #Verify tunneldecaporch creates tunnel when qos map is available dscp_to_tc_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) @@ -317,13 +635,21 @@ def test_TunnelDecap_MuxTunnel_with_retry(self, dvs, testlog): "decap_tc_to_pg_map_oid": tc_to_pg_map_oid, "skip_tunnel_creation": True }) - self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) + tunnel_sai_oid = self.create_and_test_tunnel(db, asicdb, statedb, tunnel_name="MuxTunnel0", **params) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "MuxTunnel0", + tunnel_sai_oid, decap_terms, skip_decap_term_creation=True + ) # Cleanup - self.remove_and_test_tunnel(db, asicdb,"MuxTunnel0") + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "MuxTunnel0", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "MuxTunnel0") self.remove_qos_map(configdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) self.remove_qos_map(configdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) + class TestSymmetricTunnel(TestTunnelBase): """ Tests for symmetric tunnel creation and removal """ @@ -332,31 +658,372 @@ def test_TunnelSymmetric_v4(self, dvs, testlog): db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - self.cleanup_left_over(db, asicdb) + self.cleanup_left_over(db, statedb, asicdb) # create tunnel IPv4 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="IPINIPv4Symmetric", tunnel_type="IPINIP", - src_ip="1.1.1.1", - dst_ip="2.2.2.2,3.3.3.3", dscp_mode="pipe", - ecn_mode="copy_from_outer", ttl_mode="uniform") - self.remove_and_test_tunnel(db, asicdb, "IPINIPv4Symmetric") + decap_terms = [ + {"dst_ip": "2.2.2.2", "src_ip": "1.1.1.1", "term_type": "P2P"}, + {"dst_ip": "3.3.3.3", "src_ip": "1.1.1.1", "term_type": "P2P"}, + ] + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, tunnel_name="IPINIPv4Symmetric", + tunnel_type="IPINIP", src_ip="1.1.1.1", dscp_mode="pipe", + ecn_mode="copy_from_outer", ttl_mode="uniform" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Symmetric", + tunnel_sai_oid, decap_terms + ) + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv4Symmetric", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIPv4Symmetric") def test_TunnelSymmetric_v6(self, dvs, testlog): """ test IPv6 tunnel creation """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - self.cleanup_left_over(db, asicdb) + self.cleanup_left_over(db, statedb, asicdb) # create tunnel IPv6 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="IPINIPv6Symmetric", tunnel_type="IPINIP", - src_ip="1::1", - dst_ip="2::2,3::3", dscp_mode="uniform", - ecn_mode="standard", ttl_mode="pipe") - self.remove_and_test_tunnel(db, asicdb, "IPINIPv6Symmetric") + decap_terms = [ + {"dst_ip": "2::2", "src_ip": "1::1", "term_type": "P2P"}, + {"dst_ip": "3::3", "src_ip": "1::1", "term_type": "P2P"}, + ] + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, tunnel_name="IPINIPv6Symmetric", + tunnel_type="IPINIP", src_ip="1::1", dscp_mode="uniform", + ecn_mode="standard", ttl_mode="pipe" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv6Symmetric", + tunnel_sai_oid, decap_terms + ) + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIPv6Symmetric", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIPv6Symmetric") + + +class TestSubnetDecap(TestTunnelBase): + """ Tests for subnet decap creation and removal """ + + @pytest.fixture + def setup_subnet_decap(self, dvs): + + def _apply_subnet_decap_config(subnet_decap_config): + """Apply subnet decap config to CONFIG_DB.""" + fvs = create_fvs(**subnet_decap_config) + subnet_decap_tbl.set("AZURE", fvs) + + def _cleanup_subnet_decap_config(): + """Cleanup subnet decap config in CONFIG_DB.""" + for key in subnet_decap_tbl.getKeys(): + subnet_decap_tbl._del(key) + + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + _cleanup_subnet_decap_config() + + yield _apply_subnet_decap_config + + _cleanup_subnet_decap_config() + + def test_SubnetDecap_Enable_Source_IP_Update_v4(self, dvs, testlog, setup_subnet_decap): + """Test subnet decap source IP update.""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + decap_terms = [ + {"dst_ip": "192.168.0.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.1.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.2.0/24", "term_type": "MP2MP", "subnet_type": "vlan"} + ] + setup_subnet_decap(subnet_decap_config) + # create tunnel IPv4 tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIP_SUBNET", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + subnet_decap_config=subnet_decap_config + ) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.20.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + skip_decap_term_creation=True, + subnet_decap_config=subnet_decap_config + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIP_SUBNET") + + def test_SubnetDecap_Enable_Source_IP_Update_v6(self, dvs, testlog, setup_subnet_decap): + """Test subnet decap source IPv6 update.""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + decap_terms = [ + {"dst_ip": "fc02:1000::/64", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "fc02:1001::/64", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "fc02:1002::/64", "term_type": "MP2MP", "subnet_type": "vlan"} + ] + setup_subnet_decap(subnet_decap_config) + # create tunnel IPv4 tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIP_SUBNET_V6", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET_V6", + tunnel_sai_oid, decap_terms, + subnet_decap_config=subnet_decap_config + ) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba9::/64" + } + setup_subnet_decap(subnet_decap_config) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET_V6", + tunnel_sai_oid, decap_terms, + skip_decap_term_creation=True, + subnet_decap_config=subnet_decap_config + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET_V6", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIP_SUBNET_V6") + + def test_SubnetDecap_Enable_Source_IP_Update_Add_Decap_Term_First_1(self, dvs, testlog, setup_subnet_decap): + """Test subnet decap source IP update.""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + decap_terms = [ + {"dst_ip": "192.168.0.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.1.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.2.0/24", "term_type": "MP2MP", "subnet_type": "vlan"} + ] + setup_subnet_decap(subnet_decap_config) + # create decap terms of not-existed tunnel + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + self.SAI_NULL_OBJECT_ID, decap_terms, + is_decap_terms_existed=False + ) + # create tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIP_SUBNET", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + # verify the decap terms are created + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + skip_decap_term_creation=True, + subnet_decap_config=subnet_decap_config + ) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.20.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + skip_decap_term_creation=True, + subnet_decap_config=subnet_decap_config + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIP_SUBNET") + + def test_SubnetDecap_Enable_Source_IP_Update_Add_Decap_Term_First_2(self, dvs, testlog, setup_subnet_decap): + """Test subnet decap source IP update.""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + decap_terms = [ + {"dst_ip": "192.168.0.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.1.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.2.0/24", "term_type": "MP2MP", "subnet_type": "vlan"} + ] + setup_subnet_decap(subnet_decap_config) + # create decap terms of not-existed tunnel + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + self.SAI_NULL_OBJECT_ID, decap_terms, + is_decap_terms_existed=False + ) + + # update subnet decap source IP + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.20.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + + # create tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIP_SUBNET", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + # verify the decap terms are created with updated source IP + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + skip_decap_term_creation=True, + subnet_decap_config=subnet_decap_config + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIP_SUBNET") + + def test_SubnetDecap_Disable(self, dvs, testlog, setup_subnet_decap): + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + subnet_decap_config = { + "status": "disable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + decap_terms = [ + {"dst_ip": "192.168.0.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.1.0/24", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.2.0/24", "term_type": "MP2MP", "subnet_type": "vlan"} + ] + setup_subnet_decap(subnet_decap_config) + # create tunnel IPv4 tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIP_SUBNET", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + # subnet decap is disabled, no decap term will be created + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + subnet_decap_config=subnet_decap_config, + is_decap_terms_existed=False + ) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.20.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + subnet_decap_config=subnet_decap_config + ) + + self.remove_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", decap_terms + ) + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIP_SUBNET") + + def test_SubnetDecap_Invalid_Decap_Term_Attribute(self, dvs, testlog, setup_subnet_decap): + """Test adding decap terms with invalid attributes.""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, statedb, asicdb) + + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + decap_terms = [ + {"dst_ip": "192.168.0.0abc", "term_type": "MP2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.1.0/24", "term_type": "MP2MPP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.2.0/24", "term_type": "P2MP", "subnet_type": "vlan"}, + {"dst_ip": "192.168.3.0/24", "term_type": "MP2MP", "subnet_type": "uknown"}, + {"dst_ip": "192.168.4.0/24", "term_type": "MP2MP", "subnet_type": "vlan", "bad_attr": "bad_val"} + ] + setup_subnet_decap(subnet_decap_config) + # create tunnel IPv4 tunnel + tunnel_sai_oid = self.create_and_test_tunnel( + db, asicdb, statedb, "IPINIP_SUBNET", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe" + ) + self.create_and_test_tunnel_decap_terms( + db, asicdb, statedb, "IPINIP_SUBNET", + tunnel_sai_oid, decap_terms, + subnet_decap_config=subnet_decap_config, + is_decap_terms_existed=False + ) + + self.remove_and_test_tunnel(db, asicdb, statedb, "IPINIP_SUBNET") # Add Dummy always-pass test at end as workaroud diff --git a/tests/test_twamp.py b/tests/test_twamp.py new file mode 100644 index 00000000000..d2d8edb8f0e --- /dev/null +++ b/tests/test_twamp.py @@ -0,0 +1,182 @@ +# This test suite covers the functionality of twamp light feature in SwSS +import pytest +import time + +@pytest.mark.usefixtures("testlog") +@pytest.mark.usefixtures('dvs_twamp_manager') +class TestTwampLight(object): + + def check_syslog(self, dvs, marker, log, expected_cnt): + (ec, out) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \'%s\' | wc -l" % (marker, log)]) + assert out.strip() == str(expected_cnt) + + def test_SenderPacketCountSingle(self, dvs, testlog): + """ + This test covers the TWAMP Light session creation and removal operations + Operation flow: + 1. Create twamp-light session-sender using once packet-count + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. Remove twamp-light session + """ + + session = "TEST_SENDER1" + src_ip = "1.1.1.1" + src_udp_port = "862" + dst_ip = "2.2.2.2" + dst_udp_port = "863" + packet_count = "1000" + tx_interval = "10" + timeout = "10" + stats_interval = "20000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_packet_count(session, src_ip, src_udp_port, dst_ip, dst_udp_port, packet_count, tx_interval, timeout) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + + # wait for sending TWAMP-test done + time.sleep(12) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_SenderPacketCountMulti(self, dvs, testlog): + """ + This test covers the TWAMP Light Sender session creation and removal operations + Operation flow: + 1. Create twamp-light session-sender using multi packet-count + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. Remove twamp-light session + """ + + session = "TEST_SENDER1" + src_ip = "1.2.3.4" + src_udp_port = "862" + dst_ip = "5.6.7.8" + dst_udp_port = "863" + packet_count = "1000" + tx_interval = "10" + timeout = "10" + stats_interval = "11000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_packet_count(session, src_ip, src_udp_port, dst_ip, dst_udp_port, packet_count, tx_interval, timeout, stats_interval) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + + # wait for sending TWAMP-test done + time.sleep(120) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_SenderContinuousSingle(self, dvs, testlog): + """ + This test covers the TWAMP Light Sender session creation and removal operations + Operation flow: + 1. Create twamp-light session-sender using once continuous + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. Remove twamp-light session + """ + + session = "TEST_SENDER2" + src_ip = "11.11.11.11" + src_udp_port = "862" + dst_ip = "12.12.12.12" + dst_udp_port = "863" + monitor_time = "60" + tx_interval = "100" + timeout = "10" + stats_interval = "60000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_continuous(session, src_ip, src_udp_port, dst_ip, dst_udp_port, monitor_time, tx_interval, timeout) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + # wait for sending TWAMP-test done + time.sleep(60) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_SenderContinuousMulti(self, dvs, testlog): + """ + This test covers the continuous TWAMP Light Sender session creation and removal operations + Operation flow: + 1. Create twamp-light session-sender using multi continuous + The session remains inactive + 2. Start twamp-light session + The session becomes active + 3. Remove twamp-light session + """ + + session = "TEST_SENDER2" + src_ip = "11.12.13.14" + src_udp_port = "862" + dst_ip = "15.16.17.18" + dst_udp_port = "863" + monitor_time = "60" + tx_interval = "100" + timeout = "10" + stats_interval = "20000" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_sender_continuous(session, src_ip, src_udp_port, dst_ip, dst_udp_port, monitor_time, tx_interval, timeout, stats_interval) + + # start twamp-light session + self.dvs_twamp.start_twamp_light_sender(session) + + # wait for sending TWAMP-test done + time.sleep(60) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + + def test_Reflector(self, dvs, testlog): + """ + This test covers the TWAMP Light Reflector session creation and removal operations + Operation flow: + 1. Create twamp-light session-reflector + 2. Remove twamp-light session + """ + + session = "TEST_REFLECTOR1" + src_ip = "22.1.1.1" + src_udp_port = "862" + dst_ip = "22.1.1.2" + dst_udp_port = "863" + + marker = dvs.add_log_marker() + + # create twamp-light session + self.dvs_twamp.create_twamp_light_session_reflector(session, src_ip, src_udp_port, dst_ip, dst_udp_port) + + # remove twamp-light session + self.dvs_twamp.remove_twamp_light_session(session) + self.dvs_twamp.verify_no_session() + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index c92ed88c40d..a284b91e06c 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -5,6 +5,8 @@ import pytest import buffer_model +DVS_ENV = ["ASIC_VENDOR=vs"] + class TestVirtualChassis(object): def set_lag_id_boundaries(self, vct): @@ -25,6 +27,8 @@ def set_lag_id_boundaries(self, vct): chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, dvs.redis_chassis_sock) chassis_app_db.db_connection.set("SYSTEM_LAG_ID_START", "1") chassis_app_db.db_connection.set("SYSTEM_LAG_ID_END", "2") + chassis_app_db.db_connection.rpush("SYSTEM_LAG_IDS_FREE_LIST", "1") + chassis_app_db.db_connection.rpush("SYSTEM_LAG_IDS_FREE_LIST", "2") break def config_inbandif_port(self, vct, ibport): @@ -59,7 +63,70 @@ def del_inbandif_port(self, vct, ibport): # Applicable only for line cards if cfg_switch_type == "voq": config_db.delete_entry("VOQ_INBAND_INTERFACE", f"{ibport}") - + + def get_lc_dvs(self, vct, lc_switch_id): + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + + if cfg_switch_type == "voq": + switch_id = metatbl.get("switch_id") + assert switch_id != "", "Got error in getting switch_id from CONFIG_DB DEVICE_METADATA" + if lc_switch_id == switch_id: + return dvs + + def get_sup_dvs(self, vct): + dvss = vct.dvss + for name in dvss.keys(): + if name.startswith("supervisor"): + return dvss[name] + + def configure_neighbor(self, dvs, action, test_neigh_ip, mac_address, test_neigh_dev): + _, res = dvs.runcmd(['sh', "-c", "ip neigh show"]) + if action == "add": + _, res = dvs.runcmd(['sh', "-c", f"ip neigh {action} {test_neigh_ip} lladdr {mac_address} dev {test_neigh_dev}"]) + assert res == "", "Error configuring static neigh" + else: + _, res = dvs.runcmd(['sh', "-c", f"ip neigh del {test_neigh_ip} dev {test_neigh_dev}"]) + assert res == "", "Error deleting static neigh" + + def get_num_of_ecmp_paths_from_asic_db(self, dvs, ip_prefix): + # get the route entry + routes = dvs.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + + + # find the entry for the interested prefix + route_key = "" + for route in routes: + if ip_prefix in route: + route_key = route + break + + assert route_key != "", "Route not found" + + # get the nexthop group oid + route_entry =dvs.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", route_key) + nhg_id = route_entry.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", None) + + assert nhg_id is not None, "nexthop group is not found" + + # find the nexthop in the nexthop group member table which belong the nhg_id + nhs = dvs.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER") + count = 0 + for nh in nhs: + nh_entry = dvs.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", nh) + nh_nhg_id = nh_entry.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID", None) + + if nh_nhg_id == nhg_id: + count+=1 + + return count + def test_connectivity(self, vct): if vct is None: return @@ -138,7 +205,6 @@ def test_voq_switch(self, vct): spcfg = ast.literal_eval(value) assert spcfg['count'] == sp_count, "Number of systems ports configured is invalid" - @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_app_db_sync(self, vct): """Test chassis app db syncing. @@ -159,7 +225,6 @@ def test_chassis_app_db_sync(self, vct): keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") assert len(keys), "No chassis app db syncing is done" - @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_interface(self, vct): """Test RIF record creation in ASIC_DB for remote interfaces. @@ -216,7 +281,6 @@ def test_chassis_system_interface(self, vct): # Remote system ports's switch id should not match local switch id assert spcfginfo["attached_switch_id"] != lc_switch_id, "RIF system port with wrong switch_id" - @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_neigh(self, vct): """Test neigh record create/delete and syncing to chassis app db. @@ -312,8 +376,8 @@ def chassis_system_neigh_create(): test_sysneigh = "" for sysnk in sysneighkeys: sysnk_tok = sysnk.split("|") - assert len(sysnk_tok) == 3, "Invalid system neigh key in chassis app db" - if sysnk_tok[2] == test_neigh_ip: + assert len(sysnk_tok) == 4, "Invalid system neigh key in chassis app db" + if sysnk_tok[3] == test_neigh_ip: test_sysneigh = sysnk break @@ -372,7 +436,7 @@ def chassis_system_neigh_create(): # Check for kernel entries _, output = dvs.runcmd("ip neigh show") - assert f"{test_neigh_ip} dev {inband_port}" in output, "Kernel neigh not found for remote neighbor" + assert f"{test_neigh_ip} dev {inband_port} lladdr {mac_address}" in output, "Kernel neigh not found for remote neighbor" _, output = dvs.runcmd("ip route show") assert f"{test_neigh_ip} dev {inband_port} scope link" in output, "Kernel route not found for remote neighbor" @@ -487,7 +551,6 @@ def chassis_system_neigh_create(): # Cleanup inband if configuration self.del_inbandif_port(vct, inband_port) - @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_lag(self, vct): """Test PortChannel in VOQ based chassis systems. @@ -624,7 +687,6 @@ def test_chassis_system_lag(self, vct): break - @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_lag_id_allocator_table_full(self, vct): """Test lag id allocator table full. @@ -702,7 +764,6 @@ def test_chassis_system_lag_id_allocator_table_full(self, vct): break - @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_lag_id_allocator_del_id(self, vct): """Test lag id allocator's release id and re-use id processing. @@ -854,62 +915,6 @@ def test_chassis_system_lag_id_allocator_del_id(self, vct): break - def test_chassis_add_remove_ports(self, vct): - """Test removing and adding a port in a VOQ chassis. - - Test validates that when a port is created the port is removed from the default vlan. - """ - dvss = vct.dvss - for name in dvss.keys(): - dvs = dvss[name] - buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) - - config_db = dvs.get_config_db() - app_db = dvs.get_app_db() - asic_db = dvs.get_asic_db() - metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") - cfg_switch_type = metatbl.get("switch_type") - - if cfg_switch_type == "voq": - num_ports = len(asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - # Get the port info we'll flap - port = config_db.get_keys('PORT')[0] - port_info = config_db.get_entry("PORT", port) - - # Remove port's other configs - pgs = config_db.get_keys('BUFFER_PG') - queues = config_db.get_keys('BUFFER_QUEUE') - for key in pgs: - if port in key: - config_db.delete_entry('BUFFER_PG', key) - app_db.wait_for_deleted_entry('BUFFER_PG_TABLE', key) - - for key in queues: - if port in key: - config_db.delete_entry('BUFFER_QUEUE', key) - app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) - - # Remove port - config_db.delete_entry('PORT', port) - app_db.wait_for_deleted_entry('PORT_TABLE', port) - num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", - num_ports) - assert len(num) == num_ports - - # Create port - config_db.update_entry("PORT", port, port_info) - app_db.wait_for_entry("PORT_TABLE", port) - num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", - num_ports) - assert len(num) == num_ports - - # Check that we see the logs for removing default vlan - _, logSeen = dvs.runcmd( [ "sh", "-c", - "awk STARTFILE/ENDFILE /var/log/syslog | grep 'removeDefaultVlanMembers: Remove 32 VLAN members from default VLAN' | wc -l"] ) - assert logSeen.strip() == "1" - - buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) - def test_voq_egress_queue_counter(self, vct): if vct is None: return @@ -976,6 +981,335 @@ def test_chassis_wred_profile_on_system_ports(self, vct): # Total number of logs = (No of system ports * No of lossless priorities) - No of lossless priorities for CPU ports assert logSeen.strip() == str(len(system_ports)*2 - 2) + + def test_chassis_add_remove_ports(self, vct): + """Test removing and adding a port in a VOQ chassis. + + Test validates that when a port is created the port is removed from the default vlan. + """ + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + cfg_hostname = metatbl.get("hostname") + cfg_asic_name = metatbl.get("asic_name") + + if cfg_switch_type == "voq": + num_ports = len(asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + # Get the port info we'll flap + port = config_db.get_keys('PORT')[0] + port_info = config_db.get_entry("PORT", port) + system_port = cfg_hostname+"|"+cfg_asic_name+"|"+port + + # Remove port's other configs + pgs = config_db.get_keys('BUFFER_PG') + buf_queues = config_db.get_keys('BUFFER_QUEUE') + queues = config_db.get_keys('QUEUE') + for key in pgs: + if port in key: + config_db.delete_entry('BUFFER_PG', key) + app_db.wait_for_deleted_entry('BUFFER_PG_TABLE', key) + + for key in buf_queues: + if port in key: + config_db.delete_entry('BUFFER_QUEUE', key) + app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + queue_info = {} + for key in queues: + if system_port in key: + queue_info[key] = config_db.get_entry("QUEUE", key) + config_db.delete_entry('QUEUE', key) + config_db.wait_for_deleted_entry('QUEUE_TABLE', key) + + # Remove port + config_db.delete_entry('PORT', port) + app_db.wait_for_deleted_entry('PORT_TABLE', port) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports) + assert len(num) == num_ports + + # Create port + config_db.update_entry("PORT", port, port_info) + app_db.wait_for_entry("PORT_TABLE", port) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports) + assert len(num) == num_ports + + if len(queue_info): + for key, value in queue_info.items(): + config_db.update_entry("QUEUE", key, value) + config_db.wait_for_entry("QUEUE", key) + + # Check that we see the logs for removing default vlan + _, logSeen = dvs.runcmd( [ "sh", "-c", + "awk STARTFILE/ENDFILE /var/log/syslog | grep 'removeDefaultVlanMembers: Remove 32 VLAN members from default VLAN' | wc -l"] ) + assert logSeen.strip() == "1" + + buffer_model.disable_dynamic_buffer(dvs) + + def test_chassis_system_intf_status(self, vct): + dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + for key in keys: + intf = chassis_app_db.get_entry("SYSTEM_INTERFACE", key) + # Get the oper_status + oper_status = intf.get("oper_status", "unknown") + assert oper_status != "unknown", "System interface oper status is unknown" + + def test_remote_port_down(self, vct): + # test params + local_lc_switch_id = '0' + remote_lc_switch_id = '2' + test_system_port = "lc1|Asic0|Ethernet4" + test_prefix = "13.13.0.0/16" + inband_port = "Ethernet0" + test_neigh_ip_1 = "10.8.104.10" + test_neigh_dev_1 = "Ethernet4" + test_neigh_mac_1 = "00:01:02:03:04:05" + test_neigh_ip_2 = "10.8.108.10" + test_neigh_dev_2 = "Ethernet8" + test_neigh_mac_2 = "00:01:02:03:04:06" + + local_lc_dvs = self.get_lc_dvs(vct, local_lc_switch_id) + remote_lc_dvs = self.get_lc_dvs(vct, remote_lc_switch_id) + # config inband port + self.config_inbandif_port(vct, inband_port) + + # add 2 neighbors + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_1, test_neigh_mac_1, test_neigh_dev_1) + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_2, test_neigh_mac_2, test_neigh_dev_2) + + time.sleep(30) + + # add route of LC1(pretend learnt via bgp) + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route add {test_prefix} nexthop via {test_neigh_ip_1} nexthop via {test_neigh_ip_2}"]) + assert res == "", "Error configuring route" + time.sleep(10) + # verify 2 nexthops are programmed in asic_db + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs, test_prefix) + assert paths == 2, "ECMP paths not configured" + + # shut down port on LC0 + local_lc_dvs.port_admin_set("Ethernet4", "down") + time.sleep(10) + + # verify the port oper status is down in chassis db + sup_dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, sup_dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + port_status = chassis_app_db.get_entry("SYSTEM_INTERFACE", test_system_port) + oper_status = port_status.get("oper_status", "unknown") + assert oper_status == "down", "System interface oper status is not down" + + # verify the number of paths is reduced by 1 + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs, test_prefix) + assert paths == 1, "Remote port down does not remote ecmp member" + + # shut down port on LC0 + local_lc_dvs.port_admin_set("Ethernet4", "up") + time.sleep(10) + + # verify the port oper status is up in chassis db + sup_dvs = self.get_sup_dvs(vct) + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, sup_dvs.redis_chassis_sock) + keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") + assert len(keys) > 0, "No system interface entries in chassis app db" + port_status = chassis_app_db.get_entry("SYSTEM_INTERFACE", test_system_port) + oper_status = port_status.get("oper_status", "unknown") + assert oper_status == "up", "System interface oper status is not down" + + # verify the number of paths is reduced by 1 + paths = self.get_num_of_ecmp_paths_from_asic_db(remote_lc_dvs,test_prefix) + assert paths == 2, "Remote port up is not added in nexthop group" + + #cleanup + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route del {test_prefix} nexthop via {test_neigh_ip_1} nexthop via {test_neigh_ip_2}"]) + assert res == "", "Error configuring route" + + # Cleanup inband if configuration + self.del_inbandif_port(vct, inband_port) + self.configure_neighbor(local_lc_dvs, "del", test_neigh_ip_2, test_neigh_mac_2, test_neigh_dev_2) + + + def test_remote_neighbor_add(self, vct): + # test params + local_lc_switch_id = '0' + remote_lc_switch_id = '2' + test_prefix = "14.14.0.0/16" + inband_port = "Ethernet0" + test_neigh_ip_1 = "10.8.104.50" + test_neigh_dev_1 = "Ethernet4" + test_neigh_mac_1 = "00:09:03:04:05:06" + test_neigh_dev_2 = "Ethernet8" + + local_lc_dvs = self.get_lc_dvs(vct, local_lc_switch_id) + remote_lc_dvs = self.get_lc_dvs(vct, remote_lc_switch_id) + + # config inband port + self.config_inbandif_port(vct, inband_port) + + # add neighbor + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_1, test_neigh_mac_1, test_neigh_dev_1) + + time.sleep(10) + + asic_db = remote_lc_dvs.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 1) + neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + assert len(neighkeys), "No neigh entries in ASIC_DB" + + # Check for presence of the remote neighbor in ASIC_DB + remote_neigh = "" + for nkey in neighkeys: + ne = ast.literal_eval(nkey) + if ne['ip'] == test_neigh_ip_1: + remote_neigh = nkey + break + + assert remote_neigh != "", "Remote neigh not found in ASIC_DB" + + # Preserve remote neigh asic db neigh key for delete verification later + test_remote_neigh_asic_db_key = remote_neigh + + asic_db = remote_lc_dvs.get_asic_db() + nexthop_keys = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", 1) + assert len(nexthop_keys), "No Nexthop entries in ASIC_DB" + + nexthop_entry = asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_keys[0]) + ip = nexthop_entry.get("SAI_NEXT_HOP_ATTR_IP") + assert ip != "", "Ip address not found for nexthop entry in asic db" + rif1 = nexthop_entry.get("SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID") + + + # add route of LC1(pretend learnt via bgp) + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route add {test_prefix} nexthop via {test_neigh_ip_1}"]) + assert res == "", "Error configuring route" + time.sleep(5) + + # del neighbor on first port and add it on second port + self.configure_neighbor(local_lc_dvs, "del", test_neigh_ip_1, test_neigh_mac_1, test_neigh_dev_1) + time.sleep(5) + self.configure_neighbor(local_lc_dvs, "add", test_neigh_ip_1, test_neigh_mac_1, test_neigh_dev_2) + + time.sleep(10) + + asic_db = remote_lc_dvs.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 1) + neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + assert len(neighkeys), "No neigh entries in ASIC_DB" + + # Check for presence of the remote neighbor in ASIC_DB + remote_neigh = "" + for nkey in neighkeys: + ne = ast.literal_eval(nkey) + if ne['ip'] == test_neigh_ip_1: + remote_neigh = nkey + break + + assert remote_neigh != "", "Remote neigh not found in ASIC_DB" + + nexthop_keys = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", 1) + assert len(nexthop_keys), "No Nexthop entries in ASIC_DB" + nexthop_entry = asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_keys[0]) + print("2:nexthop_entrty:",nexthop_entry) + rif2 = nexthop_entry.get("SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID") + assert rif1 == rif2, "Neighbor is already replaced with new rif" + + #del the route + _, res = remote_lc_dvs.runcmd(['sh', '-c', f"ip route del {test_prefix} nexthop via {test_neigh_ip_1} "]) + assert res == "", "Error configuring route" + + time.sleep(10) + + asic_db = remote_lc_dvs.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 1) + neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + assert len(neighkeys), "No neigh entries in ASIC_DB" + + # Check for presence of the remote neighbor in ASIC_DB + remote_neigh = "" + for nkey in neighkeys: + ne = ast.literal_eval(nkey) + if ne['ip'] == test_neigh_ip_1: + remote_neigh = nkey + break + assert remote_neigh != "", "Remote neigh not found in ASIC_DB" + + nexthop_keys = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", 1) + assert len(nexthop_keys), "No Nexthop entries in ASIC_DB" + nexthop_entry = asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", nexthop_keys[0]) + print("3:nexthop_entrty:",nexthop_entry) + rif3 = nexthop_entry.get("SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID") + assert rif1 != rif3, "Neighbor is not replaced with new rif" + + #del the neighbor + self.configure_neighbor(local_lc_dvs, "del", test_neigh_ip_1, test_neigh_mac_1, test_neigh_dev_2) + time.sleep(10) + asic_db = remote_lc_dvs.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 0) + neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + assert len(neighkeys) == 0, "Neigh entries still in ASIC_DB" + + nexthop_keys = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", 0) + assert len(nexthop_keys) == 0, "Nexthop entries in still ASIC_DB" + + # Cleanup inband if configuration + self.del_inbandif_port(vct, inband_port) + + def test_voq_drop_counters(self, vct): + """Test VOQ switch drop counters. + + This test validates VOQ Switch counters for Voq/fabric switches - packet integrity counters + """ + + if vct is None: + return + + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config info + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + + # Test only for voq or fabric + if cfg_switch_type == "voq" or cfg_switch_type == "fabric": + print("VOQ drop counters test for {}".format(name)) + + # Verify that a counter has been created FLEX_COUNTER_DB and COUNTERS_DB. We will verify the state of + # the counter in the next step. + flex_db = dvs.get_flex_db() + keys = flex_db.get_keys("FLEX_COUNTER_TABLE") + assert len(keys), "No FLEX_COUNTER_TABLE in FLEX_COUNTER_DB" + for key in keys: + if "SWITCH_DEBUG_COUNTER" in key: + drop_entry = flex_db.get_entry("FLEX_COUNTER_TABLE", key) + value = drop_entry.get("SWITCH_DEBUG_COUNTER_ID_LIST") + assert value == "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP", "Got error in getting Voq Switch Drop counter from FLEX_COUNTER_DB" + + asic_db = dvs.get_asic_db() + keys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_SWITCH") + switch_oid_key = keys[0] + cntr_db = dvs.get_counters_db() + stat_name_entry = cntr_db.get_entry("COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP", "") + value = stat_name_entry.get("SWITCH_ID") + assert value == switch_oid_key, "Wrong Switch Id" + + stat_entry = cntr_db.get_entry("COUNTERS", switch_oid_key) + value = stat_entry.get("SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP") + assert value == "0", "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP is non zero in COUNTERS_DB" # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_vlan.py b/tests/test_vlan.py index 28d3de3a295..b8a277f282c 100644 --- a/tests/test_vlan.py +++ b/tests/test_vlan.py @@ -1,9 +1,25 @@ import distro import pytest +import ipaddress +import time from distutils.version import StrictVersion from dvslib.dvs_common import PollingConfig, wait_for_result + +def mac_to_link_local_ipv6(mac): + mac_bytes = mac.split(':') + mac_bytes = mac_bytes[:3] + ["ff", "fe"] + mac_bytes[3:] + second_digit = int(mac_bytes[0][1], 16) + second_digit ^= 0x2 # Reverse the second bit from right + mac_bytes[0] = mac_bytes[0][0] + format(second_digit, "x") + ipv6 = ["fe80:"] + for i in range(0, 7, 2): + ipv6 += [":", mac_bytes[i], mac_bytes[i + 1]] + ipv6 = "".join(ipv6) + return str(ipaddress.IPv6Address(ipv6)) # Conversion to IPv6Address is done to compress ipv6. + + @pytest.mark.usefixtures("testlog") @pytest.mark.usefixtures('dvs_vlan_manager') @pytest.mark.usefixtures('dvs_lag_manager') @@ -233,8 +249,58 @@ def test_AddPortChannelToVlan(self, dvs): self.dvs_vlan.create_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(1) + + self.dvs_vlan.create_vlan_member(vlan, lag_interface, "tagged") + self.dvs_vlan.get_and_verify_vlan_member_ids(1) + + self.dvs_vlan.remove_vlan_member(vlan, lag_interface) + self.dvs_vlan.get_and_verify_vlan_member_ids(0) + + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + + self.dvs_lag.remove_port_channel_member(lag_id, lag_member) + self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", 0) + + self.dvs_lag.remove_port_channel(lag_id) + self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 0) + + def test_AddPortChannelToVlanRaceCondition(self, dvs): + + vlan = "2" + lag_member = "Ethernet0" + lag_id = "0001" + lag_interface = "PortChannel{}".format(lag_id) + + self.dvs_lag.create_port_channel(lag_id) + lag_entries = self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 1) + + self.dvs_lag.create_port_channel_member(lag_id, lag_member) + + # Verify the LAG has been initialized properly + lag_member_entries = self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", 1) + fvs = self.dvs_vlan.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER", lag_member_entries[0]) + assert len(fvs) == 4 + assert fvs.get("SAI_LAG_MEMBER_ATTR_LAG_ID") == lag_entries[0] + assert self.dvs_vlan.asic_db.port_to_id_map[fvs.get("SAI_LAG_MEMBER_ATTR_PORT_ID")] == lag_member + + self.dvs_vlan.create_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(1) + # Kill teamsyncd + dvs.stop_teamsyncd() + + # Delete netdevice + dvs.runcmd("ip link del PortChannel" + lag_id) self.dvs_vlan.create_vlan_member(vlan, lag_interface, "tagged") + + self.dvs_vlan.get_and_verify_vlan_member_ids(0) + #Start teamsyncd + dvs.start_teamsyncd() + + #Start teammgrd + dvs.restart_teammgrd() + self.dvs_vlan.get_and_verify_vlan_member_ids(1) self.dvs_vlan.remove_vlan_member(vlan, lag_interface) @@ -249,6 +315,8 @@ def test_AddPortChannelToVlan(self, dvs): self.dvs_lag.remove_port_channel(lag_id) self.dvs_vlan.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", 0) + + def test_AddVlanMemberWithNonExistVlan(self, dvs): vlan = "2" @@ -534,6 +602,50 @@ def test_VlanMemberLinkDown(self, dvs): self.dvs_vlan.remove_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(0) + def test_MacMatchesLinkLocalIPv6(self, dvs): + """ + Checks whether the MAC addresses assigned to the Bridge, dummy, Vlan1000 and Ethernet4 + interfaces match their corresponding interface's link-local IPv6 address. + """ + dvs.setup_db() + + vlan_id = "1000" + vlan_member = "Ethernet4" + vlan_interface = f"Vlan{vlan_id}" + vlan_mac = "00:aa:bb:cc:dd:ee" + + assert dvs.get_interface_oper_status("Bridge") == "UP" + assert dvs.get_interface_oper_status("dummy") != "DOWN" + + bridge_mac = dvs.get_interface_mac("Bridge") + assert mac_to_link_local_ipv6(bridge_mac) == dvs.get_interface_link_local_ipv6("Bridge") + + dummy_mac = dvs.get_interface_mac("dummy") + assert mac_to_link_local_ipv6(dummy_mac) == dvs.get_interface_link_local_ipv6("dummy") + + self.dvs_vlan.create_vlan_with_mac(vlan_id, vlan_mac) + time.sleep(1) + assert dvs.get_interface_oper_status(vlan_interface) == "UP" + # The MAC address of the Bridge is expected to have changed, so we need to check again. + bridge_mac = dvs.get_interface_mac("Bridge") + assert mac_to_link_local_ipv6(bridge_mac) == dvs.get_interface_link_local_ipv6("Bridge") + vlan_mac = dvs.get_interface_mac(vlan_interface) + assert mac_to_link_local_ipv6(vlan_mac) == dvs.get_interface_link_local_ipv6(vlan_interface) + + self.dvs_vlan.create_vlan_member(vlan_interface, vlan_member) + time.sleep(1) + dvs.set_interface_status(vlan_member, "up") + assert dvs.get_interface_oper_status(vlan_member) != "DOWN" + member_mac = dvs.get_interface_mac(vlan_member) + assert mac_to_link_local_ipv6(member_mac) == dvs.get_interface_link_local_ipv6(vlan_member) + + # Tear down + dvs.set_interface_status(vlan_member, "down") + self.dvs_vlan.remove_vlan_member(vlan_interface, vlan_member) + self.dvs_vlan.remove_vlan(vlan_interface) + time.sleep(1) + + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 3b1ef6efd90..3a8f6435183 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -1,4 +1,5 @@ import time +import ipaddress import json import random import time @@ -7,2921 +8,2691 @@ from swsscommon import swsscommon from pprint import pprint from dvslib.dvs_common import wait_for_result +from vnet_lib import * +class TestVnetOrch(object): -def create_entry(tbl, key, pairs): - fvs = swsscommon.FieldValuePairs(pairs) - tbl.set(key, fvs) - time.sleep(1) - + CFG_SUBNET_DECAP_TABLE_NAME = "SUBNET_DECAP" -def create_entry_tbl(db, table, separator, key, pairs): - tbl = swsscommon.Table(db, table) - create_entry(tbl, key, pairs) + @pytest.fixture + def setup_subnet_decap(self, dvs): + def _apply_subnet_decap_config(subnet_decap_config): + """Apply subnet decap config to CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + fvs = create_fvs(**subnet_decap_config) + subnet_decap_tbl.set("AZURE", fvs) -def create_entry_pst(db, table, separator, key, pairs): - tbl = swsscommon.ProducerStateTable(db, table) - create_entry(tbl, key, pairs) + def _cleanup_subnet_decap_config(): + """Cleanup subnet decap config in CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + for key in subnet_decap_tbl.getKeys(): + subnet_decap_tbl._del(key) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + _cleanup_subnet_decap_config() -def delete_entry_tbl(db, table, key): - tbl = swsscommon.Table(db, table) - tbl._del(key) - time.sleep(1) + yield _apply_subnet_decap_config + _cleanup_subnet_decap_config() -def delete_entry_pst(db, table, key): - tbl = swsscommon.ProducerStateTable(db, table) - tbl._del(key) - time.sleep(1) + def get_vnet_obj(self): + return VnetVxlanVrfTunnel() + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + self.sdb = dvs.get_state_db() -def how_many_entries_exist(db, table): - tbl = swsscommon.Table(db, table) - return len(tbl.getKeys()) + def clear_srv_config(self, dvs): + dvs.servers[0].runcmd("ip address flush dev eth0") + dvs.servers[1].runcmd("ip address flush dev eth0") + dvs.servers[2].runcmd("ip address flush dev eth0") + dvs.servers[3].runcmd("ip address flush dev eth0") + def set_admin_status(self, interface, status): + self.cdb.update_entry("PORT", interface, {"admin_status": status}) -def entries(db, table): - tbl = swsscommon.Table(db, table) - return set(tbl.getKeys()) + def create_l3_intf(self, interface, vrf_name): + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) -def get_exist_entries(dvs, table): - db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(db, table) - return set(tbl.getKeys()) + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + def add_neighbor(self, interface, ip, mac): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + fvs = swsscommon.FieldValuePairs([("neigh", mac), + ("family", "IPv4")]) + tbl.set(interface + ":" + ip, fvs) + time.sleep(1) -def get_created_entry(db, table, existed_entries): - tbl = swsscommon.Table(db, table) - entries = set(tbl.getKeys()) - new_entries = list(entries - existed_entries) - assert len(new_entries) == 1, "Wrong number of created entries." - return new_entries[0] + def remove_neighbor(self, interface, ip): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + tbl._del(interface + ":" + ip) + time.sleep(1) + def create_route_entry(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) -def get_all_created_entries(db, table, existed_entries): - tbl = swsscommon.Table(db, table) - entries = set(tbl.getKeys()) - new_entries = list(entries - set(existed_entries)) - assert len(new_entries) >= 0, "Get all could be no new created entries." - new_entries.sort() - return new_entries + def remove_route_entry(self, key): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + tbl._del(key) + def check_route_entries(self, destinations): + def _access_function(): + route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + route_destinations = [json.loads(route_entry)["dest"] + for route_entry in route_entries] + return (all(destination in route_destinations for destination in destinations), None) -def get_created_entries(db, table, existed_entries, count): - new_entries = get_all_created_entries(db, table, existed_entries) - assert len(new_entries) == count, "Wrong number of created entries." - return new_entries + wait_for_result(_access_function) -def get_deleted_entries(db, table, existed_entries, count): - tbl = swsscommon.Table(db, table) - entries = set(tbl.getKeys()) - old_entries = list(existed_entries - entries) - assert len(old_entries) == count, "Wrong number of deleted entries." - old_entries.sort() - return old_entries + @pytest.fixture(params=["true", "false"]) + def ordered_ecmp(self, dvs, request): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'true') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) -def get_default_vr_id(dvs): - db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - table = 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER' - tbl = swsscommon.Table(db, table) - keys = tbl.getKeys() - assert len(keys) == 1, "Wrong number of virtual routers found" + yield request.param - return keys[0] + if request.param == "true": + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ('ordered_ecmp', 'false') + ], + ) + dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + ''' + Test 1 - Create Vlan Interface, Tunnel and Vnet + ''' + def test_vnet_orch_1(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() -def check_object(db, table, key, expected_attributes): - tbl = swsscommon.Table(db, table) - keys = tbl.getKeys() - assert key in keys, "The desired key is not presented" + tunnel_name = 'tunnel_1' - status, fvs = tbl.get(key) - assert status, "Got an error when get a key" + vnet_obj.fetch_exist_entries(dvs) - assert len(fvs) >= len(expected_attributes), "Incorrect attributes" + create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') + create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "") - attr_keys = {entry[0] for entry in fvs} + vnet_obj.check_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000') - for name, value in fvs: - if name in expected_attributes: - assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \ - (value, name, expected_attributes[name]) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') -def check_deleted_object(db, table, key): - tbl = swsscommon.Table(db, table) - keys = tbl.getKeys() - assert key not in keys, "The desired key is not removed" + vid = create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vnet_2000", "100.100.3.1/24") + vnet_obj.check_router_interface(dvs, "Vlan100", 'Vnet_2000', vid, intf_type="vlan") + vid = create_vlan_interface(dvs, "Vlan101", "Ethernet28", "Vnet_2000", "100.100.4.1/24") + vnet_obj.check_router_interface(dvs, "Vlan101", 'Vnet_2000', vid, intf_type="vlan") -def create_vnet_local_routes(dvs, prefix, vnet_name, ifname): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1') + vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32", ['10.10.10.1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - create_entry_tbl( - conf_db, - "VNET_ROUTE", '|', "%s|%s" % (vnet_name, prefix), - [ - ("ifname", ifname), - ] - ) + create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100') + # The route to Vlan100's subnet must have already been added to self.ASIC_ROUTE_ENTRY + vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000', vlan_subnet_route=True) - time.sleep(2) + create_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet_2000', 'Vlan101') + # The route to Vlan101's subnet must have already been added to self.ASIC_ROUTE_ENTRY + vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000', vlan_subnet_route=True) + #Create Physical Interface in another Vnet -def delete_vnet_local_routes(dvs, prefix, vnet_name): - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + create_vnet_entry(dvs, 'Vnet_2001', tunnel_name, '2001', "") - delete_entry_pst(app_db, "VNET_ROUTE_TABLE", "%s:%s" % (vnet_name, prefix)) + vnet_obj.check_vnet_entry(dvs, 'Vnet_2001') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2001', '2001') - time.sleep(2) + create_phy_interface(dvs, "Ethernet4", "Vnet_2001", "100.102.1.1/24") + vnet_obj.check_router_interface(dvs, "Ethernet4", 'Vnet_2001') + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A") + vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A") + check_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32", ['10.10.10.2']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") -def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): - set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix) + create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001', 'Ethernet4') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001') + # Clean-up and verify remove flows -def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2000', "100.100.3.0/24") - attrs = [ - ("endpoint", endpoint), - ] + delete_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet_2000') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2000', "100.100.4.0/24") - if vni: - attrs.append(('vni', vni)) + delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2001', "100.102.1.0/24") - if mac: - attrs.append(('mac_address', mac)) + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2001') + check_remove_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - if ep_monitor: - attrs.append(('endpoint_monitor', ep_monitor)) + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') + check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - if profile: - attrs.append(('profile', profile)) + delete_phy_interface(dvs, "Ethernet4", "100.102.1.1/24") + vnet_obj.check_del_router_interface(dvs, "Ethernet4") - if primary: - attrs.append(('primary', primary)) + delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan100") - if monitoring: - attrs.append(('monitoring', monitoring)) + delete_vlan_interface(dvs, "Vlan101", "100.100.4.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan101") - if adv_prefix: - attrs.append(('adv_prefix', adv_prefix)) + delete_vnet_entry(dvs, 'Vnet_2001') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2001') - tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") - fvs = swsscommon.FieldValuePairs(attrs) - tbl.set("%s|%s" % (vnet_name, prefix), fvs) + delete_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') - time.sleep(2) + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' + Test 2 - Two VNets, One HSMs per VNet + ''' + def test_vnet_orch_2(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() -def delete_vnet_routes(dvs, prefix, vnet_name): - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + tunnel_name = 'tunnel_2' - delete_entry_pst(app_db, "VNET_ROUTE_TUNNEL_TABLE", "%s:%s" % (vnet_name, prefix)) + vnet_obj.fetch_exist_entries(dvs) - time.sleep(2) + create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + create_vnet_entry(dvs, 'Vnet_1', tunnel_name, '1111', "") + vnet_obj.check_vnet_entry(dvs, 'Vnet_1') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_1', '1111') -def create_vlan(dvs, vlan_name, vlan_ids): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') - vlan_id = vlan_name[4:] + vid = create_vlan_interface(dvs, "Vlan1001", "Ethernet0", "Vnet_1", "1.1.10.1/24") + vnet_obj.check_router_interface(dvs, "Vlan1001", 'Vnet_1', vid, intf_type="vlan") - # create vlan - create_entry_tbl( - conf_db, - "VLAN", '|', vlan_name, - [ - ("vlanid", vlan_id), - ], - ) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10') + vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32", ['100.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.10/32") - time.sleep(1) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10') + vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32", ['100.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.11/32") - vlan_oid = get_created_entry(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_ids) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200') + vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32", ['200.200.1.200']) + check_remove_routes_advertisement(dvs, "1.1.1.12/32") - check_object(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_oid, - { - "SAI_VLAN_ATTR_VLAN_ID": vlan_id, - } - ) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201') + vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32", ['200.200.1.201']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "1.1.1.14/32") - return vlan_oid + create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1', vlan_subnet_route=True) + create_vnet_entry(dvs, 'Vnet_2', tunnel_name, '2222', "") -def create_vlan_interface(dvs, vlan_name, ifname, vnet_name, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + vnet_obj.check_vnet_entry(dvs, 'Vnet_2') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2', '2222') - vlan_ids = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + vid = create_vlan_interface(dvs, "Vlan1002", "Ethernet4", "Vnet_2", "2.2.10.1/24") + vnet_obj.check_router_interface(dvs, "Vlan1002", 'Vnet_2', vid, intf_type="vlan") - vlan_oid = create_vlan (dvs, vlan_name, vlan_ids) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20') + vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.10/32") - # create a vlan member in config db - create_entry_tbl( - conf_db, - "VLAN_MEMBER", '|', "%s|%s" % (vlan_name, ifname), - [ - ("tagging_mode", "untagged"), - ], - ) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20') + vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32", ['100.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "2.2.2.11/32") - time.sleep(1) + create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2', vlan_subnet_route=True) - # create vlan interface in config db - create_entry_tbl( - conf_db, - "VLAN_INTERFACE", '|', vlan_name, - [ - ("vnet_name", vnet_name), - ("proxy_arp", "enabled"), - ], - ) + # Clean-up and verify remove flows - #FIXME - This is created by IntfMgr - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - create_entry_pst( - app_db, - "INTF_TABLE", ':', vlan_name, - [ - ("vnet_name", vnet_name), - ("proxy_arp", "enabled"), - ], - ) - time.sleep(2) + delete_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2', "2.2.10.0/24") - create_entry_tbl( - conf_db, - "VLAN_INTERFACE", '|', "%s|%s" % (vlan_name, ipaddr), - [ - ("family", "IPv4"), - ], - ) + delete_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_1', "1.1.10.0/24") - time.sleep(2) + delete_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') + check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32") + check_remove_routes_advertisement(dvs, "2.2.2.11/32") - return vlan_oid + delete_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') + check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32") + check_remove_routes_advertisement(dvs, "2.2.2.10/32") + delete_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32") + check_remove_routes_advertisement(dvs, "1.1.1.14/32") -def delete_vlan_interface(dvs, ifname, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32") + check_remove_routes_advertisement(dvs, "1.1.1.12/32") - delete_entry_tbl(conf_db, "VLAN_INTERFACE", "%s|%s" % (ifname, ipaddr)) + delete_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32") + check_remove_routes_advertisement(dvs, "1.1.1.11/32") - time.sleep(2) + delete_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32") + check_remove_routes_advertisement(dvs, "1.1.1.10/32") - delete_entry_tbl(conf_db, "VLAN_INTERFACE", ifname) + delete_vlan_interface(dvs, "Vlan1002", "2.2.10.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan1002") - time.sleep(2) + delete_vlan_interface(dvs, "Vlan1001", "1.1.10.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan1001") + delete_vnet_entry(dvs, 'Vnet_1') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_1') -def create_phy_interface(dvs, ifname, vnet_name, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_vnet_entry(dvs, 'Vnet_2') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2') - exist_rifs = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) - # create vlan interface in config db - create_entry_tbl( - conf_db, - "INTERFACE", '|', ifname, - [ - ("vnet_name", vnet_name), - ], - ) + ''' + Test 3 - Two VNets, One HSMs per VNet, Peering + ''' + def test_vnet_orch_3(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() - #FIXME - This is created by IntfMgr - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - create_entry_pst( - app_db, - "INTF_TABLE", ':', ifname, - [ - ("vnet_name", vnet_name), - ], - ) - time.sleep(2) + tunnel_name = 'tunnel_3' - create_entry_tbl( - conf_db, - "INTERFACE", '|', "%s|%s" % (ifname, ipaddr), - [ - ("family", "IPv4"), - ], - ) + vnet_obj.fetch_exist_entries(dvs) + create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') -def delete_phy_interface(dvs, ifname, ipaddr): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + create_vnet_entry(dvs, 'Vnet_10', tunnel_name, '3333', "Vnet_20") - delete_entry_tbl(conf_db, "INTERFACE", "%s|%s" % (ifname, ipaddr)) + vnet_obj.check_vnet_entry(dvs, 'Vnet_10', ['Vnet_20']) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_10', '3333') - time.sleep(2) + create_vnet_entry(dvs, 'Vnet_20', tunnel_name, '4444', "Vnet_10") - delete_entry_tbl(conf_db, "INTERFACE", ifname) + vnet_obj.check_vnet_entry(dvs, 'Vnet_20', ['Vnet_10']) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_20', '4444') - time.sleep(2) + tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') + vid = create_vlan_interface(dvs, "Vlan2001", "Ethernet8", "Vnet_10", "5.5.10.1/24") + vnet_obj.check_router_interface(dvs, "Vlan2001", 'Vnet_10', vid, intf_type="vlan-one-peer") -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False, overlay_dmac=""): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + vid = create_vlan_interface(dvs, "Vlan2002", "Ethernet12", "Vnet_20", "8.8.10.1/24") + vnet_obj.check_router_interface(dvs, "Vlan2002", 'Vnet_20', vid, intf_type="vlan-one-peer") - attrs = [ - ("vxlan_tunnel", tunnel), - ("vni", vni), - ("peer_list", peer_list), - ] + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10') + vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32", ['50.1.1.10']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") - if scope: - attrs.append(('scope', scope)) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20') + vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name) + check_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32", ['80.1.1.20']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") - if advertise_prefix: - attrs.append(('advertise_prefix', 'true')) + create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10', vlan_subnet_route=True) - if overlay_dmac: - attrs.append(('overlay_dmac', overlay_dmac)) + create_vnet_local_routes(dvs, "8.8.10.0/24", 'Vnet_20', 'Vlan2002') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet_20', vlan_subnet_route=True) - # create the VXLAN tunnel Term entry in Config DB - create_entry_tbl( - conf_db, - "VNET", '|', name, - attrs, - ) + # Clean-up and verify remove flows - time.sleep(2) + delete_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_10', "5.5.10.0/24") + delete_vnet_local_routes(dvs, "8.8.10.0/24", 'Vnet_20') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_20', "8.8.10.0/24") -def delete_vnet_entry(dvs, name): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_10') + check_remove_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") - delete_entry_tbl(conf_db, "VNET", "%s" % (name)) + delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_20') + check_remove_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") - time.sleep(2) + delete_vlan_interface(dvs, "Vlan2001", "5.5.10.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan2001") + delete_vlan_interface(dvs, "Vlan2002", "8.8.10.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan2002") -def create_vxlan_tunnel(dvs, name, src_ip): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - attrs = [ - ("src_ip", src_ip), - ] + delete_vnet_entry(dvs, 'Vnet_10') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_10') - # create the VXLAN tunnel Term entry in Config DB - create_entry_tbl( - conf_db, - "VXLAN_TUNNEL", '|', name, - attrs, - ) + delete_vnet_entry(dvs, 'Vnet_20') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_20') -def delete_vxlan_tunnel(dvs, name): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - delete_entry_tbl(conf_db, "VXLAN_TUNNEL", name) + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) -def create_vxlan_tunnel_map(dvs, tunnel_name, tunnel_map_entry_name, vlan, vni_id): - conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + ''' + Test 4 - IPv6 Vxlan tunnel test + ''' + @pytest.mark.skip(reason="Failing. Under investigation") + def test_vnet_orch_4(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() - # create the VXLAN tunnel map entry in Config DB - create_entry_tbl( - conf_db, - "VXLAN_TUNNEL_MAP", '|', "%s|%s" % (tunnel_name, tunnel_map_entry_name), - [ - ("vni", vni_id), - ("vlan", vlan), - ], - ) + tunnel_name = 'tunnel_v6' + vnet_obj.fetch_exist_entries(dvs) -def get_lo(dvs): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - vr_id = get_default_vr_id(dvs) + create_vxlan_tunnel(dvs, tunnel_name, 'fd:2::32') + create_vnet_entry(dvs, 'Vnet3001', tunnel_name, '3001', "") - tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE') + vnet_obj.check_vnet_entry(dvs, 'Vnet3001') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3001', '3001') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:2::32') - entries = tbl.getKeys() - lo_id = None - for entry in entries: - status, fvs = tbl.get(entry) - assert status, "Got an error when get a key" - for key, value in fvs: - if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK': - lo_id = entry - break - else: - assert False, 'Don\'t found loopback id' + vid = create_vlan_interface(dvs, "Vlan300", "Ethernet24", 'Vnet3001', "100.100.3.1/24") + vnet_obj.check_router_interface(dvs, "Vlan300", 'Vnet3001', vid, intf_type="vlan") - return lo_id + vid = create_vlan_interface(dvs, "Vlan301", "Ethernet28", 'Vnet3001', "100.100.4.1/24") + vnet_obj.check_router_interface(dvs, "Vlan301", 'Vnet3001', vid, intf_type="vlan") + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') + vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) + check_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") -def get_switch_mac(dvs): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + create_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') + vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) + check_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.2/32") - tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH') + create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001', 'Vlan300') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001', vlan_subnet_route=True) - entries = tbl.getKeys() - mac = None - for entry in entries: - status, fvs = tbl.get(entry) - assert status, "Got an error when get a key" - for key, value in fvs: - if key == 'SAI_SWITCH_ATTR_SRC_MAC_ADDRESS': - mac = value - break - else: - assert False, 'Don\'t found switch mac' + create_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet3001', 'Vlan301') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001', vlan_subnet_route=True) - return mac + #Create Physical Interface in another Vnet + create_vnet_entry(dvs, 'Vnet3002', tunnel_name, '3002', "") -def check_linux_intf_arp_proxy(dvs, ifname): - (exitcode, out) = dvs.runcmd("cat /proc/sys/net/ipv4/conf/{0}/proxy_arp_pvlan".format(ifname)) - assert out != "1", "ARP proxy is not enabled for VNET interface in Linux kernel" + vnet_obj.check_vnet_entry(dvs, 'Vnet3002') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3002', '3002') + create_phy_interface(dvs, "Ethernet60", 'Vnet3002', "100.102.1.1/24") + vnet_obj.check_router_interface(dvs, "Ethernet60", 'Vnet3002') -def update_bfd_session_state(dvs, addr, state): - bfd_id = get_bfd_session_id(dvs, addr) - assert bfd_id is not None + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet3002', 'fd:2::34', "00:12:34:56:78:9A") + vnet_obj.check_vnet_routes(dvs, 'Vnet3002', 'fd:2::34', tunnel_name, "00:12:34:56:78:9A") + check_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/32", ['fd:2::34']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", - "Down": "SAI_BFD_SESSION_STATE_DOWN", - "Init": "SAI_BFD_SESSION_STATE_INIT", - "Up": "SAI_BFD_SESSION_STATE_UP"} + create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002', 'Ethernet60') + vnet_obj.check_vnet_local_routes(dvs, 'Vnet3002') - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - ntf = swsscommon.NotificationProducer(asic_db, "NOTIFICATIONS") - fvp = swsscommon.FieldValuePairs() - ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" - ntf.send("bfd_session_state_change", ntf_data, fvp) + # Test peering + create_vnet_entry(dvs, 'Vnet3003', tunnel_name, '3003', 'Vnet3004') -def update_monitor_session_state(dvs, addr, monitor, state): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - create_entry_tbl( - state_db, - "VNET_MONITOR_TABLE", '|', "%s|%s" % (monitor,addr), - [ - ("state", state), - ] - ) + vnet_obj.check_vnet_entry(dvs, 'Vnet3003', ['Vnet3004']) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3003', '3003') -def get_bfd_session_id(dvs, addr): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION") - entries = set(tbl.getKeys()) - for entry in entries: - status, fvs = tbl.get(entry) - fvs = dict(fvs) - assert status, "Got an error when get a key" - if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr and fvs["SAI_BFD_SESSION_ATTR_MULTIHOP"] == "true": - return entry + create_vnet_entry(dvs, 'Vnet3004', tunnel_name, '3004', 'Vnet3003') - return None + vnet_obj.check_vnet_entry(dvs, 'Vnet3004', ['Vnet3003']) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3004', '3004') + create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003', 'fd:2::35') + vnet_obj.check_vnet_routes(dvs, 'Vnet3004', 'fd:2::35', tunnel_name) + check_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32", ['fd:2::35']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "5.5.5.10/32") -def check_del_bfd_session(dvs, addrs): - for addr in addrs: - assert get_bfd_session_id(dvs, addr) is None + create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004', 'fd:2::36') + vnet_obj.check_vnet_routes(dvs, 'Vnet3003', 'fd:2::36', tunnel_name) + check_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32", ['fd:2::36']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "8.8.8.10/32") + # Clean-up and verify remove flows -def check_bfd_session(dvs, addrs): - for addr in addrs: - assert get_bfd_session_id(dvs, addr) is not None + delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet3003') + check_remove_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "5.5.5.10/32") + delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet3004') + check_remove_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32") + check_remove_routes_advertisement(dvs, "8.8.8.10/32") -def check_state_db_routes(dvs, vnet, prefix, endpoints): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + delete_vnet_entry(dvs, 'Vnet3003') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet3003') - status, fvs = tbl.get(vnet + '|' + prefix) - assert status, "Got an error when get a key" + delete_vnet_entry(dvs, 'Vnet3004') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet3004') - fvs = dict(fvs) - assert fvs['active_endpoints'] == ','.join(endpoints) + delete_vnet_routes(dvs, "100.100.2.1/24", 'Vnet3002') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet3002') + check_remove_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/24") + check_remove_routes_advertisement(dvs, "100.100.2.1/24") - if endpoints: - assert fvs['state'] == 'active' - else: - assert fvs['state'] == 'inactive' + delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3002', "100.102.1.0/24") + delete_phy_interface(dvs, "Ethernet60", "100.102.1.1/24") + vnet_obj.check_del_router_interface(dvs, "Ethernet60") -def check_remove_state_db_routes(dvs, vnet, prefix): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") - keys = tbl.getKeys() + delete_vnet_entry(dvs, 'Vnet3002') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet3002') - assert vnet + '|' + prefix not in keys + delete_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3001', "100.100.3.0/24") + delete_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet3001') + vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3001', "100.100.4.0/24") -def check_routes_advertisement(dvs, prefix, profile=""): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") - keys = tbl.getKeys() + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') + check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - assert prefix in keys + delete_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') + check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32") + check_remove_routes_advertisement(dvs, "100.100.1.2/32") - if profile: - status, fvs = tbl.get(prefix) - assert status, "Got an error when get a key" - fvs = dict(fvs) - assert fvs['profile'] == profile + delete_vlan_interface(dvs, "Vlan300", "100.100.3.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan300") + delete_vlan_interface(dvs, "Vlan301", "100.100.4.1/24") + vnet_obj.check_del_router_interface(dvs, "Vlan301") -def check_remove_routes_advertisement(dvs, prefix): - state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") - keys = tbl.getKeys() + delete_vnet_entry(dvs, 'Vnet3001') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet3001') - assert prefix not in keys + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' + Test 5 - Default VNet test + ''' + def test_vnet_orch_5(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() -def check_syslog(dvs, marker, err_log): - (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) - assert num.strip() == "0" + tunnel_name = 'tunnel_5' + vnet_obj.fetch_exist_entries(dvs) -loopback_id = 0 -def_vr_id = 0 -switch_mac = None - - -class VnetVxlanVrfTunnel(object): - - ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" - ASIC_TUNNEL_MAP = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP" - ASIC_TUNNEL_MAP_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY" - ASIC_TUNNEL_TERM_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" - ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" - ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" - ASIC_ROUTE_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" - ASIC_NEXT_HOP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" - ASIC_VLAN_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VLAN" - ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" - ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" - ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" - APP_VNET_MONITOR = "VNET_MONITOR_TABLE" - - def __init__(self): - self.tunnel_map_ids = set() - self.tunnel_map_entry_ids = set() - self.tunnel_ids = set() - self.tunnel_term_ids = set() - self.tunnel_map_map = {} - self.tunnel = {} - self.vnet_vr_ids = set() - self.vr_map = {} - self.nh_ids = {} - self.nhg_ids = {} + create_vxlan_tunnel(dvs, tunnel_name, '8.8.8.8') + create_vnet_entry(dvs, 'Vnet_5', tunnel_name, '4789', "", 'default') - def fetch_exist_entries(self, dvs): - self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) - self.tunnel_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TABLE) - self.tunnel_map_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP) - self.tunnel_map_entry_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP_ENTRY) - self.tunnel_term_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TERM_ENTRY) - self.rifs = get_exist_entries(dvs, self.ASIC_RIF_TABLE) - self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) - self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP) - self.nhgs = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP) - self.bfd_sessions = get_exist_entries(dvs, self.ASIC_BFD_SESSION) + vnet_obj.check_default_vnet_entry(dvs, 'Vnet_5') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_5', '4789') - global loopback_id, def_vr_id, switch_mac - if not loopback_id: - loopback_id = get_lo(dvs) + delete_vnet_entry(dvs, 'Vnet_5') + vnet_obj.check_default_vnet_entry(dvs, 'Vnet_5') - if not def_vr_id: - def_vr_id = get_default_vr_id(dvs) + ''' + Test 6 - Test VxLAN tunnel with multiple maps + ''' + def test_vnet_vxlan_multi_map(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() - if switch_mac is None: - switch_mac = get_switch_mac(dvs) + tunnel_name = 'tunnel_v4' - def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - global loopback_id, def_vr_id + vnet_obj.fetch_exist_entries(dvs) - tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) - tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) + create_vxlan_tunnel(dvs, tunnel_name, '10.1.0.32') + create_vnet_entry(dvs, 'Vnet1', tunnel_name, '10001', "") - # check that the vxlan tunnel termination are there - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created" - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasm't created" + vnet_obj.check_vnet_entry(dvs, 'Vnet1') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet1', '10001') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.1.0.32') - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[2], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', - } - ) + create_vxlan_tunnel_map(dvs, tunnel_name, 'map_1', 'Vlan1000', '1000') - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[3], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', - } - ) + delete_vnet_entry(dvs, 'Vnet1') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet1') + delete_vxlan_tunnel(dvs, tunnel_name) - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[0], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VLAN_ID', - } - ) + ''' + Test 7 - Test for vnet tunnel routes with ECMP nexthop group + ''' + def test_vnet_orch_7(self, dvs, ordered_ecmp, testlog): + vnet_obj = self.get_vnet_obj() - check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[1], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VNI', - } - ) - - check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, - { - 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', - 'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id, - 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[0], tunnel_map_id[2]), - 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[1], tunnel_map_id[3]), - 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, - } - ) - - expected_attributes = { - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP', - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID': def_vr_id, - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': src_ip, - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', - 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': tunnel_id, - } + tunnel_name = 'tunnel_7' + ordered_ecmp + vnet_name = 'Vnet7' + ordered_ecmp - check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, expected_attributes) + vnet_obj.fetch_exist_entries(dvs) - self.tunnel_map_ids.update(tunnel_map_id) - self.tunnel_ids.add(tunnel_id) - self.tunnel_term_ids.add(tunnel_term_id) - self.tunnel_map_map[tunnel_name] = tunnel_map_id - self.tunnel[tunnel_name] = tunnel_id + create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10007', "") - def check_del_vxlan_tunnel(self, dvs): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10007') - old_tunnel = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1) - check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, old_tunnel[0]) - self.tunnel_ids.remove(old_tunnel[0]) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') - old_tunnel_maps = get_deleted_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - for old_tunnel_map in old_tunnel_maps: - check_deleted_object(asic_db, self.ASIC_TUNNEL_MAP, old_tunnel_map) - self.tunnel_map_ids.remove(old_tunnel_map) + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.3,7.0.0.2,7.0.0.1') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + # Set the tunnel route to another nexthop group + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.4,7.0.0.3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - time.sleep(2) + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs - if (self.tunnel_map_map.get(tunnel_name) is None): - tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - else: - tunnel_map_id = self.tunnel_map_map[tunnel_name] - - tunnel_map_entry_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2) - - # check that the vxlan tunnel termination are there - assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY is created too early" - - check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0], - { - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[3], - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vnet_name].get('ing'), - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id, - } - ) - - check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1], - { - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[2], - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id, - 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vnet_name].get('egr'), - } - ) - - self.tunnel_map_entry_ids.update(tunnel_map_entry_id) - - def check_vnet_entry(self, dvs, name, peer_list=[]): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + # Create another tunnel route to the same set of endpoints + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - #Assert if there are linklocal entries - tbl = swsscommon.Table(app_db, "VNET_ROUTE_TUNNEL_TABLE") - route_entries = tbl.getKeys() - assert "ff00::/8" not in route_entries - assert "fe80::/64" not in route_entries + assert nhg2_1 == nhg1_2 - #Check virtual router objects - assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids) + 1),\ - "The VR objects are not created" + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - new_vr_ids = get_created_entries(asic_db, self.ASIC_VRF_TABLE, self.vnet_vr_ids, 1) + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 in vnet_obj.nhgs - self.vnet_vr_ids.update(new_vr_ids) - self.vr_map[name] = { 'ing':new_vr_ids[0], 'egr':new_vr_ids[0], 'peer':peer_list } + # Remove the other tunnel route + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - def check_default_vnet_entry(self, dvs, name): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - #Check virtual router objects - assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids)),\ - "Some VR objects are created" - #Mappers for default VNET is created with default VR objects. - self.vr_map[name] = { 'ing':list(self.vnet_vr_ids)[0], 'egr':list(self.vnet_vr_ids)[0], 'peer':[] } + # Check the nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs - def check_del_vnet_entry(self, dvs, name): - # TODO: Implement for VRF VNET - return True + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) - def vnet_route_ids(self, dvs, name, local=False): - vr_set = set() + ''' + Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group + ''' + def test_vnet_orch_8(self, dvs, ordered_ecmp, testlog): - vr_set.add(self.vr_map[name].get('ing')) + vnet_obj = self.get_vnet_obj() - try: - for peer in self.vr_map[name].get('peer'): - vr_set.add(self.vr_map[peer].get('ing')) - except IndexError: - pass + tunnel_name = 'tunnel_8' + ordered_ecmp + vnet_name = 'Vnet8' + ordered_ecmp - return vr_set - def check_router_interface(self, dvs, intf_name, name, vlan_oid=0): - # Check RIF in ingress VRF - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - global switch_mac + vnet_obj.fetch_exist_entries(dvs) - expected_attr = { - "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": self.vr_map[name].get('ing'), - "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": switch_mac, - "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", - } + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10008', "") - if vlan_oid: - expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_VLAN'}) - expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_VLAN_ID': vlan_oid}) - else: - expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_PORT'}) + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10008') - new_rif = get_created_entry(asic_db, self.ASIC_RIF_TABLE, self.rifs) - check_object(asic_db, self.ASIC_RIF_TABLE, new_rif, expected_attr) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - #IP2ME route will be created with every router interface - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, 1) + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::1,fd:8:1::3,fd:8:1::2') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - if vlan_oid: - expected_attr = { 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } - check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) + # Set the tunnel route to another nexthop group + set_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::2,fd:8:1::3,fd:8:1::1,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - check_linux_intf_arp_proxy(dvs, intf_name) + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs - self.rifs.add(new_rif) - self.routes.update(new_route) + # Create another tunnel route to the same set of endpoints + create_vnet_routes(dvs, "fd:8:20::32/128", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") - def check_del_router_interface(self, dvs, name): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + assert nhg2_1 == nhg1_2 - old_rif = get_deleted_entries(asic_db, self.ASIC_RIF_TABLE, self.rifs, 1) - check_deleted_object(asic_db, self.ASIC_RIF_TABLE, old_rif[0]) + # Create another tunnel route with ipv4 prefix to the same set of endpoints + create_vnet_routes(dvs, "8.0.0.0/24", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + check_remove_routes_advertisement(dvs, "8.0.0.0/24") - self.rifs.remove(old_rif[0]) + assert nhg3_1 == nhg1_2 - def check_vnet_local_routes(self, dvs, name): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - vr_ids = self.vnet_route_ids(dvs, name, True) - count = len(vr_ids) + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 in vnet_obj.nhgs - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + # Remove tunnel route 2 + delete_vnet_routes(dvs, "fd:8:20::32/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:20::32/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:8:20::32/128") + check_remove_routes_advertisement(dvs, "fd:8:20::32/128") - #Routes are not replicated to egress VRF, return if count is 0, else check peering - if not count: - return + # Remove tunnel route 3 + delete_vnet_routes(dvs, "8.0.0.0/24", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["8.0.0.0/24"]) + check_remove_state_db_routes(dvs, vnet_name, "8.0.0.0/24") + check_remove_routes_advertisement(dvs, "8.0.0.0/24") - asic_vrs = set() - for idx in range(count): - rt_key = json.loads(new_route[idx]) - asic_vrs.add(rt_key['vr']) + # Check the nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs - assert asic_vrs == vr_ids + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + - self.routes.update(new_route) + ''' + Test 9 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor + ''' + def test_vnet_orch_9(self, dvs, ordered_ecmp, testlog): + vnet_obj = self.get_vnet_obj() - def check_del_vnet_local_routes(self, dvs, name): - # TODO: Implement for VRF VNET - return True + tunnel_name = 'tunnel_9' + ordered_ecmp + vnet_name = 'Vnet9' + ordered_ecmp - def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0, route_ids=""): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + vnet_obj.fetch_exist_entries(dvs) - vr_ids = self.vnet_route_ids(dvs, name) - count = len(vr_ids) + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10009', "") - # Check routes in ingress VRF - expected_attr = { - "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", - "SAI_NEXT_HOP_ATTR_IP": endpoint, - "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], - } + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10009') - if vni: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni}) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - if mac: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac}) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') - if endpoint in self.nh_ids: - new_nh = self.nh_ids[endpoint] - else: - new_nh = get_created_entry(asic_db, self.ASIC_NEXT_HOP, self.nhops) - self.nh_ids[endpoint] = new_nh - self.nhops.add(new_nh) + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - check_object(asic_db, self.ASIC_NEXT_HOP, new_nh, expected_attr) - if not route_ids: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) - else: - new_route = route_ids + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - #Check if the route is in expected VRF - asic_vrs = set() - for idx in range(count): - check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], - { - "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nh, - } - ) - rt_key = json.loads(new_route[idx]) - asic_vrs.add(rt_key['vr']) + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - assert asic_vrs == vr_ids + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - self.routes.update(new_route) + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '9.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - return new_route + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + time.sleep(2) - def serialize_endpoint_group(self, endpoints): - endpoints.sort() - return ",".join(endpoints) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, expected_attrs): - expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) - tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) - entries = set(tbl_nhgm.getKeys()) - endpoints = [] - for entry in entries: - status, fvs = tbl_nhgm.get(entry) - fvs = dict(fvs) - assert status, "Got an error when get a key" - if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: - nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] - status, nh_fvs = tbl_nh.get(nh_key) - nh_fvs = dict(nh_fvs) - assert status, "Got an error when get a key" - endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] - endpoints.append(endpoint) - assert endpoint in expected_attrs - if ordered_ecmp == "true": - assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] - del expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] - else: - assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None - - check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) - - assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str - - def get_nexthop_groups(self, dvs, nhg): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) - tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) - nhg_data = {} - nhg_data['id'] = nhg - entries = set(tbl_nhgm.getKeys()) - nhg_data['endpoints'] = [] - for entry in entries: - status, fvs = tbl_nhgm.get(entry) - fvs = dict(fvs) - assert status, "Got an error when get a key" - if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: - nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] - status, nh_fvs = tbl_nh.get(nh_key) - nh_fvs = dict(nh_fvs) - assert status, "Got an error when get a key" - endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] - nhg_data['endpoints'].append(endpoint) - return nhg_data - def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) - - vr_ids = self.vnet_route_ids(dvs, name) - count = len(vr_ids) - - expected_attrs = {} - for idx, endpoint in enumerate(endpoints): - expected_attr = { - "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", - "SAI_NEXT_HOP_ATTR_IP": endpoint, - "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], - } - if vni and vni[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) - if mac and mac[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) - if ordered_ecmp == "true" and nh_seq_id: - expected_attr.update({'SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID': nh_seq_id[idx]}) - expected_attrs[endpoint] = expected_attr - - if nhg: - new_nhg = nhg - elif endpoint_str in self.nhg_ids: - new_nhg = self.nhg_ids[endpoint_str] - else: - new_nhg = get_created_entry(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) - self.nhg_ids[endpoint_str] = new_nhg - self.nhgs.add(new_nhg) + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') + update_bfd_session_state(dvs, '9.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs - # Check routes in ingress VRF - expected_nhg_attr = { - "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" if ordered_ecmp == "false" else "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP", - } - check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # Check nexthop group member - self.check_next_hop_group_member(dvs, new_nhg, ordered_ecmp, endpoints, expected_attrs) + # Set all endpoint to down state + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + update_bfd_session_state(dvs, '9.1.0.4', 'Down') + time.sleep(2) - if route_ids: - new_route = route_ids - else: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.5']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - #Check if the route is in expected VRF - asic_vrs = set() - for idx in range(count): - check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], - { - "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nhg, - } - ) - rt_key = json.loads(new_route[idx]) - asic_vrs.add(rt_key['vr']) + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - assert asic_vrs == vr_ids + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs - self.routes.update(new_route) + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.5']) + check_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4']) - return new_route, new_nhg + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - def check_priority_vnet_ecmp_routes(self, dvs, name, endpoints_primary, tunnel, mac=[], vni=[], route_ids=[], count =1, prefix =""): - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - endpoint_str_primary = name + "|" + self.serialize_endpoint_group(endpoints_primary) - new_nhgs = [] - expected_attrs_primary = {} - for idx, endpoint in enumerate(endpoints_primary): - expected_attr = { - "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", - "SAI_NEXT_HOP_ATTR_IP": endpoint, - "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], - } - if vni and vni[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) - if mac and mac[idx]: - expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) - expected_attrs_primary[endpoint] = expected_attr - - if len(endpoints_primary) == 1: - if route_ids: - new_route = route_ids - else: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) - return new_route - else : - new_nhgs = get_all_created_entries(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) - found_match = False - - for nhg in new_nhgs: - nhg_data = self.get_nexthop_groups(dvs, nhg) - eplist = self.serialize_endpoint_group(nhg_data['endpoints']) - if eplist == self.serialize_endpoint_group(endpoints_primary): - self.nhg_ids[endpoint_str_primary] = nhg - found_match = True - - assert found_match, "the expected Nexthop group was not found." - - # Check routes in ingress VRF - expected_nhg_attr = { - "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", - } - for nhg in new_nhgs: - check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, nhg, expected_nhg_attr) + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs - # Check nexthop group member - self.check_next_hop_group_member(dvs, self.nhg_ids[endpoint_str_primary], "false", endpoints_primary, expected_attrs_primary) + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4', '9.1.0.5']) - if route_ids: - new_route = route_ids - else: - new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) - #Check if the route is in expected VRF - active_nhg = self.nhg_ids[endpoint_str_primary] - for idx in range(count): - if prefix != "" and prefix not in new_route[idx] : - continue - check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], - { - "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": active_nhg, - } - ) - rt_key = json.loads(new_route[idx]) + ''' + Test 10 - Test for ipv6 vnet tunnel routes with ECMP nexthop group with endpoint health monitor + ''' + def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): + vnet_obj = self.get_vnet_obj() - self.routes.update(new_route) - del self.nhg_ids[endpoint_str_primary] - return new_route + tunnel_name = 'tunnel_10' + ordered_ecmp + vnet_name = 'Vnet10' + ordered_ecmp - def check_del_vnet_routes(self, dvs, name, prefixes=[]): - # TODO: Implement for VRF VNET + vnet_obj.fetch_exist_entries(dvs) - def _access_function(): - route_entries = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) - route_prefixes = [json.loads(route_entry)["dest"] for route_entry in route_entries] - return (all(prefix not in route_prefixes for prefix in prefixes), None) + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "") - if prefixes: - wait_for_result(_access_function) + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010') - return True + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - key = endpoint + ':' + prefix - check_object(app_db, self.APP_VNET_MONITOR, key, - { - "packet_type": packet_type, - "overlay_dmac" : overlay_dmac - } - ) - return True - - def check_custom_monitor_deleted(self, dvs, prefix, endpoint): - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - key = endpoint + ':' + prefix - check_deleted_object(app_db, self.APP_VNET_MONITOR, key) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') -class TestVnetOrch(object): + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - def get_vnet_obj(self): - return VnetVxlanVrfTunnel() + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - def setup_db(self, dvs): - self.pdb = dvs.get_app_db() - self.adb = dvs.get_asic_db() - self.cdb = dvs.get_config_db() - self.sdb = dvs.get_state_db() + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - def clear_srv_config(self, dvs): - dvs.servers[0].runcmd("ip address flush dev eth0") - dvs.servers[1].runcmd("ip address flush dev eth0") - dvs.servers[2].runcmd("ip address flush dev eth0") - dvs.servers[3].runcmd("ip address flush dev eth0") + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:20::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1']) + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") - def set_admin_status(self, interface, status): - self.cdb.update_entry("PORT", interface, {"admin_status": status}) + # Update BFD session state and verify route change + update_bfd_session_state(dvs, 'fd:10:2::5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") - def create_l3_intf(self, interface, vrf_name): - if len(vrf_name) == 0: - self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) - else: - self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') + time.sleep(2) - def add_ip_address(self, interface, ip): - self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - def remove_ip_address(self, interface, ip): - self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + # Set the route to a new group + set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') + update_bfd_session_state(dvs, 'fd:10:2::4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs - def create_route_entry(self, key, pairs): - tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") - fvs = swsscommon.FieldValuePairs(list(pairs.items())) - tbl.set(key, fvs) + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2, + ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - def remove_route_entry(self, key): - tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") - tbl._del(key) + # Set all endpoint to down state + update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::4', 'Down') + time.sleep(2) - def check_route_entries(self, destinations): - def _access_function(): - route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") - route_destinations = [json.loads(route_entry)["dest"] - for route_entry in route_entries] - return (all(destination in route_destinations for destination in destinations), None) + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) + check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::5']) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") - wait_for_result(_access_function) + # Remove tunnel route2 + delete_vnet_routes(dvs, "fd:10:20::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:20::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:20::1/128") + check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs - @pytest.fixture(params=["true", "false"]) - def ordered_ecmp(self, dvs, request): + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['fd:10:2::5']) + check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) - app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - if request.param == "true": - create_entry_pst( - app_db, - "SWITCH_TABLE", ':', "switch", - [ - ('ordered_ecmp', 'true') - ], - ) - dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "true"}) + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['fd:10:2::5']) + check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) - yield request.param + # Remove tunnel route 1 + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128") + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - if request.param == "true": - create_entry_pst( - app_db, - "SWITCH_TABLE", ':', "switch", - [ - ('ordered_ecmp', 'false') - ], - ) - dvs.get_state_db().wait_for_field_match("SWITCH_CAPABILITY", "switch", {"ORDERED_ECMP_CAPABLE": "false"}) + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4', 'fd:10:2::5']) + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 1 - Create Vlan Interface, Tunnel and Vnet + Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor ''' - def test_vnet_orch_1(self, dvs, testlog): + def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_1' + tunnel_name = 'tunnel_11' + ordered_ecmp + vnet_name = 'Vnet11' + ordered_ecmp vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') - create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "") - - vnet_obj.check_vnet_entry(dvs, 'Vnet_2000') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000') - - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') + create_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') + create_vnet_entry(dvs, vnet_name, tunnel_name, '100011', "") - vid = create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vnet_2000", "100.100.3.1/24") - vnet_obj.check_router_interface(dvs, "Vlan100", 'Vnet_2000', vid) + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '100011') - vid = create_vlan_interface(dvs, "Vlan101", "Ethernet28", "Vnet_2000", "100.100.4.1/24") - vnet_obj.check_router_interface(dvs, "Vlan101", 'Vnet_2000', vid) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1') - vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name) - check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32", ['10.10.10.1']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.1', ep_monitor='11.1.0.1') - create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000') + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - create_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet_2000', 'Vlan101') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000') + # Route should be properly configured when bfd session state goes up + update_bfd_session_state(dvs, '11.1.0.1', 'Up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.1', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['11.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - #Create Physical Interface in another Vnet + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '11.0.0.2,11.0.0.1', ep_monitor='11.1.0.2,11.1.0.1') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - create_vnet_entry(dvs, 'Vnet_2001', tunnel_name, '2001', "") + # Create a third tunnel route with another endpoint + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.3.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') - vnet_obj.check_vnet_entry(dvs, 'Vnet_2001') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2001', '2001') + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '11.1.0.2', 'Up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + check_remove_routes_advertisement(dvs, "100.100.2.1/32") + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.3.1/32") - create_phy_interface(dvs, "Ethernet4", "Vnet_2001", "100.102.1.1/24") - vnet_obj.check_router_interface(dvs, "Ethernet4", 'Vnet_2001') - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A") - vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A") - check_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32", ['10.10.10.2']) + update_bfd_session_state(dvs, '11.1.0.1', 'Down') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['2']) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") # The default Vnet setting does not advertise prefix check_remove_routes_advertisement(dvs, "100.100.2.1/32") - create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001', 'Ethernet4') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001') - - # Clean-up and verify remove flows - - delete_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2000') - - delete_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet_2000') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2000') - delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet_2001') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2001') + # Set the route1 to a new endpoint + vnet_obj.fetch_exist_entries(dvs) + set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') + vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) + check_remove_routes_advertisement(dvs, "100.100.3.1/32") - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2001') - check_remove_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32") + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") check_remove_routes_advertisement(dvs, "100.100.2.1/32") - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') - check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - - delete_phy_interface(dvs, "Ethernet4", "100.102.1.1/24") - vnet_obj.check_del_router_interface(dvs, "Ethernet4") + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs - delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan100") + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['11.1.0.1']) + check_bfd_session(dvs, ['11.1.0.2']) - delete_vlan_interface(dvs, "Vlan101", "100.100.4.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan101") + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - delete_vnet_entry(dvs, 'Vnet_2001') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2001') + # Remove tunnel route 3 + delete_vnet_routes(dvs, "100.100.3.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.3.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.3.1/32") + check_remove_routes_advertisement(dvs, "100.100.3.1/32") - delete_vnet_entry(dvs, 'Vnet_2000') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['11.1.0.1', '11.1.0.2']) + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) - vnet_obj.check_del_vxlan_tunnel(dvs) + ''' - Test 2 - Two VNets, One HSMs per VNet + Test 12 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor and route advertisement ''' - def test_vnet_orch_2(self, dvs, testlog): + def test_vnet_orch_12(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_2' + tunnel_name = 'tunnel_12' vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') - create_vnet_entry(dvs, 'Vnet_1', tunnel_name, '1111', "") - - vnet_obj.check_vnet_entry(dvs, 'Vnet_1') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_1', '1111') + create_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') + create_vnet_entry(dvs, 'Vnet12', tunnel_name, '10012', "", advertise_prefix=True) - tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vnet_obj.check_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet12', '10012') - vid = create_vlan_interface(dvs, "Vlan1001", "Ethernet0", "Vnet_1", "1.1.10.1/24") - vnet_obj.check_router_interface(dvs, "Vlan1001", 'Vnet_1', vid) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10') - vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) - check_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32", ['100.1.1.10']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "1.1.1.10/32") + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3', profile="test_profile") - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10') - vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) - check_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32", ['100.1.1.10']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "1.1.1.11/32") + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200') - vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name) - check_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32", ['200.200.1.200']) - check_remove_routes_advertisement(dvs, "1.1.1.12/32") + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '12.1.0.1', 'Up') + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + update_bfd_session_state(dvs, '12.1.0.3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") + # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201') - vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name) - check_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32", ['200.200.1.201']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "1.1.1.14/32") - - create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1') + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.5', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32") - create_vnet_entry(dvs, 'Vnet_2', tunnel_name, '2222', "") + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '12.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1', '12.0.0.5']) + check_routes_advertisement(dvs, "100.100.2.1/32") - vnet_obj.check_vnet_entry(dvs, 'Vnet_2') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2', '2222') + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + time.sleep(2) - vid = create_vlan_interface(dvs, "Vlan1002", "Ethernet4", "Vnet_2", "2.2.10.1/24") - vnet_obj.check_router_interface(dvs, "Vlan1002", 'Vnet_2', vid) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20') - vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) - check_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32", ['100.1.1.20']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "2.2.2.10/32") + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4', profile="test_profile2") + update_bfd_session_state(dvs, '12.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") + # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20') - vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) - check_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32", ['100.1.1.20']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "2.2.2.11/32") - - create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2') - - # Clean-up and verify remove flows - - delete_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_2') - - delete_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_1') - - delete_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') - check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32") - check_remove_routes_advertisement(dvs, "2.2.2.11/32") + assert nhg1_1 not in vnet_obj.nhgs - delete_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') - check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32") - check_remove_routes_advertisement(dvs, "2.2.2.10/32") + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '12.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") - delete_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') - check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32") - check_remove_routes_advertisement(dvs, "1.1.1.14/32") + # Set all endpoint to down state + update_bfd_session_state(dvs, '12.1.0.1', 'Down') + update_bfd_session_state(dvs, '12.1.0.2', 'Down') + update_bfd_session_state(dvs, '12.1.0.3', 'Down') + update_bfd_session_state(dvs, '12.1.0.4', 'Down') + time.sleep(2) - delete_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') - check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32") - check_remove_routes_advertisement(dvs, "1.1.1.12/32") + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.5']) + check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.2.1/32") - delete_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') - check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32") - check_remove_routes_advertisement(dvs, "1.1.1.11/32") + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32") + check_remove_routes_advertisement(dvs, "100.100.2.1/32") - delete_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') - check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32") - check_remove_routes_advertisement(dvs, "1.1.1.10/32") + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs - delete_vlan_interface(dvs, "Vlan1002", "2.2.10.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan1002") + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['12.1.0.5']) + check_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4']) - delete_vlan_interface(dvs, "Vlan1001", "1.1.10.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan1001") + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - delete_vnet_entry(dvs, 'Vnet_1') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet_1') + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs - delete_vnet_entry(dvs, 'Vnet_2') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2') + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4', '12.1.0.5']) + delete_vnet_entry(dvs, 'Vnet12') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet12') delete_vxlan_tunnel(dvs, tunnel_name) - vnet_obj.check_del_vxlan_tunnel(dvs) ''' - Test 3 - Two VNets, One HSMs per VNet, Peering + Test 13 - Test for configuration idempotent behaviour ''' - def test_vnet_orch_3(self, dvs, testlog): + def test_vnet_orch_13(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_3' - + tunnel_name = 'tunnel_13' vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') - - create_vnet_entry(dvs, 'Vnet_10', tunnel_name, '3333', "Vnet_20") - - vnet_obj.check_vnet_entry(dvs, 'Vnet_10', ['Vnet_20']) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_10', '3333') - - create_vnet_entry(dvs, 'Vnet_20', tunnel_name, '4444', "Vnet_10") - - vnet_obj.check_vnet_entry(dvs, 'Vnet_20', ['Vnet_10']) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_20', '4444') - - tun_id = vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet13', tunnel_name, '10008', "") - vid = create_vlan_interface(dvs, "Vlan2001", "Ethernet8", "Vnet_10", "5.5.10.1/24") - vnet_obj.check_router_interface(dvs, "Vlan2001", 'Vnet_10', vid) + vnet_obj.check_vnet_entry(dvs, 'Vnet13') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet13', '10008') - vid = create_vlan_interface(dvs, "Vlan2002", "Ethernet12", "Vnet_20", "8.8.10.1/24") - vnet_obj.check_router_interface(dvs, "Vlan2002", 'Vnet_20', vid) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10') - vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name) - check_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32", ['50.1.1.10']) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20') - vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name) - check_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32", ['80.1.1.20']) + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "8.8.8.10/32") - - create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10') - - create_vnet_local_routes(dvs, "8.8.10.0/24", 'Vnet_20', 'Vlan2002') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet_20') - - # Clean-up and verify remove flows - - delete_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_10') - - delete_vnet_local_routes(dvs, "8.8.10.0/24", 'Vnet_20') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet_20') - - delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_10') - check_remove_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32") - check_remove_routes_advertisement(dvs, "5.5.5.10/32") - - delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_20') - check_remove_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32") - check_remove_routes_advertisement(dvs, "8.8.8.10/32") - - delete_vlan_interface(dvs, "Vlan2001", "5.5.10.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan2001") - - delete_vlan_interface(dvs, "Vlan2002", "8.8.10.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan2002") - - delete_vnet_entry(dvs, 'Vnet_10') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet_10') + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 in vnet_obj.nhgs + assert len(vnet_obj.nhgs) == 1 + assert nhg1_1 == nhg1_2 - delete_vnet_entry(dvs, 'Vnet_20') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet_20') + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet13', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - delete_vxlan_tunnel(dvs, tunnel_name) - vnet_obj.check_del_vxlan_tunnel(dvs) + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + assert len(vnet_obj.nhgs) == 0 + delete_vnet_entry(dvs, 'Vnet13') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet13') ''' - Test 4 - IPv6 Vxlan tunnel test + Test 14 - Test for configuration idempotent behaviour 2 ''' - @pytest.mark.skip(reason="Failing. Under investigation") - def test_vnet_orch_4(self, dvs, testlog): + def test_vnet_orch_14(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_v6' - + tunnel_name = 'tunnel_14' vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, 'fd:2::32') - create_vnet_entry(dvs, 'Vnet3001', tunnel_name, '3001', "") - - vnet_obj.check_vnet_entry(dvs, 'Vnet3001') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3001', '3001') - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:2::32') + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet14', tunnel_name, '10008', "") - vid = create_vlan_interface(dvs, "Vlan300", "Ethernet24", 'Vnet3001', "100.100.3.1/24") - vnet_obj.check_router_interface(dvs, "Vlan300", 'Vnet3001', vid) + vnet_obj.check_vnet_entry(dvs, 'Vnet14') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet14', '10008') - vid = create_vlan_interface(dvs, "Vlan301", "Ethernet28", 'Vnet3001', "100.100.4.1/24") - vnet_obj.check_router_interface(dvs, "Vlan301", 'Vnet3001', vid) + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') - vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) - check_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - create_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') - vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) - check_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.1.2/32") - - create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001', 'Vlan300') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001') - - create_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet3001', 'Vlan301') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001') + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - #Create Physical Interface in another Vnet + #update nexthops for the same tunnel. + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - create_vnet_entry(dvs, 'Vnet3002', tunnel_name, '3002', "") + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + assert nhg1_2 in vnet_obj.nhgs - vnet_obj.check_vnet_entry(dvs, 'Vnet3002') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3002', '3002') + # Remove the tunnel route + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Remove the tunnel route + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - create_phy_interface(dvs, "Ethernet60", 'Vnet3002', "100.102.1.1/24") - vnet_obj.check_router_interface(dvs, "Ethernet60", 'Vnet3002') + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + assert nhg1_1 not in vnet_obj.nhgs - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet3002', 'fd:2::34', "00:12:34:56:78:9A") - vnet_obj.check_vnet_routes(dvs, 'Vnet3002', 'fd:2::34', tunnel_name, "00:12:34:56:78:9A") - check_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/32", ['fd:2::34']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.2.1/32") + delete_vnet_entry(dvs, 'Vnet14') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet14') + delete_vxlan_tunnel(dvs, tunnel_name) - create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002', 'Ethernet60') - vnet_obj.check_vnet_local_routes(dvs, 'Vnet3002') + ''' + Test 15 - Test for configuration idempotent behaviour single endpoint + ''' + def test_vnet_orch_15(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() - # Test peering - create_vnet_entry(dvs, 'Vnet3003', tunnel_name, '3003', 'Vnet3004') + tunnel_name = 'tunnel_15' + vnet_obj.fetch_exist_entries(dvs) - vnet_obj.check_vnet_entry(dvs, 'Vnet3003', ['Vnet3004']) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3003', '3003') + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet15', tunnel_name, '10008', "") - create_vnet_entry(dvs, 'Vnet3004', tunnel_name, '3004', 'Vnet3003') + vnet_obj.check_vnet_entry(dvs, 'Vnet15') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet15', '10008') - vnet_obj.check_vnet_entry(dvs, 'Vnet3004', ['Vnet3003']) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet3004', '3004') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003', 'fd:2::35') - vnet_obj.check_vnet_routes(dvs, 'Vnet3004', 'fd:2::35', tunnel_name) - check_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32", ['fd:2::35']) + # Create an tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name) + check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "5.5.5.10/32") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004', 'fd:2::36') - vnet_obj.check_vnet_routes(dvs, 'Vnet3003', 'fd:2::36', tunnel_name) - check_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32", ['fd:2::36']) + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "8.8.8.10/32") - - # Clean-up and verify remove flows - - delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet3003') - check_remove_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32") - check_remove_routes_advertisement(dvs, "5.5.5.10/32") - - delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet3004') - check_remove_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32") - check_remove_routes_advertisement(dvs, "8.8.8.10/32") - - delete_vnet_entry(dvs, 'Vnet3003') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet3003') - - delete_vnet_entry(dvs, 'Vnet3004') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet3004') - - delete_vnet_routes(dvs, "100.100.2.1/24", 'Vnet3002') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet3002') - check_remove_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/24") - check_remove_routes_advertisement(dvs, "100.100.2.1/24") - - delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3002') - - delete_phy_interface(dvs, "Ethernet60", "100.102.1.1/24") - vnet_obj.check_del_router_interface(dvs, "Ethernet60") - - delete_vnet_entry(dvs, 'Vnet3002') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet3002') - - delete_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3001') - - delete_vnet_local_routes(dvs, "100.100.4.0/24", 'Vnet3001') - vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3001') - - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') - check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - - delete_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') - check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32") - check_remove_routes_advertisement(dvs, "100.100.1.2/32") - - delete_vlan_interface(dvs, "Vlan300", "100.100.3.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan300") - - delete_vlan_interface(dvs, "Vlan301", "100.100.4.1/24") - vnet_obj.check_del_router_interface(dvs, "Vlan301") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 1 - delete_vnet_entry(dvs, 'Vnet3001') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet3001') + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet15', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 0 + delete_vnet_entry(dvs, 'Vnet15') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet15') delete_vxlan_tunnel(dvs, tunnel_name) - vnet_obj.check_del_vxlan_tunnel(dvs) ''' - Test 5 - Default VNet test + Test 16 - Test for configuration idempotent behaviour single endpoint with BFD ''' - def test_vnet_orch_5(self, dvs, testlog): + def test_vnet_orch_16(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_5' - + tunnel_name = 'tunnel_16' vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '8.8.8.8') - create_vnet_entry(dvs, 'Vnet_5', tunnel_name, '4789', "", 'default') + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') + create_vnet_entry(dvs, 'Vnet16', tunnel_name, '10008', "") - vnet_obj.check_default_vnet_entry(dvs, 'Vnet_5') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_5', '4789') + vnet_obj.check_vnet_entry(dvs, 'Vnet16') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet16', '10008') - delete_vnet_entry(dvs, 'Vnet_5') - vnet_obj.check_default_vnet_entry(dvs, 'Vnet_5') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') - ''' - Test 6 - Test VxLAN tunnel with multiple maps - ''' - def test_vnet_vxlan_multi_map(self, dvs, testlog): - vnet_obj = self.get_vnet_obj() + # Create a tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') + time.sleep(2) - tunnel_name = 'tunnel_v4' + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + # Check only one group is present vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 1 - create_vxlan_tunnel(dvs, tunnel_name, '10.1.0.32') - create_vnet_entry(dvs, 'Vnet1', tunnel_name, '10001', "") + update_bfd_session_state(dvs, 'fd:8:2::1', 'Down') + time.sleep(2) + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') - vnet_obj.check_vnet_entry(dvs, 'Vnet1') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet1', '10001') - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.1.0.32') + update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') + time.sleep(2) - create_vxlan_tunnel_map(dvs, tunnel_name, 'map_1', 'Vlan1000', '1000') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name,route_ids=route1) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") - delete_vnet_entry(dvs, 'Vnet1') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet1') + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet16', ["fd:8:11::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128") + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 0 + delete_vnet_entry(dvs, 'Vnet16') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet16') delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 7 - Test for vnet tunnel routes with ECMP nexthop group + Test 17 - Test for configuration idempotent behaviour multiple endpoint with BFD ''' - def test_vnet_orch_7(self, dvs, ordered_ecmp, testlog): + def test_vnet_orch_17(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_7' + ordered_ecmp - vnet_name = 'Vnet7' + ordered_ecmp + tunnel_name = 'tunnel_17' vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10007', "") + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet17', tunnel_name, '10017', "") - vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10007') + vnet_obj.check_vnet_entry(dvs, 'Vnet17') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet17', '10017') - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '7.7.7.7') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.3,7.0.0.2,7.0.0.1') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3']) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.4,7.0.0.3') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1, - ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) + #readd the route + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 not in vnet_obj.nhgs - - # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, - ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) - check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) - check_remove_routes_advertisement(dvs, "100.100.2.1/32") - - assert nhg2_1 == nhg1_2 + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(2) - # Remove one of the tunnel routes - delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # Check the nexthop group still exists - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_2 in vnet_obj.nhgs + #readd the active route + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + route2, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + assert nhg1_1 == nhg1_2 + assert len(vnet_obj.nhgs) == 1 - # Remove the other tunnel route - delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") - check_remove_routes_advertisement(dvs, "100.100.2.1/32") + # Remove tunnel route + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # Check the nexthop group is removed + # Check the corresponding nexthop group is removed vnet_obj.fetch_exist_entries(dvs) - assert nhg2_1 not in vnet_obj.nhgs + assert nhg1_1 not in vnet_obj.nhgs + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) - delete_vnet_entry(dvs, vnet_name) - vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vnet_entry(dvs, 'Vnet17') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet17') delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group + Test 18 - Test for priority vnet tunnel routes with ECMP nexthop group. test primary secondary switchover. ''' - def test_vnet_orch_8(self, dvs, ordered_ecmp, testlog): - + def test_vnet_orch_18(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - - tunnel_name = 'tunnel_8' + ordered_ecmp - vnet_name = 'Vnet8' + ordered_ecmp - + tunnel_name = 'tunnel_18' + vnet_name = 'vnet18' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10008', "") + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10018', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10008') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10018') - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - # Create an ECMP tunnel route vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::1,fd:8:1::3,fd:8:1::2') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) - check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') - # Set the tunnel route to another nexthop group - set_vnet_routes(dvs, "fd:8:10::32/128", vnet_name, 'fd:8:1::2,fd:8:1::3,fd:8:1::1,fd:8:1::4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1, - ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) - check_state_db_routes(dvs, vnet_name, "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # default monitor status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 not in vnet_obj.nhgs + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') - # Create another tunnel route to the same set of endpoints - create_vnet_routes(dvs, "fd:8:20::32/128", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, - ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) - check_state_db_routes(dvs, vnet_name, "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) - check_remove_routes_advertisement(dvs, "fd:8:20::32/128") + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") - assert nhg2_1 == nhg1_2 + # Remove first primary endpoint from group. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") - # Create another tunnel route with ipv4 prefix to the same set of endpoints - create_vnet_routes(dvs, "8.0.0.0/24", vnet_name, 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, - ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) - check_state_db_routes(dvs, vnet_name, "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) - check_remove_routes_advertisement(dvs, "8.0.0.0/24") + # Switch to secondary if both primary down + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") - assert nhg3_1 == nhg1_2 + # removing first endpoint of secondary. route should remain on secondary NHG + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") - # Remove one of the tunnel routes - delete_vnet_routes(dvs, "fd:8:10::32/128", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, vnet_name, "fd:8:10::32/128") - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # removing last endpoint of secondary. route should be removed + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) - # Check the nexthop group still exists - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_2 in vnet_obj.nhgs + new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, []) + assert len(new_nhgs) == 0 + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") - # Remove tunnel route 2 - delete_vnet_routes(dvs, "fd:8:20::32/128", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:8:20::32/128"]) - check_remove_state_db_routes(dvs, vnet_name, "fd:8:20::32/128") - check_remove_routes_advertisement(dvs, "fd:8:20::32/128") + #Route should come up with secondary endpoints. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') - # Remove tunnel route 3 - delete_vnet_routes(dvs, "8.0.0.0/24", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["8.0.0.0/24"]) - check_remove_state_db_routes(dvs, vnet_name, "8.0.0.0/24") - check_remove_routes_advertisement(dvs, "8.0.0.0/24") + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") - # Check the nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg2_1 not in vnet_obj.nhgs + #Route should be switched to the primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should be updated with the second primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should not be impacted by seconday endpoints going down. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should not be impacted by seconday endpoints coming back up. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + time.sleep(2) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + # Confirm the monitor sessions are removed + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) - - ''' - Test 9 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor ''' - def test_vnet_orch_9(self, dvs, ordered_ecmp, testlog): + Test 19 - Test for 2 priority vnet tunnel routes with overlapping primary secondary ECMP nexthop group. + ''' + def test_vnet_orch_19(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - - tunnel_name = 'tunnel_9' + ordered_ecmp - vnet_name = 'Vnet9' + ordered_ecmp + tunnel_name = 'tunnel_19' + vnet_name = 'Vnet19' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10009', "") + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10019', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10009') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10019') - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', profile="Test_profile", primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') + create_vnet_routes(dvs, "200.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.3,9.1.0.4', monitoring='custom', adv_prefix='200.100.1.0/24') - # default bfd status is down, route should not be programmed in this status + # default monitor session status is down, route should not be programmed in this status vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, '9.1.0.2', 'Up') - update_bfd_session_state(dvs, '9.1.0.3', 'Up') - update_bfd_session_state(dvs, '9.1.0.1', 'Up') + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Remove endpoint from group if it goes down - update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.3']) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") - # Create another tunnel route with endpoint group overlapped with route1 - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) - check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1']) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Update BFD session state and verify route change - update_bfd_session_state(dvs, '9.1.0.5', 'Up') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) - check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) - check_remove_routes_advertisement(dvs, "100.100.2.1/32") + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") - # Update BFD state and check route nexthop - update_bfd_session_state(dvs, '9.1.0.3', 'Down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1']) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Set the route1 to a new group - set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') - update_bfd_session_state(dvs, '9.1.0.4', 'Up') + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '4']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 not in vnet_obj.nhgs + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") - # Set BFD session state for a down endpoint to up - update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'down') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Set all endpoint to down state - update_bfd_session_state(dvs, '9.1.0.1', 'Down') - update_bfd_session_state(dvs, '9.1.0.2', 'Down') - update_bfd_session_state(dvs, '9.1.0.3', 'Down') - update_bfd_session_state(dvs, '9.1.0.4', 'Down') + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'down') time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) - check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['9.0.0.5']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.2.1/32") + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") - # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") - check_remove_routes_advertisement(dvs, "100.100.2.1/32") + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Check the corresponding nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg2_1 not in vnet_obj.nhgs + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") - # Check the BFD session specific to the endpoint group is removed while others exist - check_del_bfd_session(dvs, ['9.1.0.5']) - check_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4']) + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + + #we should still have two NHGs but no active route + new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, vnet_obj.nhgs) + assert len(new_nhgs) == 0 + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.1/32") + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") # Remove tunnel route 1 delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + delete_vnet_routes(dvs, "200.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_2 not in vnet_obj.nhgs + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") - # Confirm the BFD sessions are removed - check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4', '9.1.0.5']) + + # Confirm the monitor sessions are removed + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") + + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.4") delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) - ''' - Test 10 - Test for ipv6 vnet tunnel routes with ECMP nexthop group with endpoint health monitor + Test 20 - Test for Single enpoint priority vnet tunnel routes. Test primary secondary switchover. ''' - def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): + def test_vnet_orch_20(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - - tunnel_name = 'tunnel_10' + ordered_ecmp - vnet_name = 'Vnet10' + ordered_ecmp + tunnel_name = 'tunnel_20' + vnet_name = 'Vnet20' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "") + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10020', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10020') - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2', ep_monitor='9.1.0.1,9.1.0.2', primary ='9.1.0.1', profile="Test_profile", monitoring='custom', adv_prefix='100.100.1.0/24') - # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + # default monitor session status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') - update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') - update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3']) - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 - # Remove endpoint from group if it goes down - update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - # Create another tunnel route with endpoint group overlapped with route1 - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:10:20::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) - check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1']) - check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') - # Update BFD session state and verify route change - update_bfd_session_state(dvs, 'fd:10:2::5', 'Up') time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '3']) - check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) - check_remove_routes_advertisement(dvs, "fd:10:20::1/128") - # Update BFD state and check route nexthop - update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') - update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') - time.sleep(2) - - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 - # Set the route to a new group - set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') - update_bfd_session_state(dvs, 'fd:10:2::4', 'Up') - time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '4']) - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 not in vnet_obj.nhgs + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.2'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Set BFD session state for a down endpoint to up - update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2, - ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2', '3', '4']) - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - # Set all endpoint to down state - update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') - update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') - update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') - update_bfd_session_state(dvs, 'fd:10:2::4', 'Down') - time.sleep(2) + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 - # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['3']) - check_state_db_routes(dvs, vnet_name, "fd:10:20::1/128", ['fd:10:1::5']) - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - # Remove tunnel route2 - delete_vnet_routes(dvs, "fd:10:20::1/128", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:20::1/128"]) - check_remove_state_db_routes(dvs, vnet_name, "fd:10:20::1/128") - check_remove_routes_advertisement(dvs, "fd:10:20::1/128") + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') - # Check the corresponding nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg2_1 not in vnet_obj.nhgs + time.sleep(2) - # Check the BFD session specific to the endpoint group is removed while others exist - check_del_bfd_session(dvs, ['fd:10:2::5']) - check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") - # Check the BFD session specific to the endpoint group is removed while others exist - check_del_bfd_session(dvs, ['fd:10:2::5']) - check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) # Remove tunnel route 1 - delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) - check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128") - check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) - # Confirm the BFD sessions are removed - check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4', 'fd:10:2::5']) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_2 not in vnet_obj.nhgs + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor + Test 21 - Test for priority vxlan tunnel with adv_prefix, adv profile. test route re-addition, route update, primary seocndary swap. ''' - def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): + def test_vnet_orch_21(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_11' + ordered_ecmp - vnet_name = 'Vnet11' + ordered_ecmp - + tunnel_name = 'tunnel_21' + vnet_name = "Vnet21" vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') - create_vnet_entry(dvs, vnet_name, tunnel_name, '100011', "") + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10021', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '100011') - - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') - - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.1', ep_monitor='11.1.0.1') - - # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - - # Route should be properly configured when bfd session state goes up - update_bfd_session_state(dvs, '11.1.0.1', 'Up') - time.sleep(2) - vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.1', tunnel_name) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['11.0.0.1']) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10021') - # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", vnet_name, '11.0.0.2,11.0.0.1', ep_monitor='11.1.0.2,11.1.0.1') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1'], tunnel_name, ordered_ecmp=ordered_ecmp, nh_seq_id=['1']) - check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1']) - check_remove_routes_advertisement(dvs, "100.100.2.1/32") - # Create a third tunnel route with another endpoint - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.3.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') + #Add first Route + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4', profile = "test_prf", primary ='fd:10:1::3,fd:10:1::4',monitoring='custom', adv_prefix="fd:10:10::/64") + update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::2', 'up') - # Update BFD session state and verify route change - update_bfd_session_state(dvs, '11.1.0.2', 'Up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['1', '2']) - check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) - check_remove_routes_advertisement(dvs, "100.100.2.1/32") - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.3.1/32") - + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1','fd:10:1::2'], tunnel_name, prefix="fd:10:10::1/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1,fd:10:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") - update_bfd_session_state(dvs, '11.1.0.1', 'Down') + #add 2nd route + create_vnet_routes(dvs, "fd:10:10::21/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::2', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::3', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::4', 'up') + time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1, ordered_ecmp=ordered_ecmp, nh_seq_id=['2']) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_state_db_routes(dvs, vnet_name, "100.100.2.1/32", ['11.0.0.2']) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.2.1/32") - - - # Set the route1 to a new endpoint - vnet_obj.fetch_exist_entries(dvs) - set_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '11.0.0.2', ep_monitor='11.1.0.2') - vnet_obj.check_vnet_routes(dvs, vnet_name, '11.0.0.2', tunnel_name) - check_state_db_routes(dvs, vnet_name, "100.100.3.1/32", ['11.0.0.2']) - check_remove_routes_advertisement(dvs, "100.100.3.1/32") + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::21/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::21/128", ['fd:11:1::1,fd:11:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") - # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.2.1/32") - check_remove_routes_advertisement(dvs, "100.100.2.1/32") + #remove first route + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::1/128") - # Check the corresponding nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg2_1 not in vnet_obj.nhgs + #adv should still be up. + check_routes_advertisement(dvs, "fd:10:10::/64") - # Check the BFD session specific to the endpoint group is removed while others exist - check_del_bfd_session(dvs, ['11.1.0.1']) - check_bfd_session(dvs, ['11.1.0.2']) + #add 3rd route + create_vnet_routes(dvs, "fd:10:10::31/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') + update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::31/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::31/128", ['fd:11:1::1,fd:11:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") - # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + #delete 2nd route + delete_vnet_routes(dvs, "fd:10:10::21/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::21/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::21/128") - # Remove tunnel route 3 - delete_vnet_routes(dvs, "100.100.3.1/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.3.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.3.1/32") - check_remove_routes_advertisement(dvs, "100.100.3.1/32") + #adv should still be up. + check_routes_advertisement(dvs, "fd:10:10::/64") - # Confirm the BFD sessions are removed - check_del_bfd_session(dvs, ['11.1.0.1', '11.1.0.2']) + #remove 3rd route + delete_vnet_routes(dvs, "fd:10:10::31/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::31/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::31/128") - delete_vnet_entry(dvs, vnet_name) + #adv should be gone. + check_remove_routes_advertisement(dvs, "fd:10:10::/64") + delete_vnet_entry(dvs,vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) - ''' - Test 12 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor and route advertisement + Test 22 - Test for vxlan custom monitoring with adv_prefix. Add route twice and change nexthops case ''' - def test_vnet_orch_12(self, dvs, testlog): + def test_vnet_orch_22(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_12' - + tunnel_name = 'tunnel_22' + vnet_name = "Vnet22" vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') - create_vnet_entry(dvs, 'Vnet12', tunnel_name, '10012', "", advertise_prefix=True) - - vnet_obj.check_vnet_entry(dvs, 'Vnet12') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet12', '10012') - - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') - - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3', profile="test_profile") - - # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") - # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, '12.1.0.1', 'Up') - update_bfd_session_state(dvs, '12.1.0.2', 'Up') - update_bfd_session_state(dvs, '12.1.0.3', 'Up') - time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) - check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') - # Remove endpoint from group if it goes down - update_bfd_session_state(dvs, '12.1.0.2', 'Down') - time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) - check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') - # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.5', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.5') - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name) - check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1']) - check_routes_advertisement(dvs, "100.100.1.1/32") - - # Update BFD session state and verify route change - update_bfd_session_state(dvs, '12.1.0.5', 'Up') - time.sleep(2) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.1', '12.0.0.5']) - check_routes_advertisement(dvs, "100.100.2.1/32") - - # Update BFD state and check route nexthop - update_bfd_session_state(dvs, '12.1.0.3', 'Down') + #Add first Route + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') time.sleep(2) + vnet_obj.check_vnet_routes(dvs, vnet_name, '19.0.0.1', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) - check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") - - # Set the route1 to a new group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4', profile="test_profile2") - update_bfd_session_state(dvs, '12.1.0.4', 'Up') - time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.4']) - check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") + #Add first Route again + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 not in vnet_obj.nhgs + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") - # Set BFD session state for a down endpoint to up - update_bfd_session_state(dvs, '12.1.0.2', 'Up') - time.sleep(2) - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) - check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) - check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") + #adv should be gone. + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # Set all endpoint to down state - update_bfd_session_state(dvs, '12.1.0.1', 'Down') - update_bfd_session_state(dvs, '12.1.0.2', 'Down') - update_bfd_session_state(dvs, '12.1.0.3', 'Down') - update_bfd_session_state(dvs, '12.1.0.4', 'Down') + #add 2nd route + create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.2', 'up') time.sleep(2) - - # Confirm the tunnel route is updated in ASIC - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) - route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) - check_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32", ['12.0.0.5']) - check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - check_routes_advertisement(dvs, "100.100.2.1/32") - - # Remove tunnel route2 - delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet12') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.2.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.2.1/32") - check_remove_routes_advertisement(dvs, "100.100.2.1/32") - - # Check the corresponding nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg2_1 not in vnet_obj.nhgs - - # Check the BFD session specific to the endpoint group is removed while others exist - check_del_bfd_session(dvs, ['12.1.0.5']) - check_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4']) - - # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_2 not in vnet_obj.nhgs - - # Confirm the BFD sessions are removed - check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3', '12.1.0.4', '12.1.0.5']) - - delete_vnet_entry(dvs, 'Vnet12') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet12') - delete_vxlan_tunnel(dvs, tunnel_name) - - ''' - Test 13 - Test for configuration idempotent behaviour - ''' - def test_vnet_orch_13(self, dvs, testlog): - vnet_obj = self.get_vnet_obj() - - tunnel_name = 'tunnel_13' - vnet_obj.fetch_exist_entries(dvs) - - create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_entry(dvs, 'Vnet13', tunnel_name, '10008', "") - - vnet_obj.check_vnet_entry(dvs, 'Vnet13') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet13', '10008') - - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - - # Create an ECMP tunnel route - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.57/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1,5.0.0.2']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - # readd same tunnel again - set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + #modify 2nd route switch primary with secondary + create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.57/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1','5.0.0.2']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - # Check only one group is present - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 in vnet_obj.nhgs - assert len(vnet_obj.nhgs) == 1 - assert nhg1_1 == nhg1_2 + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - # Remove one of the tunnel routes - delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet13', ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128") - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + #delete 2nd route + delete_vnet_routes(dvs, "100.100.1.57/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.57/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.57/32") + #adv should be gone. + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # Check the nexthop group still exists - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_2 not in vnet_obj.nhgs - assert len(vnet_obj.nhgs) == 0 - delete_vnet_entry(dvs, 'Vnet13') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet13') + #add 3rd route + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1,5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - ''' - Test 14 - Test for configuration idempotent behaviour 2 - ''' - def test_vnet_orch_14(self, dvs, testlog): - vnet_obj = self.get_vnet_obj() + #modify 3rd route next hops to secondary + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1','5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - tunnel_name = 'tunnel_14' - vnet_obj.fetch_exist_entries(dvs) + #modify 3rd route next hops to a new set. + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.5,5.0.0.6',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.5', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.6', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.5','5.0.0.6'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.5,5.0.0.6']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_entry(dvs, 'Vnet14', tunnel_name, '10008', "") + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.7', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.8', 'up') - vnet_obj.check_vnet_entry(dvs, 'Vnet14') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet14', '10008') + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.7,5.0.0.8',monitoring='custom', adv_prefix='100.100.1.0/24') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.7','5.0.0.8'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.7,5.0.0.8']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + #delete 3rd route + delete_vnet_routes(dvs, "100.100.1.67/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.67/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.67/32") + #adv should be gone. + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # Create an ECMP tunnel route - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + #Add priority route with no secondary enpoints + create_vnet_routes(dvs, "100.100.1.71/32", vnet_name, '19.0.0.1,19.0.0.2', ep_monitor='19.0.0.1,19.0.0.2', profile = "test_prf", primary ='19.0.0.1,19.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'up') - # readd same tunnel again - set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + #verify that no BFD sessions are created. + check_del_bfd_session(dvs, ['19.0.0.1']) + check_del_bfd_session(dvs, ['19.0.0.2']) + time.sleep(2) + check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.1,19.0.0.2']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - #update nexthops for the same tunnel. - set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') - route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'down') + check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.2']) # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - - # Check the previous nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 not in vnet_obj.nhgs - assert nhg1_2 in vnet_obj.nhgs - - # Remove the tunnel route - delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - # Remove the tunnel route - delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - # Check the nexthop group still exists - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_2 not in vnet_obj.nhgs - assert nhg1_1 not in vnet_obj.nhgs + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'down') + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.71/32") - delete_vnet_entry(dvs, 'Vnet14') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet14') + #remove first route + delete_vnet_routes(dvs, "100.100.1.71/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.71/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.71/32") + + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 15 - Test for configuration idempotent behaviour single endpoint + Test 23 - Test for vxlan custom monitoring. CHanging the overlay_dmac of the Vnet on the fly. ''' - def test_vnet_orch_15(self, dvs, testlog): + def test_vnet_orch_23(self, dvs, testlog): vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_15' + tunnel_name = 'tunnel_22' + vnet_name = "Vnet22" vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') - create_vnet_entry(dvs, 'Vnet15', tunnel_name, '10008', "") + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + delete_vnet_entry(dvs,vnet_name) - vnet_obj.check_vnet_entry(dvs, 'Vnet15') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet15', '10008') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") + delete_vnet_entry(dvs,vnet_name) - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + #update the Dmac of the vnet before adding any routes. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") - # Create an tunnel route - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') - route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name) - check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') - # readd same tunnel again - set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') - route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") - # Check only one group is present vnet_obj.fetch_exist_entries(dvs) - assert len(vnet_obj.nhops) == 1 + #Add first Route + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:77") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:77") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:77") - # Remove one of the tunnel routes - delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet15', ["fd:8:10::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128") - check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + #update the Dmac after a route is added. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:88") - # Check the nexthop group still exists - vnet_obj.fetch_exist_entries(dvs) - assert len(vnet_obj.nhops) == 0 - delete_vnet_entry(dvs, 'Vnet15') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet15') - delete_vxlan_tunnel(dvs, tunnel_name) + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") - ''' - Test 16 - Test for configuration idempotent behaviour single endpoint with BFD - ''' - def test_vnet_orch_16(self, dvs, testlog): - vnet_obj = self.get_vnet_obj() + #bring up an enpoint. + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') - tunnel_name = 'tunnel_16' - vnet_obj.fetch_exist_entries(dvs) + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") - create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') - create_vnet_entry(dvs, 'Vnet16', tunnel_name, '10008', "") + #update the Dmac to empty. This should have no impact. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="") - vnet_obj.check_vnet_entry(dvs, 'Vnet16') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet16', '10008') + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") - # Create a tunnel route - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') - update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') + #make sure that the app db entries are removed. + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") time.sleep(2) - route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name) - check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + #bring down an enpoint. + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'down') - # readd same tunnel again - set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') - route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name, route_ids=route1) - check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:11::32/128") - # Check only one group is present - vnet_obj.fetch_exist_entries(dvs) - assert len(vnet_obj.nhops) == 1 + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") - update_bfd_session_state(dvs, 'fd:8:2::1', 'Down') - time.sleep(2) - # readd same tunnel again - set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + #Add first Route again + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') - update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') - time.sleep(2) + #bring up the endpoint. + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') - route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name,route_ids=route1) - check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + # The default Vnet setting advertises the prefix. + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:66") - # Remove one of the tunnel routes - delete_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet16', ["fd:8:11::32/128"]) - check_remove_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128") - check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") - # Check the nexthop group still exists - vnet_obj.fetch_exist_entries(dvs) - assert len(vnet_obj.nhops) == 0 - delete_vnet_entry(dvs, 'Vnet16') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet16') + #make sure that the app db entries are removed. + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") + time.sleep(2) + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 17 - Test for configuration idempotent behaviour multiple endpoint with BFD + Test 24 - Test duplicate route addition and removal. ''' - def test_vnet_orch_17(self, dvs, testlog): + def test_vnet_orch_24(self, dvs, testlog): + self.setup_db(dvs) + self.clear_srv_config(dvs) + vnet_obj = self.get_vnet_obj() + vnet_obj.fetch_exist_entries(dvs) - tunnel_name = 'tunnel_17' + # create vxlan tunnel and vnet in default vrf + tunnel_name = 'tunnel_24' + create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') + create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "", 'default') + vnet_obj.check_default_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, 'Vnet17', tunnel_name, '10017', "") - - vnet_obj.check_vnet_entry(dvs, 'Vnet17') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet17', '10017') + # create vnet route + create_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000', '10.10.10.3') + vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.3', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24", ['10.10.10.3']) + time.sleep(2) - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + # create l3 interface + self.create_l3_intf("Ethernet0", "") - vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + # set ip address + self.add_ip_address("Ethernet0", "10.10.10.1/24") - # default bfd status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # bring up interface + self.set_admin_status("Ethernet0", "up") - #readd the route - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) - check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # set ip address and default route + dvs.servers[0].runcmd("ip address add 10.10.10.3/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.10.10.1") - # Route should be properly configured when all bfd session states go up - update_bfd_session_state(dvs, '9.1.0.1', 'Up') - update_bfd_session_state(dvs, '9.1.0.2', 'Up') - update_bfd_session_state(dvs, '9.1.0.3', 'Up') + marker = dvs.add_log_marker("/var/log/syslog") time.sleep(2) - route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) - check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # add another route for same prefix as vnet route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 100.100.1.0/24 10.10.10.3\"") - #readd the active route - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') - route2, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) - check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) - # The default Vnet setting does not advertise prefix - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - assert nhg1_1 == nhg1_2 - assert len(vnet_obj.nhgs) == 1 + # check application database + self.pdb.wait_for_entry("ROUTE_TABLE", "100.100.1.0/24") - # Remove tunnel route - delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.1/32") + # check ASIC route database + self.check_route_entries(["100.100.1.0/24"]) - # Check the corresponding nexthop group is removed - vnet_obj.fetch_exist_entries(dvs) - assert nhg1_1 not in vnet_obj.nhgs - # Check the BFD session specific to the endpoint group is removed while others exist - check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + log_string = "Encountered failure in create operation, exiting orchagent, SAI API: SAI_API_ROUTE, status: SAI_STATUS_NOT_EXECUTED" + # check for absence of log_string in syslog + check_syslog(dvs, marker, log_string) + + # remove route entry + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 100.100.1.0/24 10.10.10.3\"") + + # delete vnet route + delete_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') + check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24") - delete_vnet_entry(dvs, 'Vnet17') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet17') + # delete vnet + delete_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') + + # delete vxlan tunnel delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 18 - Test for priority vnet tunnel routes with ECMP nexthop group. test primary secondary switchover. + Test 25 - Test for BFD TSA and TSB behaviour within overlay tunnel routes. ''' - def test_vnet_orch_18(self, dvs, testlog): + def test_vnet_orch_25(self, dvs, testlog): + # This test creates a vnet route with BFD monitoring.This followd by application of TSA and absence of BFD sessions + # is verified. Following the removal of TSA the Vnet route is verified to be up. vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_18' - vnet_name = 'vnet18' - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tunnel_name = 'tunnel_25' vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10018', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, 'Vnet25', tunnel_name, '10025', "") - vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10018') + vnet_obj.check_vnet_entry(dvs, 'Vnet25') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet25', '10025') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') - - # default monitor status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.0/24") - - # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') - - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") + create_vnet_routes(dvs, "125.100.1.1/32", 'Vnet25', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') - # Remove first primary endpoint from group. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') - time.sleep(2) - route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet25', ["125.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "125.100.1.1/32") - # Switch to secondary if both primary down - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") - # removing first endpoint of secondary. route should remain on secondary NHG - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) + # make sure the route is up. + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet25', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") - - # removing last endpoint of secondary. route should be removed - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') - time.sleep(2) - - new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, []) - assert len(new_nhgs) == 0 - check_remove_routes_advertisement(dvs, "100.100.1.0/24") - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") - - #Route should come up with secondary endpoints. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + check_remove_routes_advertisement(dvs, "125.100.1.1/32") + # tsa would remove all bfd sessions down. + set_tsa(dvs) time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") - #Route should be switched to the primary endpoint. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') - time.sleep(2) - route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") + # Route should be removed. + vnet_obj.check_del_vnet_routes(dvs, 'Vnet25', ["125.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "125.100.1.1/32") - #Route should be updated with the second primary endpoint. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + #clearing TSA should bring the route back. + clear_tsa(dvs) time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") - #Route should not be impacted by seconday endpoints going down. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") - #Route should not be impacted by seconday endpoints coming back up. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet25', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24") + check_remove_routes_advertisement(dvs, "125.100.1.1/32") - # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) - time.sleep(2) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.0/24") + # Remove tunnel route + delete_vnet_routes(dvs, "125.100.1.1/32", 'Vnet25') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet25', ["125.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet25', "125.100.1.1/32") + check_remove_routes_advertisement(dvs, "125.100.1.1/32") - # Confirm the monitor sessions are removed - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) - delete_vnet_entry(dvs, vnet_name) - vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vnet_entry(dvs, 'Vnet25') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet25') delete_vxlan_tunnel(dvs, tunnel_name) ''' - Test 19 - Test for 2 priority vnet tunnel routes with overlapping primary secondary ECMP nexthop group. + Test 26 - Test for vnet tunnel routes with ECMP nexthop group with subnet decap enable ''' - def test_vnet_orch_19(self, dvs, testlog): - vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_19' - vnet_name = 'Vnet19' - asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + def test_vnet_orch_26(self, dvs, setup_subnet_decap): + # apply subnet decap config + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) + vnet_obj = self.get_vnet_obj() vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10019', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + # Add the subnet decap tunnel + create_subnet_decap_tunnel(dvs, "IPINIP_SUBNET", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe") + vnet_obj.check_ipinip_tunnel(dvs, "IPINIP_SUBNET", "uniform", "standard", "pipe") - vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10019') + vnet_obj.fetch_exist_entries(dvs) + tunnel_name = 'tunnel_26' + create_vxlan_tunnel(dvs, tunnel_name, '26.26.26.26') + create_vnet_entry(dvs, 'Vnet26', tunnel_name, '10026', "", advertise_prefix=True) - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') + vnet_obj.check_vnet_entry(dvs, 'Vnet26') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet26', '10026') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '26.26.26.26') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', profile="Test_profile", primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') - create_vnet_routes(dvs, "200.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.3,9.1.0.4', monitoring='custom', adv_prefix='200.100.1.0/24') + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet26', '26.0.0.1,26.0.0.2,26.0.0.3', ep_monitor='26.1.0.1,26.1.0.2,26.1.0.3', profile="test_profile") - # default monitor session status is down, route should not be programmed in this status - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "100.100.1.0/24") + with pytest.raises(AssertionError): + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24") - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", []) - check_remove_routes_advertisement(dvs, "200.100.1.0/24") + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, prefix="100.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) - check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '26.1.0.1', 'Up') - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'up') time.sleep(2) - route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1']) - check_routes_advertisement(dvs, "200.100.1.0/24", "") + # subnet decap term should be created as one bfd session state go up + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + update_bfd_session_state(dvs, '26.1.0.2', 'Up') + update_bfd_session_state(dvs, '26.1.0.3', 'Up') time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) - check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet26', ['26.0.0.1', '26.0.0.2', '26.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", ['26.0.0.1', '26.0.0.2', '26.0.0.3']) + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'up') + # Set all endpoint to down state + update_bfd_session_state(dvs, '26.1.0.1', 'Down') + update_bfd_session_state(dvs, '26.1.0.2', 'Down') + update_bfd_session_state(dvs, '26.1.0.3', 'Down') time.sleep(2) - route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1','9.1.0.2']) - check_routes_advertisement(dvs, "200.100.1.0/24", "") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'up') - time.sleep(2) + # subnet decap term should be removed as all bfd session states go down + vnet_obj.check_del_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET", "100.100.1.1/32", "10.10.10.0/24") - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) - check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3']) - check_routes_advertisement(dvs, "200.100.1.0/24", "") + # Remove tunnel route + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet26') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet26', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet26', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'up') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) - check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['12.1.0.1', '12.1.0.2', '12.1.0.3']) - route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) - check_routes_advertisement(dvs, "200.100.1.0/24", "") + delete_vnet_entry(dvs, 'Vnet26') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet26') + delete_vxlan_tunnel(dvs, tunnel_name) - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'down') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) - check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + # Remove the subnet decap tunnel + vnet_obj.fetch_exist_entries(dvs) + delete_subnet_decap_tunnel(dvs, "IPINIP_SUBNET") + vnet_obj.check_del_ipinip_tunnel(dvs, "IPINIP_SUBNET") - route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) - check_routes_advertisement(dvs, "200.100.1.0/24", "") + ''' + Test 27 - Test for IPv6 vnet tunnel routes with ECMP nexthop group with subnet decap enable + ''' + def test_vnet_orch_27(self, dvs, setup_subnet_decap): + subnet_decap_config = { + "status": "enable", + "src_ip": "10.10.10.0/24", + "src_ip_v6": "20c1:ba8::/64" + } + setup_subnet_decap(subnet_decap_config) - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'down') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) - check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + vnet_obj = self.get_vnet_obj() + vnet_obj.fetch_exist_entries(dvs) - route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) - check_routes_advertisement(dvs, "200.100.1.0/24", "") + # Add the subnet decap tunnel + create_subnet_decap_tunnel(dvs, "IPINIP_SUBNET_V6", tunnel_type="IPINIP", + dscp_mode="uniform", ecn_mode="standard", ttl_mode="pipe") + vnet_obj.check_ipinip_tunnel(dvs, "IPINIP_SUBNET_V6", "uniform", "standard", "pipe") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'down') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) - check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + vnet_obj.fetch_exist_entries(dvs) + tunnel_name = 'tunnel_27' + vnet_name = 'Vnet26' + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10010', "", advertise_prefix=True) - route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") - check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.4']) - check_routes_advertisement(dvs, "200.100.1.0/24", "") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10010') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') - update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'down') - time.sleep(2) + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3', profile="test_profile") - #we should still have two NHGs but no active route - new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, vnet_obj.nhgs) - assert len(new_nhgs) == 0 - check_remove_routes_advertisement(dvs, "100.100.1.1/32") - check_remove_routes_advertisement(dvs, "200.100.1.1/32") - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) - check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") - check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") - check_remove_routes_advertisement(dvs, "100.100.1.0/24") - check_remove_routes_advertisement(dvs, "200.100.1.0/24") + with pytest.raises(AssertionError): + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "100.100.1.1/32", "10.10.10.0/24") + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') - # Remove tunnel route 1 - delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) - delete_vnet_routes(dvs, "200.100.1.1/32", vnet_name) + time.sleep(2) + # subnet decap term should be created as one bfd session state go up + vnet_obj.check_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "fd:10:10::1/128", "20c1:ba8::/64") - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) - vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') + time.sleep(2) + vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + check_routes_advertisement(dvs, "fd:10:10::1/128", "test_profile") - check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") - check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + # Set all endpoint to down state + update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + time.sleep(2) - check_remove_routes_advertisement(dvs, "100.100.1.0/24") - check_remove_routes_advertisement(dvs, "200.100.1.0/24") + # subnet decap term should be removed as all bfd session states go down + vnet_obj.check_del_ipinip_tunnel_decap_term(dvs, "IPINIP_SUBNET_V6", "fd:10:10::1/128", "20c1:ba8::/64") + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", []) + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - # Confirm the monitor sessions are removed - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") + # Remove tunnel route + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, vnet_name, "fd:10:10::1/128") + check_remove_routes_advertisement(dvs, "fd:10:10::1/128") - vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.1") - vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.2") - vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.3") - vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.4") + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3']) delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) + # Remove the subnet decap tunnel + vnet_obj.fetch_exist_entries(dvs) + delete_subnet_decap_tunnel(dvs, "IPINIP_SUBNET_V6") + vnet_obj.check_del_ipinip_tunnel(dvs, "IPINIP_SUBNET_V6") + ''' - Test 20 - Test for Single enpoint priority vnet tunnel routes. Test primary secondary switchover. + Test 28 - Test for Single endpoint priority vnet tunnel routes. Test with local endpoint. ''' - def test_vnet_orch_20(self, dvs, testlog): + def test_vnet_orch_28(self, dvs, dvs_acl, testlog): + self.setup_db(dvs) + self.clear_srv_config(dvs) + vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_20' - vnet_name = 'Vnet20' + tunnel_name = 'tunnel_28' + vnet_name = 'Vnet28' asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) vnet_obj.fetch_exist_entries(dvs) create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10020', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10028', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10020') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10028') vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + # no acl rule before creating vnet route + dvs_acl.verify_no_acl_rules() + + # create l3 interface + self.create_l3_intf("Ethernet8", "") + + # set ip address + self.add_ip_address("Ethernet8", "9.1.0.1/32") + + # bring up interface + self.set_admin_status("Ethernet8", "up") + + # add neighbor for direcetly connected endpoint + self.add_neighbor("Ethernet8", "9.1.0.1", "00:01:02:03:04:05") + vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2', ep_monitor='9.1.0.1,9.1.0.2', primary ='9.1.0.1', profile="Test_profile", monitoring='custom', adv_prefix='100.100.1.0/24') + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2', ep_monitor='9.1.0.3,9.1.0.4', primary ='9.1.0.1', profile="Test_profile", monitoring='custom', adv_prefix='100.100.1.0/24', check_directly_connected=True) + + # verify tunnel term acl + expected_sai_qualifiers = { + "SAI_ACL_ENTRY_ATTR_FIELD_DST_IP": dvs_acl.get_simple_qualifier_comparator("100.100.1.1&mask:255.255.255.255") + } + nh_id = dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", 1)[0] + dvs_acl.verify_redirect_acl_rule(expected_sai_qualifiers, nh_id, priority="9998") # default monitor session status is down, route should not be programmed in this status vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) @@ -2929,8 +2700,8 @@ def test_vnet_orch_20(self, dvs, testlog): check_remove_routes_advertisement(dvs, "100.100.1.0/24") # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') time.sleep(2) nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) @@ -2952,7 +2723,7 @@ def test_vnet_orch_20(self, dvs, testlog): check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') time.sleep(2) route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) @@ -2964,8 +2735,8 @@ def test_vnet_orch_20(self, dvs, testlog): check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') time.sleep(2) @@ -2978,7 +2749,8 @@ def test_vnet_orch_20(self, dvs, testlog): for key in nh_fvs.keys(): if key == 'SAI_NEXT_HOP_ATTR_IP': nexthops[nh_fvs[key]] = nhid - assert len(nexthops.keys()) == 1 + # nexthop from diectly connected endpoint should NOT be removed + assert len(nexthops.keys()) == 2 route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], @@ -2989,7 +2761,7 @@ def test_vnet_orch_20(self, dvs, testlog): check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') time.sleep(2) nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) @@ -3012,8 +2784,8 @@ def test_vnet_orch_20(self, dvs, testlog): check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') - update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') time.sleep(2) @@ -3021,7 +2793,6 @@ def test_vnet_orch_20(self, dvs, testlog): check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") check_remove_routes_advertisement(dvs, "200.100.1.0/24") - # Remove tunnel route 1 delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) @@ -3029,406 +2800,370 @@ def test_vnet_orch_20(self, dvs, testlog): check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") check_remove_routes_advertisement(dvs, "100.100.1.0/24") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) + self.remove_neighbor("Ethernet8", "9.1.0.1") + self.remove_ip_address("Ethernet8", "9.1.0.1/32") + self.set_admin_status("Ethernet8", "down") + ''' - Test 21 - Test for priority vxlan tunnel with adv_prefix, adv profile. test route re-addition, route update, primary seocndary swap. + Test 29 - Test for priority vnet tunnel routes with ECMP nexthop group and local nhg. test primary secondary switchover. ''' - def test_vnet_orch_21(self, dvs, testlog): + def test_vnet_orch_29(self, dvs, testlog): + self.setup_db(dvs) + self.clear_srv_config(dvs) + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_29' + vnet_name = 'vnet29' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - tunnel_name = 'tunnel_21' - vnet_name = "Vnet21" vnet_obj.fetch_exist_entries(dvs) - create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10021', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10029', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10021') - - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') - vnet_obj.fetch_exist_entries(dvs) - - #Add first Route - create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4', profile = "test_prf", primary ='fd:10:1::3,fd:10:1::4',monitoring='custom', adv_prefix="fd:10:10::/64") - update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::1', 'up') - update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::2', 'up') - - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1','fd:10:1::2'], tunnel_name, prefix="fd:10:10::1/128") - check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1,fd:10:1::2']) - check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") - - #add 2nd route - create_vnet_routes(dvs, "fd:10:10::21/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') - update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::1', 'up') - update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::2', 'up') - update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::3', 'up') - update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::4', 'up') - - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::21/128") - check_state_db_routes(dvs, vnet_name, "fd:10:10::21/128", ['fd:11:1::1,fd:11:1::2']) - check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") - - #remove first route - delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::1/128"]) - check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::1/128") - - #adv should still be up. - check_routes_advertisement(dvs, "fd:10:10::/64") + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10029') - #add 3rd route - create_vnet_routes(dvs, "fd:10:10::31/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') - update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::1', 'up') - update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::2', 'up') - time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::31/128") - check_state_db_routes(dvs, vnet_name, "fd:10:10::31/128", ['fd:11:1::1,fd:11:1::2']) - check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") - - #delete 2nd route - delete_vnet_routes(dvs, "fd:10:10::21/128", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::21/128"]) - check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::21/128") + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') - #adv should still be up. - check_routes_advertisement(dvs, "fd:10:10::/64") + # create l3 interface + self.create_l3_intf("Ethernet8", "") + self.create_l3_intf("Ethernet12", "") - #remove 3rd route - delete_vnet_routes(dvs, "fd:10:10::31/128", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::31/128"]) - check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::31/128") + # set ip address + self.add_ip_address("Ethernet8", "9.1.0.3/32") + self.add_ip_address("Ethernet12", "9.1.0.4/32") - #adv should be gone. - check_remove_routes_advertisement(dvs, "fd:10:10::/64") - delete_vnet_entry(dvs,vnet_name) - vnet_obj.check_del_vnet_entry(dvs, vnet_name) - delete_vxlan_tunnel(dvs, tunnel_name) + # bring up interface + self.set_admin_status("Ethernet8", "up") + self.set_admin_status("Ethernet12", "up") - ''' - Test 22 - Test for vxlan custom monitoring with adv_prefix. Add route twice and change nexthops case - ''' - def test_vnet_orch_22(self, dvs, testlog): - vnet_obj = self.get_vnet_obj() + # add neighbor for direcetly connected endpoint + self.add_neighbor("Ethernet8", "9.1.0.3", "00:01:02:03:04:05") + self.add_neighbor("Ethernet12", "9.1.0.4", "00:01:02:03:04:06") - tunnel_name = 'tunnel_22' - vnet_name = "Vnet22" vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24', check_directly_connected=True) - create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") - - vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') + # default monitor status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') - vnet_obj.fetch_exist_entries(dvs) - #Add first Route - create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') - update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') time.sleep(2) - vnet_obj.check_vnet_routes(dvs, vnet_name, '19.0.0.1', tunnel_name) - check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + check_routes_advertisement(dvs, "100.100.1.0/24") - #Add first Route again - create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') - check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + # Remove first primary endpoint from group. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - - #remove first route - delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") - - #adv should be gone. - check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_routes_advertisement(dvs, "100.100.1.0/24") - #add 2nd route - create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') - update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.1', 'up') - update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.2', 'up') + # Switch to secondary if both primary down + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.57/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1,5.0.0.2']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + check_routes_advertisement(dvs, "100.100.1.0/24") - #modify 2nd route switch primary with secondary - create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.57/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1','5.0.0.2']) + # removing first endpoint of secondary. route should remain on secondary NHG + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + time.sleep(2) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - - #delete 2nd route - delete_vnet_routes(dvs, "100.100.1.57/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.57/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.57/32") - #adv should be gone. - check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_routes_advertisement(dvs, "100.100.1.0/24") - #add 3rd route - create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') - update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.1', 'up') - update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.2', 'up') + # removing last endpoint of secondary. route should be removed + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.67/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1,5.0.0.2']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - #modify 3rd route next hops to secondary - create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1','5.0.0.2']) - # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, []) + assert len(new_nhgs) == 0 + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + + #Route should come up with secondary endpoints. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') - #modify 3rd route next hops to a new set. - create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.5,5.0.0.6',monitoring='custom', adv_prefix='100.100.1.0/24') - update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.5', 'up') - update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.6', 'up') time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.5','5.0.0.6'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.5,5.0.0.6']) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - - update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.7', 'up') - update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.8', 'up') + check_routes_advertisement(dvs, "100.100.1.0/24") - create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.7,5.0.0.8',monitoring='custom', adv_prefix='100.100.1.0/24') + #Route should be switched to the primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') time.sleep(2) - route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.7','5.0.0.8'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") - check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.7,5.0.0.8']) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") - - #delete 3rd route - delete_vnet_routes(dvs, "100.100.1.67/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.67/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.67/32") - #adv should be gone. - check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_routes_advertisement(dvs, "100.100.1.0/24") - #Add priority route with no secondary enpoints - create_vnet_routes(dvs, "100.100.1.71/32", vnet_name, '19.0.0.1,19.0.0.2', ep_monitor='19.0.0.1,19.0.0.2', profile = "test_prf", primary ='19.0.0.1,19.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') - update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'up') - update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'up') + #Route should be updated with the second primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") - #verify that no BFD sessions are created. - check_del_bfd_session(dvs, ['19.0.0.1']) - check_del_bfd_session(dvs, ['19.0.0.2']) + #Route should not be impacted by seconday endpoints going down. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') time.sleep(2) - check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.1,19.0.0.2']) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + check_routes_advertisement(dvs, "100.100.1.0/24") - update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'down') - check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.2']) + #Route should not be impacted by seconday endpoints coming back up. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) # The default Vnet setting does not advertise prefix - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + check_routes_advertisement(dvs, "100.100.1.0/24") - update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'down') - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.71/32") + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + time.sleep(2) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - #remove first route - delete_vnet_routes(dvs, "100.100.1.71/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.71/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.71/32") + # Confirm the monitor sessions are removed + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") - delete_vnet_entry(dvs,vnet_name) + delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) + self.remove_neighbor("Ethernet8", "9.1.0.3") + self.remove_ip_address("Ethernet8", "9.1.0.3/32") + self.set_admin_status("Ethernet8", "down") + + self.remove_neighbor("Ethernet12", "9.1.0.4") + self.remove_ip_address("Ethernet12", "9.1.0.4/32") + self.set_admin_status("Ethernet12", "down") + ''' - Test 23 - Test for vxlan custom monitoring. CHanging the overlay_dmac of the Vnet on the fly. + Test 30 - Test vnet local route with single nexthop ''' - def test_vnet_orch_23(self, dvs, testlog): + def test_vnet_local_route_single(self, dvs, testlog): + self.setup_db(dvs) + vnet_obj = self.get_vnet_obj() - tunnel_name = 'tunnel_22' - vnet_name = "Vnet22" + tunnel_name = 'tunnel_30' + vnet_obj.fetch_exist_entries(dvs) + vnet_name = "Vnet5000" - create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") - delete_vnet_entry(dvs,vnet_name) + # setup vnet and vlan + create_vxlan_tunnel(dvs, tunnel_name, '30.30.30.30') + create_vnet_entry(dvs, vnet_name, tunnel_name, '5000', "") - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") - delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '5000') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '30.30.30.30') - #update the Dmac of the vnet before adding any routes. - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") + # setup interface under vnet + create_phy_interface(dvs, "Ethernet20", vnet_name, "10.10.0.8/31") + vnet_obj.check_router_interface(dvs, "Ethernet20", vnet_name) - vnet_obj.check_vnet_entry(dvs, vnet_name) - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') + # bring up interface + self.set_admin_status("Ethernet20", "up") - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + # setup vnet neighbor + self.add_neighbor("Ethernet20", "10.10.0.9", "00:01:02:03:04:05") vnet_obj.fetch_exist_entries(dvs) - #Add first Route - create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') - #verify the appdb entries. - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:77") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:77") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:77") - #update the Dmac after a route is added. - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:88") + num_nh = len(vnet_obj.nhops) + num_nhg = len(vnet_obj.nhgs) + num_nhgm = len(vnet_obj.nhgms) - #verify the appdb entries. - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + # create vnet local route + create_vnet_local_routes(dvs, "10.10.0.0/24", vnet_name, 'Ethernet20', "10.10.0.9") + route_keys = vnet_obj.check_vnet_local_routes(dvs, vnet_name) + vnet_obj.check_vnet_local_route_nexthops(dvs, route_keys[0], ["10.10.0.9"]) - #bring up an enpoint. - update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + # Clean-up and verify remove flows + delete_vnet_local_routes(dvs, "10.10.0.0/24", vnet_name) + vnet_obj.check_del_vnet_local_routes(dvs, vnet_name, "10.10.0.0/24") - #verify the appdb entries. - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == num_nh + assert len(vnet_obj.nhgs) == num_nhg + assert len(vnet_obj.nhgms) == num_nhgm - #update the Dmac to empty. This should have no impact. - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="") + self.remove_neighbor("Ethernet20", "10.10.0.9") + self.set_admin_status("Ethernet20", "down") - #verify the appdb entries. - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + delete_phy_interface(dvs, "Ethernet20", "10.10.0.8/31") + vnet_obj.check_del_router_interface(dvs, "Ethernet20") - #remove first route - delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) - #make sure that the app db entries are removed. - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") - time.sleep(2) + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) - #bring down an enpoint. - update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'down') + ''' + Test 31 - Test ecmp vnet local route with multiple nexthops + ''' + def test_vnet_local_route_ecmp(self, dvs, testlog): + self.setup_db(dvs) - create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + vnet_obj = self.get_vnet_obj() - #Add first Route again - create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + tunnel_name = 'tunnel_31' + vnet_name = "Vnet5001" - #bring up the endpoint. - update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + vnet_obj.fetch_exist_entries(dvs) - # The default Vnet setting advertises the prefix. - check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + # setup vnet and vlan + create_vxlan_tunnel(dvs, tunnel_name, '31.31.31.31') + create_vnet_entry(dvs, vnet_name, tunnel_name, '5001', "") - #verify the appdb entries. - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:66") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:66") - vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '5001') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '31.31.31.31') - #remove first route - delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) - vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) - check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") + # setup interfaces under vnet + create_phy_interface(dvs, "Ethernet20", vnet_name, "10.10.0.8/31") + vnet_obj.check_router_interface(dvs, "Ethernet20", vnet_name) - #make sure that the app db entries are removed. - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") - vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") - time.sleep(2) - delete_vnet_entry(dvs,vnet_name) + create_phy_interface(dvs, "Ethernet16", vnet_name, "10.10.0.10/31") + vnet_obj.check_router_interface(dvs, "Ethernet16", vnet_name) + + # bring up interface + self.set_admin_status("Ethernet20", "up") + self.set_admin_status("Ethernet16", "up") + + # setup vnet neighbor + self.add_neighbor("Ethernet20", "10.10.0.9", "00:01:02:03:04:05") + self.add_neighbor("Ethernet16", "10.10.0.11", "00:01:02:03:04:06") + + vnet_obj.fetch_exist_entries(dvs) + + num_nh = len(vnet_obj.nhops) + num_nhg = len(vnet_obj.nhgs) + num_nhgm = len(vnet_obj.nhgms) + + # create vnet local route + create_vnet_local_routes(dvs, "10.10.0.0/24", vnet_name, 'Ethernet20,Ethernet16', "10.10.0.9,10.10.0.11") + route_keys = vnet_obj.check_vnet_local_routes(dvs, vnet_name) + vnet_obj.check_vnet_local_route_nexthops(dvs, route_keys[0], ["10.10.0.9", "10.10.0.11"]) + + # update vnet local route to single nexthop + create_vnet_local_routes(dvs, "10.10.0.0/24", vnet_name, 'Ethernet20', "10.10.0.9") + vnet_obj.check_vnet_local_route_nexthops(dvs, route_keys[0], ["10.10.0.9"]) + + # update vnet local route back to ecmp + create_vnet_local_routes(dvs, "10.10.0.0/24", vnet_name, 'Ethernet20,Ethernet16', "10.10.0.9,10.10.0.11") + vnet_obj.check_vnet_local_route_nexthops(dvs, route_keys[0], ["10.10.0.9", "10.10.0.11"]) + + # Clean-up and verify remove flows + delete_vnet_local_routes(dvs, "10.10.0.0/24", vnet_name) + vnet_obj.check_del_vnet_local_routes(dvs, vnet_name, "10.10.0.0/24") + + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == num_nh + assert len(vnet_obj.nhgs) == num_nhg + assert len(vnet_obj.nhgms) == num_nhgm + + self.remove_neighbor("Ethernet20", "10.10.0.9") + self.remove_neighbor("Ethernet16", "10.10.0.11") + self.set_admin_status("Ethernet20", "down") + self.set_admin_status("Ethernet16", "down") + + delete_phy_interface(dvs, "Ethernet20", "10.10.0.8/31") + vnet_obj.check_del_router_interface(dvs, "Ethernet20") + delete_phy_interface(dvs, "Ethernet16", "10.10.0.10/31") + vnet_obj.check_del_router_interface(dvs, "Ethernet16") + + delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) ''' - Test 24 - Test duplicate route addition and removal. + Test 32 - Test for priority vnet tunnel routes with local endpoint + bfd monitoring + rx and tx timer. ''' - def test_vnet_orch_24(self, dvs, testlog): + def test_vnet_orch_30(self, dvs, dvs_acl, testlog): self.setup_db(dvs) self.clear_srv_config(dvs) vnet_obj = self.get_vnet_obj() - vnet_obj.fetch_exist_entries(dvs) - - # create vxlan tunnel and vnet in default vrf - tunnel_name = 'tunnel_24' - create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') - create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "", 'default') + tunnel_name = 'tunnel_32' + vnet_name = 'Vnet32' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - vnet_obj.check_default_vnet_entry(dvs, 'Vnet_2000') - vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000') - vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') vnet_obj.fetch_exist_entries(dvs) - # create vnet route - create_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000', '10.10.10.3') - vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.3', tunnel_name) - check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24", ['10.10.10.3']) - time.sleep(2) + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10028', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10028') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') # create l3 interface - self.create_l3_intf("Ethernet0", "") + self.create_l3_intf("Ethernet8", "") # set ip address - self.add_ip_address("Ethernet0", "10.10.10.1/24") + self.add_ip_address("Ethernet8", "9.1.0.1/32") # bring up interface - self.set_admin_status("Ethernet0", "up") - - # set ip address and default route - dvs.servers[0].runcmd("ip address add 10.10.10.3/24 dev eth0") - dvs.servers[0].runcmd("ip route add default via 10.10.10.1") - - marker = dvs.add_log_marker("/var/log/syslog") - time.sleep(2) - - # add another route for same prefix as vnet route - dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 100.100.1.0/24 10.10.10.3\"") - - # check application database - self.pdb.wait_for_entry("ROUTE_TABLE", "100.100.1.0/24") - - # check ASIC route database - self.check_route_entries(["100.100.1.0/24"]) + self.set_admin_status("Ethernet8", "up") - log_string = "Encountered failure in create operation, exiting orchagent, SAI API: SAI_API_ROUTE, status: SAI_STATUS_NOT_EXECUTED" - # check for absence of log_string in syslog - check_syslog(dvs, marker, log_string) + # add neighbor for directly connected endpoint + self.add_neighbor("Ethernet8", "9.1.0.1", "00:01:02:03:04:05") - # remove route entry - dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 100.100.1.0/24 10.10.10.3\"") + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2', ep_monitor='9.1.0.1,9.1.0.2', primary ='9.1.0.1', profile="Test_profile", monitoring='', rx_monitor_timer=100, tx_monitor_timer=100, adv_prefix='100.100.1.0/24', check_directly_connected=True) - # delete vnet route - delete_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000') - vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') - check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24") + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) - # delete vnet - delete_vnet_entry(dvs, 'Vnet_2000') - vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") - # delete vxlan tunnel + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) delete_vxlan_tunnel(dvs, tunnel_name) + self.remove_neighbor("Ethernet8", "9.1.0.1") + self.remove_ip_address("Ethernet8", "9.1.0.1/32") + self.set_admin_status("Ethernet8", "down") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): - pass + pass \ No newline at end of file diff --git a/tests/test_vnet2.py b/tests/test_vnet2.py new file mode 100644 index 00000000000..9ed200f199b --- /dev/null +++ b/tests/test_vnet2.py @@ -0,0 +1,426 @@ +import time +import ipaddress +import json +import random +import time +import pytest + +from swsscommon import swsscommon +from pprint import pprint +from dvslib.dvs_common import wait_for_result +from vnet_lib import * + + +class TestVnet2Orch(object): + CFG_SUBNET_DECAP_TABLE_NAME = "SUBNET_DECAP" + + @pytest.fixture + def setup_subnet_decap(self, dvs): + + def _apply_subnet_decap_config(subnet_decap_config): + """Apply subnet decap config to CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + fvs = create_fvs(**subnet_decap_config) + subnet_decap_tbl.set("AZURE", fvs) + + def _cleanup_subnet_decap_config(): + """Cleanup subnet decap config in CONFIG_DB.""" + subnet_decap_tbl = swsscommon.Table(configdb, self.CFG_SUBNET_DECAP_TABLE_NAME) + for key in subnet_decap_tbl.getKeys(): + subnet_decap_tbl._del(key) + + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + _cleanup_subnet_decap_config() + + yield _apply_subnet_decap_config + + _cleanup_subnet_decap_config() + + def get_vnet_obj(self): + return VnetVxlanVrfTunnel() + + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + self.sdb = dvs.get_state_db() + + def clear_srv_config(self, dvs): + dvs.servers[0].runcmd("ip address flush dev eth0") + dvs.servers[1].runcmd("ip address flush dev eth0") + dvs.servers[2].runcmd("ip address flush dev eth0") + dvs.servers[3].runcmd("ip address flush dev eth0") + + def set_admin_status(self, interface, status): + self.cdb.update_entry("PORT", interface, {"admin_status": status}) + + def create_l3_intf(self, interface, vrf_name): + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + def check_route_entries(self, destinations, absent=False): + def _access_function(): + route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + route_destinations = [json.loads(route_entry)["dest"] + for route_entry in route_entries] + return (all(destination in route_destinations for destination in destinations), None) + if absent: + return True if _access_function() == None else False + + wait_for_result(_access_function) + return True + + + ''' + Test 1 - Test for vnet tunnel routes interaction with regular route. + Add the conflicting route and then add the vnet route with same nexthops. + Bring up the bfd sessions and check the vnet route is programmed in hardware. + Remove the vnet route and check the vnet route is removed. + Remove the conflicting route and check the conflicting route is removed. + ''' + def test_vnet_orch_1(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_1' + vnet_name = 'Vnet1' + self.setup_db(dvs) + vnet_obj.fetch_exist_entries(dvs) + # create l3 interface and bring it up + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "20.20.20.1/24") + self.set_admin_status("Ethernet0", "down") + time.sleep(1) + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 20.20.20.5/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 20.20.20.1") + + # create vxlan tunnel and verfiy it + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '1001', "") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '1001') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + + # add conflicting route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 103.100.1.1/32 20.20.20.5\"") + + # check ASIC route database + self.check_route_entries(["103.100.1.1/32"]) + + create_vnet_routes(dvs, "103.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["103.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2'], tunnel_name) + + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + + # Remove all endpoint from group route shouldnt come back up. + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed as its not a bgp learnt route. + self.check_route_entries(["103.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "103.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["103.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "103.100.1.1/32") + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + vnet_obj.nhg_ids = {} + vnet_obj.fetch_exist_entries(dvs) + # readd the same route. + create_vnet_routes(dvs, "103.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["103.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2'], tunnel_name) + + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, vnet_name, "103.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + + # Remove all endpoint from group route shouldnt come back up. + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed as its not a bgp learnt route. + self.check_route_entries(["103.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "103.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["103.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "103.100.1.1/32") + check_remove_routes_advertisement(dvs, "103.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 103.100.1.1/32\"") + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 2 - Test for vnet tunnel routes interaction with regular route with endpoints bieng up. + Add the conflicting route and then add the vnet route with same nexthops. + Bring up the bfd sessions and check the vnet route is programmed in hardware. + Add the 2nd conflicting route and then add the 2nd vnet route with same nexthops as first vnet route. + This way we check if the newly added route works when the nexthops are already UP. + Verify the vnet routes are programmed in hardware. + Remove all the vnet route and check the vnet route is removed. + Remove all the conflicting route and check the conflicting route is removed. + ''' + def test_vnet_orch_2(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_2' + vnet_name = 'Vnet2' + self.setup_db(dvs) + vnet_obj.fetch_exist_entries(dvs) + + # create l3 interface and bring it up + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "20.20.20.1/24") + self.set_admin_status("Ethernet0", "down") + time.sleep(1) + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 20.20.20.6/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 20.20.20.1") + + # create vxlan tunnel and verfiy it + create_vxlan_tunnel(dvs, tunnel_name, '9.8.8.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '1002', "") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '1002') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.8.8.9') + vnet_obj.fetch_exist_entries(dvs) + + # add conflicting route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 200.100.1.1/32 20.20.20.6\"") + + # check ASIC route database + self.check_route_entries(["200.100.1.1/32"]) + + create_vnet_routes(dvs, "200.100.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "200.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2'], tunnel_name) + + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + time.sleep(1) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, vnet_name, ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + + # create a new regular and vnet route with same different prefix but same nexthops as before. + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 200.200.1.1/32 20.20.20.6\"") + # check ASIC route database + self.check_route_entries(["200.200.1.1/32"]) + + create_vnet_routes(dvs, "200.200.1.1/32", vnet_name, '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 200.100.1.1/32 20.20.20.6\"") + + # Remove all endpoint from group route shouldnt come back up. + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed. + self.check_route_entries(["200.100.1.1/32"], absent=True) + self.check_route_entries(["200.200.1.1/32"], absent=True) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "200.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.1/32") + + # Remove tunnel route 2 + delete_vnet_routes(dvs, "200.200.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.200.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "200.200.1.1/32") + check_remove_routes_advertisement(dvs, "200.200.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + + ''' + Test 3 - Test for vnet tunnel routes (custom monitoring) interaction with regular route. + Add the conflicting route and then add the vnet route with same nexthops. + Bring up the bfd sessions and check the vnet route is programmed in hardware. + Remove the vnet route and check the vnet route is removed. + Remove the conflicting route and check the conflicting route is removed. + ''' + def test_vnet_orch_3(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_3' + vnet_name = 'Vnet3' + self.setup_db(dvs) + vnet_obj.fetch_exist_entries(dvs) + # create l3 interface and bring it up + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "20.20.20.1/24") + self.set_admin_status("Ethernet0", "down") + time.sleep(1) + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 20.20.20.7/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 20.20.20.1") + + # create vxlan tunnel and verfiy it + create_vxlan_tunnel(dvs, tunnel_name, '19.19.19.19') + create_vnet_entry(dvs, vnet_name, tunnel_name, '1003', "", '', advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '1003') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '19.19.19.19') + + vnet_obj.fetch_exist_entries(dvs) + + # add conflicting route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 105.100.1.1/32 20.20.20.7\"") + + # check ASIC route database + self.check_route_entries(["105.100.1.1/32"]) + + create_vnet_routes(dvs, "105.100.1.1/32", vnet_name, '9.7.0.1,9.7.0.2,9.7.0.3,9.7.0.4', ep_monitor='9.1.2.1,9.1.2.2,9.1.2.3,9.1.2.4',profile = "test_prf", primary='9.7.0.1,9.7.0.2', monitoring='custom',adv_prefix='105.100.1.1/32') + # Route should be properly configured when all monitor session states go up + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'up') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.3', 'up') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'up') + time.sleep(1) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.7.0.2,9.7.0.1'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "105.100.1.1/32", ['9.7.0.1', '9.7.0.2']) + + # Remove all endpoint from group route shouldnt come back up. + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'down') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'down') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.3', 'down') + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed as its not a bgp learnt route. + self.check_route_entries(["105.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "105.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["105.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "105.100.1.1/32") + check_remove_routes_advertisement(dvs, "105.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + + vnet_obj.nhg_ids = {} + vnet_obj.fetch_exist_entries(dvs) + # readd the same route. + create_vnet_routes(dvs, "105.100.1.1/32", vnet_name, '9.7.0.1,9.7.0.2,9.7.0.3,9.7.0.4', ep_monitor='9.1.2.1,9.1.2.2,9.1.2.3,9.1.2.4',primary='9.7.0.1,9.7.0.2', monitoring='custom') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["105.100.1.1/32"], absent=True) + check_state_db_routes(dvs, vnet_name, "105.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "105.100.1.1/32") + + # Route should be properly configured when all bfd session states go up + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'up') + time.sleep(1) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.7.0.2'], tunnel_name) + + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'up') + time.sleep(1) + vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.7.0.2,9.7.0.1'], tunnel_name) + + # Remove all endpoint from group route shouldnt come back up. + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.2', 'down') + update_monitor_session_state(dvs, "105.100.1.1/32", '9.1.2.1', 'down') + + time.sleep(1) + # after removal of vnet route, conflicting route is not getting programmed as its not a bgp learnt route. + self.check_route_entries(["105.100.1.1/32"], absent=True) + # Remove tunnel route 1 + delete_vnet_routes(dvs, "105.100.1.1/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["105.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "105.100.1.1/32") + check_remove_routes_advertisement(dvs, "105.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 105.100.1.1/32\"") + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_vxlan_tunnel.py b/tests/test_vxlan_tunnel.py index d296fcc7412..82de242fb8c 100644 --- a/tests/test_vxlan_tunnel.py +++ b/tests/test_vxlan_tunnel.py @@ -152,7 +152,9 @@ def check_vxlan_tunnel(dvs, src_ip, dst_ip, tunnel_map_ids, tunnel_map_entry_ids 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': decapstr, 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': encapstr, 'SAI_TUNNEL_ATTR_PEER_MODE': 'SAI_TUNNEL_PEER_MODE_P2MP', - 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip + 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': 'SAI_TUNNEL_TTL_MODE_PIPE_MODEL', + 'SAI_TUNNEL_ATTR_ENCAP_TTL_VAL': '255' } ) diff --git a/tests/test_warm_reboot.py b/tests/test_warm_reboot.py index b2edc425878..c8b5203a462 100644 --- a/tests/test_warm_reboot.py +++ b/tests/test_warm_reboot.py @@ -60,6 +60,31 @@ def check_port_oper_status(appl_db, port_name, state): break assert oper_status == state +def check_port_host_tx_ready_status(state_db, port_name, status): + portTable = swsscommon.Table(state_db, swsscommon.STATE_PORT_TABLE_NAME) + (status, fvs) = portTable.get(port_name) + + assert status == True + + assert "host_tx_ready" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "host_tx_ready": + assert fv[1] == "true" if status == "up" else "false" + +def update_host_tx_ready_status(dvs, port_id, switch_id, admin_state): + host_tx_ready = "SAI_PORT_HOST_TX_READY_STATUS_READY" if admin_state == "up" else "SAI_PORT_HOST_TX_READY_STATUS_NOT_READY" + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"host_tx_ready_status\":\""+host_tx_ready+"\",\"port_id\":\""+port_id+"\",\"switch_id\":\""+switch_id+"\"}]" + ntf.send("port_host_tx_ready", ntf_data, fvp) + +def get_port_id(dvs, port_name): + count_db = swsscommon.DBConnector(2, dvs.redis_sock, 0) + port_name_map = swsscommon.Table(count_db, "COUNTERS_PORT_NAME_MAP") + status, returned_value = port_name_map.hget("", port_name) + assert status == True + return returned_value + # function to check the restore count incremented by 1 for a single process def swss_app_check_RestoreCount_single(state_db, restore_count, name): warmtbl = swsscommon.Table(state_db, swsscommon.STATE_WARM_RESTART_TABLE_NAME) @@ -256,6 +281,8 @@ def warm_restart_timer_set(dvs, app, timer, val): class TestWarmReboot(object): def test_PortSyncdWarmRestart(self, dvs, testlog): + dvs.setup_db() + switch_id = dvs.getSwitchOid() conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -294,6 +321,13 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): check_port_oper_status(appl_db, "Ethernet16", "up") check_port_oper_status(appl_db, "Ethernet20", "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet16") , switch_id, "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet20") , switch_id, "up") + + # Ethernet port host_tx_ready status should be "true" + check_port_host_tx_ready_status(state_db, "Ethernet16", "up") + check_port_host_tx_ready_status(state_db, "Ethernet20", "up") + # Ping should work between servers via vs vlan interfaces ping_stats = dvs.servers[4].runcmd("ping -c 1 11.0.0.10") time.sleep(1) @@ -337,6 +371,13 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): check_port_oper_status(appl_db, "Ethernet20", "up") check_port_oper_status(appl_db, "Ethernet24", "down") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet16") , switch_id, "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet20") , switch_id, "up") + update_host_tx_ready_status(dvs, get_port_id(dvs, "Ethernet24") , switch_id, "down") + + check_port_host_tx_ready_status(state_db, "Ethernet16", "up") + check_port_host_tx_ready_status(state_db, "Ethernet20", "up") + check_port_host_tx_ready_status(state_db, "Ethernet24", "down") swss_app_check_RestoreCount_single(state_db, restore_count, "portsyncd") @@ -925,6 +966,7 @@ def test_OrchagentWarmRestartReadyCheck(self, dvs, testlog): dvs.start_swss() time.sleep(5) + @pytest.mark.skip(reason="This test is failing consistently") def test_swss_port_state_syncup(self, dvs, testlog): appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -1049,7 +1091,7 @@ def test_swss_port_state_syncup(self, dvs, testlog): orchStateCount += 1; # Only WARM_RESTART_TABLE|orchagent state=reconciled operation may exist after port oper status change. - assert orchStateCount == 1 + assert orchStateCount == 2 #clean up arp dvs.runcmd("arp -d 10.0.0.1") @@ -1078,6 +1120,7 @@ def test_swss_port_state_syncup(self, dvs, testlog): # ################################################################################ + @pytest.mark.skip(reason="This test is failing consistently") def test_routing_WarmRestart(self, dvs, testlog): appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -2132,6 +2175,7 @@ def test_system_warmreboot_neighbor_syncup(self, dvs, testlog): intf_tbl._del("Ethernet{}".format(i*4, i*4)) intf_tbl._del("Ethernet{}".format(i*4, i*4)) + @pytest.mark.skip(reason="This test is failing consistently") def test_VrfMgrdWarmRestart(self, dvs, testlog): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) @@ -2291,6 +2335,7 @@ def setup_erspan_neighbors(self, dvs): dvs.set_interface_status("Ethernet20", "down") @pytest.mark.usefixtures("dvs_mirror_manager", "setup_erspan_neighbors") + @pytest.mark.skip(reason="This test is failing consistently") def test_MirrorSessionWarmReboot(self, dvs): dvs.setup_db() @@ -2327,6 +2372,7 @@ def test_MirrorSessionWarmReboot(self, dvs): dvs.check_swss_ready() @pytest.mark.usefixtures("dvs_mirror_manager", "dvs_policer_manager", "setup_erspan_neighbors") + @pytest.mark.skip(reason="This test is failing consistently") def test_EverflowWarmReboot(self, dvs, dvs_acl): # Setup the policer self.dvs_policer.create_policer("test_policer") @@ -2387,36 +2433,65 @@ def test_EverflowWarmReboot(self, dvs, dvs_acl): dvs.start_swss() dvs.check_swss_ready() + @pytest.mark.skip(reason="This test is failing consistently") def test_TunnelMgrdWarmRestart(self, dvs): tunnel_name = "MuxTunnel0" tunnel_table = "TUNNEL_DECAP_TABLE" + tunnel_decap_term_table = "TUNNEL_DECAP_TERM_TABLE" tunnel_params = { "tunnel_type": "IPINIP", + "src_ip": "10.1.0.33", "dst_ip": "10.1.0.32", "dscp_mode": "uniform", "ecn_mode": "standard", "ttl_mode": "pipe" } - - pubsub = dvs.SubscribeAppDbObject(tunnel_table) + + pubsub_tunnel = dvs.SubscribeAppDbObject(tunnel_table) + pubsub_decap_term = dvs.SubscribeAppDbObject(tunnel_decap_term_table) dvs.runcmd("config warm_restart enable swss") config_db = dvs.get_config_db() config_db.create_entry("TUNNEL", tunnel_name, tunnel_params) app_db = dvs.get_app_db() + dst_ip = tunnel_params.pop("dst_ip") app_db.wait_for_matching_keys(tunnel_table, [tunnel_name]) + app_db.wait_for_matching_keys(tunnel_decap_term_table, [tunnel_name + ":" + dst_ip]) - nadd, ndel = dvs.CountSubscribedObjects(pubsub) + nadd, ndel = dvs.CountSubscribedObjects(pubsub_tunnel) assert nadd == len(tunnel_params) assert ndel == 1 # Expect 1 deletion as part of table creation + nadd, ndel = dvs.CountSubscribedObjects(pubsub_decap_term) + assert nadd == 2 + assert ndel == 1 dvs.runcmd("supervisorctl restart tunnelmgrd") dvs.check_services_ready() - nadd, ndel = dvs.CountSubscribedObjects(pubsub) + nadd, ndel = dvs.CountSubscribedObjects(pubsub_tunnel) + assert nadd == 0 + assert ndel == 0 + nadd, ndel = dvs.CountSubscribedObjects(pubsub_decap_term) assert nadd == 0 assert ndel == 0 + def test_FpmsyncdWarmRestart(self, dvs): + # This test aims to improve code coverage in fpmsyncd. + warm_restart_set(dvs, "system", "true") + warm_restart_set(dvs, "bgp", "true") + + # set restore count + db = swsscommon.DBConnector(6, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, "WARM_RESTART_ENABLE_TABLE") + fvs = swsscommon.FieldValuePairs([("restore_count", "0")]) + tbl.set("bgp", fvs) + + # Stop fpmsyncd + dvs.stop_fpmsyncd() + + # Start fpmsyncd + dvs.start_fpmsyncd() + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_zmq.py b/tests/test_zmq.py new file mode 100644 index 00000000000..6312c443d29 --- /dev/null +++ b/tests/test_zmq.py @@ -0,0 +1,156 @@ +from swsscommon import swsscommon + +from dash_api.appliance_pb2 import * +from dash_api.vnet_pb2 import * +from dash_api.eni_pb2 import * +from dash_api.route_pb2 import * +from dash_api.route_rule_pb2 import * +from dash_api.vnet_mapping_pb2 import * +from dash_api.route_type_pb2 import * +from dash_api.types_pb2 import * + +import typing +import time +import binascii +import uuid +import ipaddress +import sys +import socket +import logging +import pytest + +logging.basicConfig(level=logging.INFO) +zmq_logger = logging.getLogger(__name__) + +DVS_ENV = ["HWSKU=DPU-2P"] +NUM_PORTS = 2 + +class Table(object): + def __init__(self, database, table_name: str): + self.table_name = table_name + self.table = swsscommon.Table(database.db_connection, self.table_name) + + def __getitem__(self, key: str): + exists, result = self.table.get(str(key)) + if not exists: + return None + else: + return dict(result) + + def get_keys(self): + return self.table.getKeys() + + def get_newly_created_oid(self, old_oids): + new_oids = self.asic_db.wait_for_n_keys(table, len(old_oids) + 1) + oid = [ids for ids in new_oids if ids not in old_oids] + return oid[0] + +class DashZmq(object): + def __init__(self, dvs): + self.dvs = dvs + self.asic_direction_lookup_table = Table( + self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY") + self.asic_vip_table = Table( + self.dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY") + +class TestZmqDash(object): + @pytest.fixture(scope="class") + def enable_orchagent_zmq(self, dvs): + # change orchagent to use ZMQ + # change orchagent to use custom create_switch_timeout + dvs.runcmd("cp /usr/bin/orchagent.sh /usr/bin/orchagent.sh_zmq_ut_backup") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -q tcp:\/\/127.0.0.1:8100 -t 60 /g' /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + process_statue = dvs.runcmd("ps -ef") + zmq_logger.debug("Process status: {}".format(process_statue)) + + yield + + # revert change + dvs.runcmd("cp /usr/bin/orchagent.sh_zmq_ut_backup /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + @pytest.mark.usefixtures("enable_orchagent_zmq") + def test_appliance(self, dvs): + # upload test script to test container and create applicance with it + dvs.copy_file("/", "create_appliance.py") + dvs.runcmd(['sh', '-c', "python3 create_appliance.py {}".format(1234)]) + time.sleep(3) + + asic_direction_lookup_table = Table( + dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY") + direction_entries = asic_direction_lookup_table.get_keys() + zmq_logger.info("Keys from asic_direction_lookup_table: {}".format(direction_entries)) + + assert direction_entries + fvs = asic_direction_lookup_table[direction_entries[0]] + zmq_logger.info("Data from asic_direction_lookup_table: {}={}".format(direction_entries[0], fvs)) + for fv in fvs.items(): + if fv[0] == "SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION": + assert fv[1] == "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION" + + asic_vip_table = Table( + dvs.get_asic_db(), "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY") + vip_entries = asic_vip_table.get_keys() + zmq_logger.info("Keys from asic_vip_table: {}".format(direction_entries)) + + assert vip_entries + fvs = asic_vip_table[vip_entries[0]] + zmq_logger.info("Data from asic_vip_table: {}={}".format(vip_entries[0], fvs)) + for fv in fvs.items(): + if fv[0] == "SAI_VIP_ENTRY_ATTR_ACTION": + assert fv[1] == "SAI_VIP_ENTRY_ACTION_ACCEPT" + + def test_vrf(self, dvs): + # Improve test code coverage, change orchagent to use VRF + dvs.runcmd("cp /usr/bin/orchagent.sh /usr/bin/orchagent.sh_vrf_ut_backup") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -v mgmt /g' /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + # wait orchagent start + time.sleep(3) + process_statue = dvs.runcmd("ps -ef") + zmq_logger.debug("Process status: {}".format(process_statue)) + + # revert change + dvs.runcmd("cp /usr/bin/orchagent.sh_vrf_ut_backup /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + def test_heartbeat(self, dvs): + # Improve test code coverage, change orchagent to disable heartbeat + dvs.runcmd("cp /usr/bin/orchagent.sh /usr/bin/orchagent.sh_hb_ut_backup") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -I 0 /g' /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + # wait orchagent start + time.sleep(3) + process_statue = dvs.runcmd("ps -ef") + zmq_logger.debug("Process status: {}".format(process_statue)) + + # revert change + dvs.runcmd("cp /usr/bin/orchagent.sh_hb_ut_backup /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + def test_usage(self, dvs): + # Improve test code coverage, change orchagent to display usage + dvs.runcmd("cp /usr/bin/orchagent.sh /usr/bin/orchagent.sh_usage_ut_backup") + dvs.runcmd("sed -i.bak 's/\/usr\/bin\/orchagent /\/usr\/bin\/orchagent -h /g' /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() + + # wait orchagent start + time.sleep(3) + process_statue = dvs.runcmd("ps -ef") + zmq_logger.debug("Process status: {}".format(process_statue)) + + # revert change + dvs.runcmd("cp /usr/bin/orchagent.sh_usage_ut_backup /usr/bin/orchagent.sh") + dvs.stop_swss() + dvs.start_swss() diff --git a/tests/virtual_chassis/1/default_config.json b/tests/virtual_chassis/1/default_config.json index 88769c9ce62..8cea66ee12a 100644 --- a/tests/virtual_chassis/1/default_config.json +++ b/tests/virtual_chassis/1/default_config.json @@ -15,8 +15,10 @@ "INTERFACE": { "Ethernet0": {}, "Ethernet4": {}, + "Ethernet8": {}, "Ethernet0|10.8.101.1/24": {}, - "Ethernet4|10.8.104.1/24": {} + "Ethernet4|10.8.104.1/24": {}, + "Ethernet8|10.8.108.1/24": {} }, "PORT": { "Ethernet0": { @@ -24,6 +26,9 @@ }, "Ethernet4": { "admin_status": "up" + }, + "Ethernet8": { + "admin_status": "up" } }, "SYSTEM_PORT": { diff --git a/tests/virtual_chassis/8/default_config.json b/tests/virtual_chassis/8/default_config.json index 523ab8e450d..4160d7dd922 100644 --- a/tests/virtual_chassis/8/default_config.json +++ b/tests/virtual_chassis/8/default_config.json @@ -5,9 +5,116 @@ "chassis_db_address" : "10.8.1.200", "inband_address" : "10.8.1.200/24", "switch_type": "fabric", + "switch_id": "0", "sub_role" : "BackEnd", "start_chassis_db" : "1", "comment" : "default_config for a vs that runs chassis_db" } + }, + "FABRIC_MONITOR": { + "FABRIC_MONITOR_DATA": { + "monErrThreshCrcCells": "1", + "monErrThreshRxCells": "61035156", + "monPollThreshRecovery": "8", + "monPollThreshIsolation": "1" + } + }, + "FABRIC_PORT": { + "Fabric0": { + "alias": "Fabric0", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "0" + }, + "Fabric1": { + "alias": "Fabric1", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "1" + }, + "Fabric2": { + "alias": "Fabric2", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "2" + }, + "Fabric3": { + "alias": "Fabric3", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "3" + }, + "Fabric4": { + "alias": "Fabric4", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "4" + }, + "Fabric5": { + "alias": "Fabric5", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "5" + }, + "Fabric6": { + "alias": "Fabric6", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "6" + }, + "Fabric7": { + "alias": "Fabric7", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "7" + }, + "Fabric8": { + "alias": "Fabric8", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "8" + }, + "Fabric9": { + "alias": "Fabric9", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "9" + }, + "Fabric10": { + "alias": "Fabric10", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "10" + }, + "Fabric11": { + "alias": "Fabric11", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "11" + }, + "Fabric12": { + "alias": "Fabric12", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "12" + }, + "Fabric13": { + "alias": "Fabric13", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "13" + }, + "Fabric14": { + "alias": "Fabric14", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "14" + }, + "Fabric15": { + "alias": "Fabric15", + "isolateStatus": "False", + "forceUnisolateStatus": "0", + "lanes": "15" + } } } diff --git a/tests/virtual_chassis/database_config.json b/tests/virtual_chassis/database_config.json new file mode 100644 index 00000000000..0a0d62a6eb4 --- /dev/null +++ b/tests/virtual_chassis/database_config.json @@ -0,0 +1,97 @@ +{ + "INSTANCES": { + "redis":{ + "hostname" : "127.0.0.1", + "port" : 6379, + "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_chassis":{ + "hostname" : "10.8.1.200", + "port" : 6380, + "unix_socket_path" : "/var/run/redis/redis_chassis.sock" + } + }, + "DATABASES" : { + "APPL_DB" : { + "id" : 0, + "separator": ":", + "instance" : "redis" + }, + "ASIC_DB" : { + "id" : 1, + "separator": ":", + "instance" : "redis" + }, + "COUNTERS_DB" : { + "id" : 2, + "separator": ":", + "instance" : "redis" + }, + "CONFIG_DB" : { + "id" : 4, + "separator": "|", + "instance" : "redis" + }, + "PFC_WD_DB" : { + "id" : 5, + "separator": ":", + "instance" : "redis" + }, + "FLEX_COUNTER_DB" : { + "id" : 5, + "separator": ":", + "instance" : "redis" + }, + "STATE_DB" : { + "id" : 6, + "separator": "|", + "instance" : "redis" + }, + "SNMP_OVERLAY_DB" : { + "id" : 7, + "separator": "|", + "instance" : "redis" + }, + "GB_ASIC_DB" : { + "id" : 9, + "separator": ":", + "instance" : "redis" + }, + "GB_COUNTERS_DB" : { + "id" : 10, + "separator": ":", + "instance" : "redis" + }, + "GB_FLEX_COUNTER_DB" : { + "id" : 11, + "separator": ":", + "instance" : "redis" + }, + "CHASSIS_APP_DB" : { + "id" : 12, + "separator": "|", + "instance" : "redis_chassis" + }, + "APPL_STATE_DB" : { + "id" : 14, + "separator": ":", + "instance" : "redis" + }, + "DPU_APPL_DB" : { + "id" : 15, + "separator": ":", + "instance" : "redis" + }, + "DPU_APPL_STATE_DB" : { + "id" : 16, + "separator": "|", + "instance" : "redis" + }, + "DPU_STATE_DB": { + "id": 17, + "separator": "|", + "instance": "redis" + } + }, + "VERSION" : "1.0" +} diff --git a/tests/vnet_lib.py b/tests/vnet_lib.py new file mode 100644 index 00000000000..2b219c176bb --- /dev/null +++ b/tests/vnet_lib.py @@ -0,0 +1,1307 @@ +import time +import ipaddress +import json +import time + +from swsscommon import swsscommon +from pprint import pprint +from dvslib.dvs_common import wait_for_result + + +def create_entry(tbl, key, pairs): + fvs = swsscommon.FieldValuePairs(pairs) + tbl.set(key, fvs) + time.sleep(1) + + +def create_entry_tbl(db, table, separator, key, pairs): + tbl = swsscommon.Table(db, table) + create_entry(tbl, key, pairs) + + +def create_entry_pst(db, table, separator, key, pairs): + tbl = swsscommon.ProducerStateTable(db, table) + create_entry(tbl, key, pairs) + + +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + time.sleep(1) + + +def delete_entry_pst(db, table, key): + tbl = swsscommon.ProducerStateTable(db, table) + tbl._del(key) + time.sleep(1) + + +def how_many_entries_exist(db, table): + tbl = swsscommon.Table(db, table) + return len(tbl.getKeys()) + + +def entries(db, table): + tbl = swsscommon.Table(db, table) + return set(tbl.getKeys()) + + +def get_exist_entries(dvs, table): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, table) + return set(tbl.getKeys()) + + +def get_created_entry(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) == 1, "Wrong number of created entries." + return new_entries[0] + + +def get_all_created_entries(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - set(existed_entries)) + assert len(new_entries) >= 0, "Get all could be no new created entries." + new_entries.sort() + return new_entries + + +def get_created_entries(db, table, existed_entries, count): + new_entries = get_all_created_entries(db, table, existed_entries) + assert len(new_entries) == count, "Wrong number of created entries." + return new_entries + + +def get_deleted_entries(db, table, existed_entries, count): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + old_entries = list(existed_entries - entries) + assert len(old_entries) == count, "Wrong number of deleted entries." + old_entries.sort() + return old_entries + + +def get_default_vr_id(dvs): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + table = 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER' + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert len(keys) == 1, "Wrong number of virtual routers found" + + return keys[0] + + +def check_object(db, table, key, expected_attributes): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key in keys, "The desired key is not presented" + + status, fvs = tbl.get(key) + assert status, "Got an error when get a key" + + assert len(fvs) >= len(expected_attributes), "Incorrect attributes" + + attr_keys = {entry[0] for entry in fvs} + + for name, value in fvs: + if name in expected_attributes: + assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \ + (value, name, expected_attributes[name]) + +def check_deleted_object(db, table, key): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key not in keys, "The desired key is not removed" + + +def create_vnet_local_routes(dvs, prefix, vnet_name, ifname, nexthop=""): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + create_entry_tbl( + conf_db, + "VNET_ROUTE", '|', "%s|%s" % (vnet_name, prefix), + [ + ("ifname", ifname), + ("nexthop", nexthop), + ] + ) + + time.sleep(2) + + +def delete_vnet_local_routes(dvs, prefix, vnet_name): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + delete_entry_pst(app_db, "VNET_ROUTE_TABLE", "%s:%s" % (vnet_name, prefix)) + + time.sleep(2) + + +def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", rx_monitor_timer=-1, tx_monitor_timer=-1, adv_prefix="", check_directly_connected=False): + set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, rx_monitor_timer=rx_monitor_timer, tx_monitor_timer=tx_monitor_timer, adv_prefix=adv_prefix, check_directly_connected=check_directly_connected) + + +def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", rx_monitor_timer=-1, tx_monitor_timer=-1, adv_prefix="", check_directly_connected=False): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + attrs = [ + ("endpoint", endpoint), + ] + + if vni: + attrs.append(('vni', vni)) + + if mac: + attrs.append(('mac_address', mac)) + + if ep_monitor: + attrs.append(('endpoint_monitor', ep_monitor)) + + if profile: + attrs.append(('profile', profile)) + + if primary: + attrs.append(('primary', primary)) + + if monitoring: + attrs.append(('monitoring', monitoring)) + + if adv_prefix: + attrs.append(('adv_prefix', adv_prefix)) + + if check_directly_connected: + attrs.append(('check_directly_connected', 'true')) + + if rx_monitor_timer != -1: + attrs.append(('rx_monitor_timer', str(rx_monitor_timer))) + + if tx_monitor_timer != -1: + attrs.append(('tx_monitor_timer', str(tx_monitor_timer))) + + tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") + fvs = swsscommon.FieldValuePairs(attrs) + tbl.set("%s|%s" % (vnet_name, prefix), fvs) + + time.sleep(2) + + +def delete_vnet_routes(dvs, prefix, vnet_name): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + delete_entry_pst(app_db, "VNET_ROUTE_TUNNEL_TABLE", "%s:%s" % (vnet_name, prefix)) + + time.sleep(2) + + +def create_vlan(dvs, vlan_name, vlan_ids): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + vlan_id = vlan_name[4:] + + # create vlan + create_entry_tbl( + conf_db, + "VLAN", '|', vlan_name, + [ + ("vlanid", vlan_id), + ], + ) + + time.sleep(1) + + vlan_oid = get_created_entry(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_ids) + + check_object(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN", vlan_oid, + { + "SAI_VLAN_ATTR_VLAN_ID": vlan_id, + } + ) + + return vlan_oid + + +def create_vlan_interface(dvs, vlan_name, ifname, vnet_name, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + vlan_ids = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + + vlan_oid = create_vlan (dvs, vlan_name, vlan_ids) + + # create a vlan member in config db + create_entry_tbl( + conf_db, + "VLAN_MEMBER", '|', "%s|%s" % (vlan_name, ifname), + [ + ("tagging_mode", "untagged"), + ], + ) + + time.sleep(1) + + # create vlan interface in config db + create_entry_tbl( + conf_db, + "VLAN_INTERFACE", '|', vlan_name, + [ + ("vnet_name", vnet_name), + ("proxy_arp", "enabled"), + ], + ) + + #FIXME - This is created by IntfMgr + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + create_entry_pst( + app_db, + "INTF_TABLE", ':', vlan_name, + [ + ("vnet_name", vnet_name), + ("proxy_arp", "enabled"), + ], + ) + time.sleep(2) + + create_entry_tbl( + conf_db, + "VLAN_INTERFACE", '|', "%s|%s" % (vlan_name, ipaddr), + [ + ("family", "IPv4"), + ], + ) + + time.sleep(2) + + return vlan_oid + + +def delete_vlan_interface(dvs, ifname, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, "VLAN_INTERFACE", "%s|%s" % (ifname, ipaddr)) + + time.sleep(2) + + delete_entry_tbl(conf_db, "VLAN_INTERFACE", ifname) + + time.sleep(2) + + +def create_phy_interface(dvs, ifname, vnet_name, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + exist_rifs = get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + + # create vlan interface in config db + create_entry_tbl( + conf_db, + "INTERFACE", '|', ifname, + [ + ("vnet_name", vnet_name), + ], + ) + + #FIXME - This is created by IntfMgr + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + create_entry_pst( + app_db, + "INTF_TABLE", ':', ifname, + [ + ("vnet_name", vnet_name), + ], + ) + time.sleep(2) + + create_entry_tbl( + conf_db, + "INTERFACE", '|', "%s|%s" % (ifname, ipaddr), + [ + ("family", "IPv4"), + ], + ) + + +def delete_phy_interface(dvs, ifname, ipaddr): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, "INTERFACE", "%s|%s" % (ifname, ipaddr)) + + time.sleep(2) + + delete_entry_tbl(conf_db, "INTERFACE", ifname) + + time.sleep(2) + + +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False, overlay_dmac=""): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + attrs = [ + ("vxlan_tunnel", tunnel), + ("vni", vni), + ("peer_list", peer_list), + ] + + if scope: + attrs.append(('scope', scope)) + + if advertise_prefix: + attrs.append(('advertise_prefix', 'true')) + + if overlay_dmac: + attrs.append(('overlay_dmac', overlay_dmac)) + + # create the VXLAN tunnel Term entry in Config DB + create_entry_tbl( + conf_db, + "VNET", '|', name, + attrs, + ) + + time.sleep(2) + + +def delete_vnet_entry(dvs, name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, "VNET", "%s" % (name)) + + time.sleep(2) + + +def create_vxlan_tunnel(dvs, name, src_ip): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + attrs = [ + ("src_ip", src_ip), + ] + + # create the VXLAN tunnel Term entry in Config DB + create_entry_tbl( + conf_db, + "VXLAN_TUNNEL", '|', name, + attrs, + ) + +def delete_vxlan_tunnel(dvs, name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_entry_tbl(conf_db, "VXLAN_TUNNEL", name) + +def create_vxlan_tunnel_map(dvs, tunnel_name, tunnel_map_entry_name, vlan, vni_id): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + # create the VXLAN tunnel map entry in Config DB + create_entry_tbl( + conf_db, + "VXLAN_TUNNEL_MAP", '|', "%s|%s" % (tunnel_name, tunnel_map_entry_name), + [ + ("vni", vni_id), + ("vlan", vlan), + ], + ) + + +def get_lo(dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + vr_id = get_default_vr_id(dvs) + + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE') + + entries = tbl.getKeys() + lo_id = None + for entry in entries: + status, fvs = tbl.get(entry) + assert status, "Got an error when get a key" + for key, value in fvs: + if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK': + lo_id = entry + break + else: + assert False, 'Don\'t found loopback id' + + return lo_id + + +def get_switch_mac(dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH') + + entries = tbl.getKeys() + mac = None + for entry in entries: + status, fvs = tbl.get(entry) + assert status, "Got an error when get a key" + for key, value in fvs: + if key == 'SAI_SWITCH_ATTR_SRC_MAC_ADDRESS': + mac = value + break + else: + assert False, 'Don\'t found switch mac' + + return mac + + +def check_linux_intf_arp_proxy(dvs, ifname): + (exitcode, out) = dvs.runcmd("cat /proc/sys/net/ipv4/conf/{0}/proxy_arp_pvlan".format(ifname)) + assert out != "1", "ARP proxy is not enabled for VNET interface in Linux kernel" + + +def update_bfd_session_state(dvs, addr, state): + bfd_id = get_bfd_session_id(dvs, addr) + assert bfd_id is not None + + bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", + "Down": "SAI_BFD_SESSION_STATE_DOWN", + "Init": "SAI_BFD_SESSION_STATE_INIT", + "Up": "SAI_BFD_SESSION_STATE_UP"} + + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + ntf = swsscommon.NotificationProducer(asic_db, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" + ntf.send("bfd_session_state_change", ntf_data, fvp) + +def update_monitor_session_state(dvs, addr, monitor, state): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + create_entry_tbl( + state_db, + "VNET_MONITOR_TABLE", '|', "%s|%s" % (monitor,addr), + [ + ("state", state), + ] + ) + +def get_bfd_session_id(dvs, addr): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION") + entries = set(tbl.getKeys()) + for entry in entries: + status, fvs = tbl.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr and fvs["SAI_BFD_SESSION_ATTR_MULTIHOP"] == "true": + return entry + + return None + + +def check_del_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is None + + +def check_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is not None + + +def check_state_db_routes(dvs, vnet, prefix, endpoints): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + + status, fvs = tbl.get(vnet + '|' + prefix) + assert status, "Got an error when get a key" + + fvs = dict(fvs) + assert fvs['active_endpoints'] == ','.join(endpoints) + + if endpoints: + assert fvs['state'] == 'active' + else: + assert fvs['state'] == 'inactive' + + +def check_remove_state_db_routes(dvs, vnet, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + keys = tbl.getKeys() + + assert vnet + '|' + prefix not in keys + + +def check_routes_advertisement(dvs, prefix, profile=""): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix in keys + + if profile: + status, fvs = tbl.get(prefix) + assert status, "Got an error when get a key" + fvs = dict(fvs) + assert fvs['profile'] == profile + + +def check_remove_routes_advertisement(dvs, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") + keys = tbl.getKeys() + + assert prefix not in keys + + +def check_syslog(dvs, marker, err_log): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() == "0" + + +def create_fvs(**kwargs): + return swsscommon.FieldValuePairs(list(kwargs.items())) + + +def create_subnet_decap_tunnel(dvs, tunnel_name, **kwargs): + """Create tunnel and verify all needed entries in state DB exists.""" + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + fvs = create_fvs(**kwargs) + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") + ps.set(tunnel_name, fvs) + + # wait till config will be applied + time.sleep(1) + + # validate the tunnel entry in state db + tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") + + tunnels = tunnel_state_table.getKeys() + for tunnel in tunnels: + status, fvs = tunnel_state_table.get(tunnel) + assert status == True + + for field, value in fvs: + if field == "tunnel_type": + assert value == "IPINIP" + elif field == "dscp_mode": + assert value == kwargs["dscp_mode"] + elif field == "ecn_mode": + assert value == kwargs["ecn_mode"] + elif field == "ttl_mode": + assert value == kwargs["ttl_mode"] + elif field == "encap_ecn_mode": + assert value == kwargs["encap_ecn_mode"] + else: + assert False, "Field %s is not tested" % field + + +def delete_subnet_decap_tunnel(dvs, tunnel_name): + """Delete tunnel and checks that state DB is cleared.""" + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + statedb = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tunnel_app_table = swsscommon.Table(appdb, "TUNNEL_DECAP_TABLE") + tunnel_state_table = swsscommon.Table(statedb, "TUNNEL_DECAP_TABLE") + + ps = swsscommon.ProducerStateTable(appdb, "TUNNEL_DECAP_TABLE") + ps._del(tunnel_name) + + # wait till config will be applied + time.sleep(1) + + assert len(tunnel_app_table.getKeys()) == 0 + assert len(tunnel_state_table.getKeys()) == 0 + + +loopback_id = 0 +def_vr_id = 0 +switch_mac = None +# Creation of a physical interface should only add one route to 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY' (i.e., a route to the interface's IP). +# But after creating a VLAN interface, two entries should be added to this table: One for the VLAN interface's IP and one for the VLAN's subnet. +# Creating a VLAN in a VNet that is peered with another Vnet will create an additional entry for the VLAN's subnet in the peer Vnet. +intf_route_count = {"physical": 1, "vlan": 2, "vlan-one-peer": 3} + +def update_bgp_global_dev_state(dvs, state): + config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + create_entry_tbl( + config_db, + "BGP_DEVICE_GLOBAL",'|',"STATE", + [ + ("tsa_enabled", state), + ] + ) + +def set_tsa(dvs): + update_bgp_global_dev_state(dvs, "true") + +def clear_tsa(dvs): + update_bgp_global_dev_state(dvs, "false") + +class VnetVxlanVrfTunnel(object): + + ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" + ASIC_TUNNEL_MAP = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP" + ASIC_TUNNEL_MAP_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY" + ASIC_TUNNEL_TERM_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" + ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" + ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + ASIC_ROUTE_ENTRY = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + ASIC_NEXT_HOP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" + ASIC_VLAN_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VLAN" + ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" + ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" + ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" + APP_VNET_MONITOR = "VNET_MONITOR_TABLE" + + ecn_modes_map = { + "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", + "copy_from_outer": "SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER" + } + + dscp_modes_map = { + "pipe" : "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL", + "uniform" : "SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL" + } + + ttl_modes_map = { + "pipe" : "SAI_TUNNEL_TTL_MODE_PIPE_MODEL", + "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" + } + + def __init__(self): + self.tunnel_map_ids = set() + self.tunnel_map_entry_ids = set() + self.tunnel_ids = set() + self.tunnel_term_ids = set() + self.ipinip_tunnel_term_ids = {} + self.tunnel_map_map = {} + self.tunnel = {} + self.vnet_vr_ids = set() + self.vr_map = {} + self.nh_ids = {} + self.nhg_ids = {} + + def fetch_exist_entries(self, dvs): + self.vnet_vr_ids = get_exist_entries(dvs, self.ASIC_VRF_TABLE) + self.tunnel_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TABLE) + self.tunnel_map_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP) + self.tunnel_map_entry_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_MAP_ENTRY) + self.tunnel_term_ids = get_exist_entries(dvs, self.ASIC_TUNNEL_TERM_ENTRY) + self.rifs = get_exist_entries(dvs, self.ASIC_RIF_TABLE) + self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) + self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP) + self.nhgs = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP) + self.nhgms = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP_MEMBER) + self.bfd_sessions = get_exist_entries(dvs, self.ASIC_BFD_SESSION) + + global loopback_id, def_vr_id, switch_mac + if not loopback_id: + loopback_id = get_lo(dvs) + + if not def_vr_id: + def_vr_id = get_default_vr_id(dvs) + + if switch_mac is None: + switch_mac = get_switch_mac(dvs) + + def check_ipinip_tunnel(self, dvs, tunnel_name, dscp_mode, ecn_mode, ttl_mode): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) + tunnel_attrs = { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', + 'SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE': self.dscp_modes_map[dscp_mode], + 'SAI_TUNNEL_ATTR_ENCAP_ECN_MODE': self.ecn_modes_map[ecn_mode], + 'SAI_TUNNEL_ATTR_ENCAP_TTL_MODE': self.ttl_modes_map[ttl_mode] + } + check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, tunnel_attrs) + + self.tunnel_ids.add(tunnel_id) + self.tunnel[tunnel_name] = tunnel_id + + def check_del_ipinip_tunnel(self, dvs, tunnel_name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tunnel_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1)[0] + check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id) + self.tunnel_ids.remove(tunnel_id) + assert tunnel_id == self.tunnel[tunnel_name] + self.tunnel.pop(tunnel_name) + + def check_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + dst_ip = ipaddress.ip_network(dst_ip) + src_ip = ipaddress.ip_network(src_ip) + tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) + tunnel_term_attrs = { + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_MP2MP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_IPINIP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': str(dst_ip.network_address), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP_MASK': str(dst_ip.netmask), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP': str(src_ip.network_address), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP_MASK': str(src_ip.netmask), + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': self.tunnel[tunnel_name] + } + check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, tunnel_term_attrs) + + self.tunnel_term_ids.add(tunnel_term_id) + self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] = tunnel_term_id + + def check_del_ipinip_tunnel_decap_term(self, dvs, tunnel_name, dst_ip, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + dst_ip = ipaddress.ip_network(dst_ip) + src_ip = ipaddress.ip_network(src_ip) + tunnel_term_id = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids, 1)[0] + check_deleted_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id) + self.tunnel_term_ids.remove(tunnel_term_id) + assert self.ipinip_tunnel_term_ids[(tunnel_name, src_ip, dst_ip)] == tunnel_term_id + self.ipinip_tunnel_term_ids.pop((tunnel_name, src_ip, dst_ip)) + + def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + global loopback_id, def_vr_id + + tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + tunnel_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) + tunnel_term_id = get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) + + # check that the vxlan tunnel termination are there + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created" + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasm't created" + + check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[2], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[3], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[0], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VLAN_ID', + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[1], + { + 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VNI', + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_TABLE, tunnel_id, + { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', + 'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id, + 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[0], tunnel_map_id[2]), + 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': '2:%s,%s' % (tunnel_map_id[1], tunnel_map_id[3]), + 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip, + } + ) + + expected_attributes = { + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE': 'SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID': def_vr_id, + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP': src_ip, + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE': 'SAI_TUNNEL_TYPE_VXLAN', + 'SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID': tunnel_id, + } + + check_object(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, tunnel_term_id, expected_attributes) + + self.tunnel_map_ids.update(tunnel_map_id) + self.tunnel_ids.add(tunnel_id) + self.tunnel_term_ids.add(tunnel_term_id) + self.tunnel_map_map[tunnel_name] = tunnel_map_id + self.tunnel[tunnel_name] = tunnel_id + + def check_del_vxlan_tunnel(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + old_tunnel = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1) + check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, old_tunnel[0]) + self.tunnel_ids.remove(old_tunnel[0]) + + old_tunnel_maps = get_deleted_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + for old_tunnel_map in old_tunnel_maps: + check_deleted_object(asic_db, self.ASIC_TUNNEL_MAP, old_tunnel_map) + self.tunnel_map_ids.remove(old_tunnel_map) + + def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + time.sleep(2) + + if (self.tunnel_map_map.get(tunnel_name) is None): + tunnel_map_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + else: + tunnel_map_id = self.tunnel_map_map[tunnel_name] + + tunnel_map_entry_id = get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2) + + # check that the vxlan tunnel termination are there + assert how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY is created too early" + + check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0], + { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[3], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vnet_name].get('ing'), + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id, + } + ) + + check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1], + { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[2], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id, + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vnet_name].get('egr'), + } + ) + + self.tunnel_map_entry_ids.update(tunnel_map_entry_id) + + def check_vnet_entry(self, dvs, name, peer_list=[]): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + #Assert if there are linklocal entries + tbl = swsscommon.Table(app_db, "VNET_ROUTE_TUNNEL_TABLE") + route_entries = tbl.getKeys() + assert "ff00::/8" not in route_entries + assert "fe80::/64" not in route_entries + + #Check virtual router objects + assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids) + 1),\ + "The VR objects are not created" + + new_vr_ids = get_created_entries(asic_db, self.ASIC_VRF_TABLE, self.vnet_vr_ids, 1) + + self.vnet_vr_ids.update(new_vr_ids) + self.vr_map[name] = { 'ing':new_vr_ids[0], 'egr':new_vr_ids[0], 'peer':peer_list } + + def check_default_vnet_entry(self, dvs, name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + #Check virtual router objects + assert how_many_entries_exist(asic_db, self.ASIC_VRF_TABLE) == (len(self.vnet_vr_ids)),\ + "Some VR objects are created" + #Mappers for default VNET is created with default VR objects. + self.vr_map[name] = { 'ing':list(self.vnet_vr_ids)[0], 'egr':list(self.vnet_vr_ids)[0], 'peer':[] } + + def check_del_vnet_entry(self, dvs, name): + # TODO: Implement for VRF VNET + return True + + def vnet_route_ids(self, dvs, name, local=False): + vr_set = set() + + vr_set.add(self.vr_map[name].get('ing')) + + try: + for peer in self.vr_map[name].get('peer'): + vr_set.add(self.vr_map[peer].get('ing')) + except IndexError: + pass + + return vr_set + + def check_router_interface(self, dvs, intf_name, name, vlan_oid=0, intf_type="physical"): + ''' + :param str intf_type Indicates whether the interface named 'intf_name' is a physical interface, + a VLAN interface in a peerless VNet, or a VLAN interface in a VNet with one peer. This is important since + it determines how many new routes we should expect to be added to the 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY' + table after the creation of the interface. Entries for more than one peers are currently not added + to the 'intf_route_count' dictionary since no test uses them. + ''' + # Check RIF in ingress VRF + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + global switch_mac + global intf_route_count + + expected_attr = { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": self.vr_map[name].get('ing'), + "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": switch_mac, + "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100", + } + + if vlan_oid: + expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_VLAN'}) + expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_VLAN_ID': vlan_oid}) + else: + expected_attr.update({'SAI_ROUTER_INTERFACE_ATTR_TYPE': 'SAI_ROUTER_INTERFACE_TYPE_PORT'}) + + new_rif = get_created_entry(asic_db, self.ASIC_RIF_TABLE, self.rifs) + check_object(asic_db, self.ASIC_RIF_TABLE, new_rif, expected_attr) + + #IP2ME route will be created with every router interface + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, intf_route_count[intf_type]) + + if vlan_oid: + expected_attr = { 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } + check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) + + expected_attr = { 'SAI_VLAN_ATTR_UNKNOWN_MULTICAST_FLOOD_CONTROL_TYPE': 'SAI_VLAN_FLOOD_CONTROL_TYPE_NONE' } + check_object(asic_db, self.ASIC_VLAN_TABLE, vlan_oid, expected_attr) + + check_linux_intf_arp_proxy(dvs, intf_name) + + self.rifs.add(new_rif) + self.routes.update(new_route) + + def check_del_router_interface(self, dvs, name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + old_rif = get_deleted_entries(asic_db, self.ASIC_RIF_TABLE, self.rifs, 1) + check_deleted_object(asic_db, self.ASIC_RIF_TABLE, old_rif[0]) + + self.rifs.remove(old_rif[0]) + + def check_vnet_local_routes(self, dvs, name, vlan_subnet_route=False): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vr_ids = self.vnet_route_ids(dvs, name, True) + count = len(vr_ids) + # The route to the VLAN subnet must have been added when the VLAN was created. + # We are not expecting any new routes in that case. + expected_route_count = 0 if vlan_subnet_route else count + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, expected_route_count) + + #Routes are not replicated to egress VRF, return if count is 0, else check peering + if not expected_route_count: + return + + asic_vrs = set() + for idx in range(expected_route_count): + rt_key = json.loads(new_route[idx]) + asic_vrs.add(rt_key['vr']) + + assert asic_vrs == vr_ids + + self.routes.update(new_route) + return new_route + + def check_vnet_local_route_nexthops(self, dvs, route_key, nexthops=[]): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + route_tbl = swsscommon.Table(asic_db, self.ASIC_ROUTE_ENTRY) + nhgm_tbl = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + nh_tbl = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + + # Verify next hop or next hop group + status, route_fvs = route_tbl.get(route_key) + route_fvs = dict(route_fvs) + assert status, "Error occurred when trying to get a route entry key" + + if (len(nexthops) == 1): + expected_attr = { + "SAI_NEXT_HOP_ATTR_IP": nexthops[0] + } + + # Check next hop entry + nh_key = route_fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID") + assert nh_key, "Route entry does not have a next hop" + check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attr) + + elif (len(nexthops) > 1): + expected_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + } + + # Check next hop group entry + nhg_key = route_fvs.get("SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID") + assert nhg_key, "Route entry does not have a next hop" + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, nhg_key, expected_attr) + + # Check next hop group members + remaining_nexthops = set(nexthops) + nhgms = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP_MEMBER) + for nhgm in nhgms: + status, fvs = nhgm_tbl.get(nhgm) + fvs = dict(fvs) + assert status, "Error occurred when trying to get a next hop group member key" + + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg_key: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = nh_tbl.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Error occurred when trying to get a next hop key" + assert nh_fvs["SAI_NEXT_HOP_ATTR_IP"] in remaining_nexthops, "Next hop in next hop group member not expected" + remaining_nexthops.remove(nh_fvs["SAI_NEXT_HOP_ATTR_IP"]) + + assert len(remaining_nexthops) == 0, "Not all nexthops are associated with the next hop group" + + def check_del_vnet_local_routes(self, dvs, name, prefix): + # TODO: Implement for VRF VNET + vr_ids = self.vnet_route_ids(dvs, name, True) + + routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) + + for route in routes: + rt_key = json.loads(route) + assert rt_key['vr'] not in vr_ids or rt_key['dest'] != prefix, "The route %s in VRF %s is not deleted" % (prefix, name) + + def check_vnet_routes(self, dvs, name, endpoint, tunnel, mac="", vni=0, route_ids=""): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vr_ids = self.vnet_route_ids(dvs, name) + count = len(vr_ids) + + # Check routes in ingress VRF + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + + if vni: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni}) + + if mac: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac}) + + if endpoint in self.nh_ids: + new_nh = self.nh_ids[endpoint] + else: + new_nh = get_created_entry(asic_db, self.ASIC_NEXT_HOP, self.nhops) + self.nh_ids[endpoint] = new_nh + self.nhops.add(new_nh) + + check_object(asic_db, self.ASIC_NEXT_HOP, new_nh, expected_attr) + if not route_ids: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + else: + new_route = route_ids + + #Check if the route is in expected VRF + asic_vrs = set() + for idx in range(count): + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nh, + } + ) + rt_key = json.loads(new_route[idx]) + asic_vrs.add(rt_key['vr']) + + assert asic_vrs == vr_ids + + self.routes.update(new_route) + + return new_route + + def serialize_endpoint_group(self, endpoints): + endpoints.sort() + return ",".join(endpoints) + + def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, expected_attrs): + expected_endpoint_str = self.serialize_endpoint_group(expected_endpoint) + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + entries = set(tbl_nhgm.getKeys()) + endpoints = [] + for entry in entries: + status, fvs = tbl_nhgm.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = tbl_nh.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Got an error when get a key" + endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] + endpoints.append(endpoint) + assert endpoint in expected_attrs + if ordered_ecmp == "true": + assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID"] == expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + del expected_attrs[endpoint]['SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID'] + else: + assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + + check_object(asic_db, self.ASIC_NEXT_HOP, nh_key, expected_attrs[endpoint]) + + assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str + + def get_nexthop_groups(self, dvs, nhg): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + nhg_data = {} + nhg_data['id'] = nhg + entries = set(tbl_nhgm.getKeys()) + nhg_data['endpoints'] = [] + for entry in entries: + status, fvs = tbl_nhgm.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = tbl_nh.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Got an error when get a key" + endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] + nhg_data['endpoints'].append(endpoint) + return nhg_data + def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) + + vr_ids = self.vnet_route_ids(dvs, name) + count = len(vr_ids) + + expected_attrs = {} + for idx, endpoint in enumerate(endpoints): + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + if vni and vni[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) + if mac and mac[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + if ordered_ecmp == "true" and nh_seq_id: + expected_attr.update({'SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID': nh_seq_id[idx]}) + expected_attrs[endpoint] = expected_attr + + if nhg: + new_nhg = nhg + elif endpoint_str in self.nhg_ids: + new_nhg = self.nhg_ids[endpoint_str] + else: + new_nhg = get_created_entry(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) + self.nhg_ids[endpoint_str] = new_nhg + self.nhgs.add(new_nhg) + + + # Check routes in ingress VRF + expected_nhg_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" if ordered_ecmp == "false" else "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_ORDERED_ECMP", + } + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, new_nhg, expected_nhg_attr) + + # Check nexthop group member + self.check_next_hop_group_member(dvs, new_nhg, ordered_ecmp, endpoints, expected_attrs) + + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Check if the route is in expected VRF + asic_vrs = set() + for idx in range(count): + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": new_nhg, + } + ) + rt_key = json.loads(new_route[idx]) + asic_vrs.add(rt_key['vr']) + + assert asic_vrs == vr_ids + + self.routes.update(new_route) + + return new_route, new_nhg + + def check_priority_vnet_ecmp_routes(self, dvs, name, endpoints_primary, tunnel, mac=[], vni=[], route_ids=[], count =1, prefix =""): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + endpoint_str_primary = name + "|" + self.serialize_endpoint_group(endpoints_primary) + new_nhgs = [] + expected_attrs_primary = {} + for idx, endpoint in enumerate(endpoints_primary): + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + if vni and vni[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) + if mac and mac[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + expected_attrs_primary[endpoint] = expected_attr + + if len(endpoints_primary) == 1: + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + return new_route + else : + new_nhgs = get_all_created_entries(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) + found_match = False + + for nhg in new_nhgs: + nhg_data = self.get_nexthop_groups(dvs, nhg) + eplist = self.serialize_endpoint_group(nhg_data['endpoints']) + if eplist == self.serialize_endpoint_group(endpoints_primary): + self.nhg_ids[endpoint_str_primary] = nhg + found_match = True + + assert found_match, "the expected Nexthop group was not found." + + # Check routes in ingress VRF + expected_nhg_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + } + for nhg in new_nhgs: + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, nhg, expected_nhg_attr) + + # Check nexthop group member + self.check_next_hop_group_member(dvs, self.nhg_ids[endpoint_str_primary], "false", endpoints_primary, expected_attrs_primary) + + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Check if the route is in expected VRF + active_nhg = self.nhg_ids[endpoint_str_primary] + for idx in range(count): + if prefix != "" and prefix not in new_route[idx] : + continue + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": active_nhg, + } + ) + rt_key = json.loads(new_route[idx]) + + + self.routes.update(new_route) + del self.nhg_ids[endpoint_str_primary] + return new_route + + def check_del_vnet_routes(self, dvs, name, prefixes=[], absent=False): + # TODO: Implement for VRF VNET + + def _access_function(): + route_entries = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) + route_prefixes = [json.loads(route_entry)["dest"] for route_entry in route_entries] + return (all(prefix not in route_prefixes for prefix in prefixes), None) + + if absent: + return True if _access_function()== None else False + elif prefixes: + wait_for_result(_access_function) + + return True + + def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_object(app_db, self.APP_VNET_MONITOR, key, + { + "packet_type": packet_type, + "overlay_dmac" : overlay_dmac + } + ) + return True + + def check_custom_monitor_deleted(self, dvs, prefix, endpoint): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_deleted_object(app_db, self.APP_VNET_MONITOR, key) diff --git a/tlm_teamd/Makefile.am b/tlm_teamd/Makefile.am index 46ddfd22f55..4548ea06ba3 100644 --- a/tlm_teamd/Makefile.am +++ b/tlm_teamd/Makefile.am @@ -15,7 +15,7 @@ tlm_teamd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(JANSSON_CFLAGS) tlm_teamd_LDADD = $(LDFLAGS_ASAN) -lhiredis -lswsscommon -lteamdctl $(JANSSON_LIBS) if GCOV_ENABLED -tlm_teamd_LDADD += -lgcovpreload +tlm_teamd_SOURCES += ../gcovpreload/gcovpreload.cpp endif if ASAN_ENABLED diff --git a/tlm_teamd/values_store.cpp b/tlm_teamd/values_store.cpp index f883d22fd3c..957194b4c5b 100644 --- a/tlm_teamd/values_store.cpp +++ b/tlm_teamd/values_store.cpp @@ -281,6 +281,14 @@ void ValuesStore::remove_keys_db(const std::vector & keys) const auto & p = split_key(key); const auto & table_name = p.first; const auto & table_key = p.second; + // Do not delete te key from State Db for table LAB_TABLE. LAB_TABLE entry is created/deleted + // from teamsyncd on detecting netlink of teamd dev as up/down. For some reason + // if we do not get state dump from teamdctl it might be transient issue. If it is + // persistent issue then teamsyncd might be able to catch it and delete state db entry + // or we can keep entry in it's current state as best effort. Similar to try_add_lag which is best effort + // to connect to teamdctl and if it fails we do not delete State Db entry. + if (table_name == "LAG_TABLE") + continue; swss::Table table(m_db, table_name); table.del(table_key); } diff --git a/warmrestart/warmRestartHelper.cpp b/warmrestart/warmRestartHelper.cpp index 580e9f98a62..b7dafd64d72 100644 --- a/warmrestart/warmRestartHelper.cpp +++ b/warmrestart/warmRestartHelper.cpp @@ -264,7 +264,7 @@ void WarmStartHelper::reconcile(void) * Compare all field-value-tuples within two vectors. * * Example: v1 {nexthop: 10.1.1.1, ifname: eth1} - * v2 {nexthop: 10.1.1.2, ifname: eth2} + * v2 {nexthop: 10.1.1.2, ifname: eth2, protocol: kernel, weight: 1} * * Returns: * @@ -274,25 +274,24 @@ void WarmStartHelper::reconcile(void) bool WarmStartHelper::compareAllFV(const std::vector &v1, const std::vector &v2) { + /* Size mismatch implies a diff */ + if (v1.size() != v2.size()) + { + return true; + } + std::unordered_map v1Map((v1.begin()), v1.end()); /* Iterate though all v2 tuples to check if their content match v1 ones */ for (auto &v2fv : v2) { auto v1Iter = v1Map.find(v2fv.first); - /* - * The sizes of both tuple-vectors should always match within any - * given application. In other words, all fields within v1 should be - * also present in v2. - * - * To make this possible, every application should continue relying on a - * uniform schema to create/generate information. For example, fpmsyncd - * will be always expected to push FieldValueTuples with "nexthop" and - * "ifname" fields; neighsyncd is expected to make use of "family" and - * "neigh" fields, etc. The existing reconciliation logic will rely on - * this assumption. - */ - assert(v1Iter != v1Map.end()); + + /* Return true when v2 has a new field */ + if (v1Iter == v1Map.end()) + { + return true; + } if (compareOneFV(v1Map[fvField(*v1Iter)], fvValue(v2fv))) {