diff --git a/.azure-pipelines/azure-pipelines-build.yml b/.azure-pipelines/azure-pipelines-build.yml
index 550e2ccd979..7f96f128b23 100644
--- a/.azure-pipelines/azure-pipelines-build.yml
+++ b/.azure-pipelines/azure-pipelines-build.yml
@@ -1,3 +1,5 @@
+# The Azure Pipelines template for the official build and the upgrade-version build
+
parameters:
- name: 'jobFilters'
type: object
@@ -5,68 +7,93 @@ parameters:
- name: 'buildOptions'
type: string
default: 'SONIC_CONFIG_BUILD_JOBS=1'
-- name: 'buildSlave'
- type: string
- default: 'n'
+- name: 'preSteps'
+ type: stepList
+ default: []
- name: 'postSteps'
type: stepList
default: []
jobs:
-- template: azure-pipelines-job-groups.yml
+- template: azure-pipelines-image-template.yml
parameters:
jobFilters: ${{ parameters.jobFilters }}
- preSteps:
- - script: |
- containers=$(docker container ls | grep "sonic-slave" | awk '{ print $1 }')
- if [ ! -z "$containers" ]; then
- docker container kill $containers || true
- sleep 5
- fi
- if [ "${{ parameters.buildSlave }}" == "y" ]; then
- images=$(docker images 'sonic-slave-*' -a -q)
- [ ! -z "$images" ] && docker rmi -f $images
- fi
- sudo rm -rf $(ls -A1)
- displayName: 'Init'
- - checkout: self
- submodules: recursive
- displayName: 'Checkout code'
- - script: |
- make ${{ parameters.buildOptions }} PLATFORM=$GROUP_NAME configure
- displayName: 'Make configure'
- postSteps:
- - ${{ parameters.postSteps }}
- - publish: $(System.DefaultWorkingDirectory)/target
- artifact: 'sonic-buildimage.$(GROUP_NAME)$(GROUP_EXTNAME)'
- displayName: "Archive sonic image"
+ preSteps: ${{ parameters.preSteps }}
+ postSteps: ${{ parameters.postSteps }}
+ jobVariables:
+ PLATFORM_AZP: $(GROUP_NAME)
+ PLATFORM_ARCH: amd64
+ BUILD_OPTIONS: ${{ parameters.buildOptions }}
+ dbg_image: false
+ swi_image: false
+ raw_image: false
+ sync_rpc_image: false
+ platform_rpc: false
jobGroups:
- name: vs
- script: |
- sudo bash -c "echo 1 > /proc/sys/vm/compact_memory"
- make ${{ parameters.buildOptions }} target/sonic-vs.img.gz
- - name: broadcom
- script: |
- make ${{ parameters.buildOptions }} target/sonic-broadcom.bin target/sonic-aboot-broadcom.swi
+ variables:
+ dbg_image: true
- name: barefoot
- script: |
- make ${{ parameters.buildOptions }} target/sonic-barefoot.bin target/sonic-aboot-barefoot.swi
+ variables:
+ swi_image: true
+ - name: broadcom
+ variables:
+ dbg_image: true
+ swi_image: true
+ raw_image: true
+ sync_rpc_image: true
+ platform_rpc: brcm
- name: centec
- script: |
- make ${{ parameters.buildOptions }} INSTALL_DEBUG_TOOLS=y target/sonic-centec.bin
- mv target/sonic-centec.bin target/sonic-centec-dbg.bin
- make ${{ parameters.buildOptions }} target/sonic-centec.bin
- make ${{ parameters.buildOptions }} ENABLE_SYNCD_RPC=y target/docker-syncd-centec-rpc.gz
+ variables:
+ dbg_image: true
+ sync_rpc_image: true
+ platform_rpc: centec
+ - name: centec-arm64
+ pool: sonicbld_8c
+ variables:
+ arch: arm64
+ timeoutInMinutes: 1800
+ - name: generic
+ variables:
+ dbg_image: true
- name: innovium
- script: |
- make ${{ parameters.buildOptions }} SONIC_CONFIG_BUILD_JOBS=1 target/sonic-innovium.bin
+ variables:
+ swi_image: true
+ - name: marvell-armhf
+ pool: sonicbld_8c
+ variables:
+ arch: armhf
+ timeoutInMinutes: 1800
- name: mellanox
- script: |
- make ${{ parameters.buildOptions }} target/sonic-mellanox.bin
- - name: mellanox
- extName: _rpc
- script: |
- make ${{ parameters.buildOptions }} ENABLE_SYNCD_RPC=y all
+ variables:
+ dbg_image: true
+ sync_rpc_image: true
+ platform_rpc: mlnx
- name: nephos
- script: |
- make ${{ parameters.buildOptions }} target/sonic-nephos.bin
+ variables:
+ dbg_image: true
+ sync_rpc_image: true
+ platform_rpc: nephos
+ buildSteps:
+ - bash: |
+ if [ $(GROUP_NAME) == vs ]; then
+ if [ $(dbg_image) == true ]; then
+ make $BUILD_OPTIONS INSTALL_DEBUG_TOOLS=y target/sonic-vs.img.gz && mv target/sonic-vs.img.gz target/sonic-vs-dbg.img.gz
+ fi
+ make $BUILD_OPTIONS target/docker-sonic-vs.gz target/sonic-vs.img.gz target/docker-ptf.gz
+ else
+ if [ $(dbg_image) == true ]; then
+ make $BUILD_OPTIONS INSTALL_DEBUG_TOOLS=y target/sonic-$(GROUP_NAME).bin && \
+ mv target/sonic-$(GROUP_NAME).bin target/sonic-$(GROUP_NAME)-dbg.bin
+ fi
+ if [ $(swi_image) == true ]; then
+ make $BUILD_OPTIONS ENABLE_IMAGE_SIGNATURE=y target/sonic-aboot-$(GROUP_NAME).swi
+ fi
+ if [ $(raw_image) == true ]; then
+ make $BUILD_OPTIONS target/sonic-$(GROUP_NAME).raw
+ fi
+ if [ $(sync_rpc_image) == true ]; then
+ make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y target/docker-syncd-$(platform_rpc)-rpc.gz
+ fi
+ make $BUILD_OPTIONS target/sonic-$(GROUP_NAME).bin
+ fi
diff --git a/.azure-pipelines/azure-pipelines-image-template.yml b/.azure-pipelines/azure-pipelines-image-template.yml
new file mode 100644
index 00000000000..fb7be2fa69f
--- /dev/null
+++ b/.azure-pipelines/azure-pipelines-image-template.yml
@@ -0,0 +1,54 @@
+# The Azure Pipelines template for the PR build, the official build, and the upgrade-version build
+
+parameters:
+- name: 'jobFilters'
+ type: object
+ default: ''
+- name: 'preSteps'
+ type: stepList
+ default: []
+- name: 'buildSteps'
+ type: stepList
+ default: []
+- name: 'postSteps'
+ type: stepList
+ default: []
+- name: jobGroups
+ type: object
+ default: []
+- name: jobVariables
+ type: object
+ default: []
+jobs:
+- template: azure-pipelines-job-groups.yml
+ parameters:
+ jobFilters: ${{ parameters.jobFilters }}
+ jobVariables: ${{ parameters.jobVariables }}
+ preSteps:
+ - template: cleanup.yml
+    - ${{ parameters.preSteps }}
+ - script: |
+ if [ -n "$(CACHE_MODE)" ] && echo $(PLATFORM_AZP) | grep -E -q "^(vs|broadcom|mellanox)$"; then
+ CACHE_OPTIONS="SONIC_DPKG_CACHE_METHOD=$(CACHE_MODE) SONIC_DPKG_CACHE_SOURCE=/nfs/dpkg_cache/$(PLATFORM_AZP)"
+ BUILD_OPTIONS="$(BUILD_OPTIONS) $CACHE_OPTIONS"
+ echo "##vso[task.setvariable variable=BUILD_OPTIONS]$BUILD_OPTIONS"
+ fi
+ displayName: "Make build options"
+ - checkout: self
+ submodules: recursive
+ displayName: 'Checkout code'
+ - script: |
+ sudo modprobe overlay
+ sudo apt-get install -y acl
+ export DOCKER_DATA_ROOT_FOR_MULTIARCH=/data/march/docker
+ sudo bash -c "echo 1 > /proc/sys/vm/compact_memory"
+ ENABLE_DOCKER_BASE_PULL=y make PLATFORM=$(PLATFORM_AZP) PLATFORM_ARCH=$(PLATFORM_ARCH) configure
+ displayName: 'Make configure'
+ postSteps:
+ - publish: $(System.DefaultWorkingDirectory)/target
+ artifact: 'sonic-buildimage.$(GROUP_NAME)$(GROUP_EXTNAME)'
+ displayName: "Archive sonic image"
+ - ${{ parameters.postSteps }}
+ - template: cleanup.yml
+ jobGroups: ${{ parameters.jobGroups }}
+ buildSteps: ${{ parameters.buildSteps }}
diff --git a/.azure-pipelines/azure-pipelines-job-groups.yml b/.azure-pipelines/azure-pipelines-job-groups.yml
index 1ca5c932b0a..d5be80dcde3 100644
--- a/.azure-pipelines/azure-pipelines-job-groups.yml
+++ b/.azure-pipelines/azure-pipelines-job-groups.yml
@@ -2,6 +2,9 @@ parameters:
- name: 'preSteps'
type: stepList
default: []
+- name: 'buildSteps'
+ type: stepList
+ default: []
- name: "postSteps"
type: stepList
default: []
@@ -16,14 +19,14 @@ parameters:
default: ''
- name: 'timeoutInMinutes'
type: 'number'
- default: 1440
+ default: 600
- name: 'jobFilters'
type: object
default: ''
jobs:
- ${{ each jobGroup in parameters.jobGroups }}:
- - ${{ if or(eq(parameters.jobFilters, ''), containsValue(parameters.jobFilters, jobGroup.name)) }}:
+ - ${{ if or(eq(parameters.jobFilters, ''), containsValue(parameters.jobFilters, jobGroup.name), endswith(variables['Build.DefinitionName'], format('.{0}{1}', jobGroup.name, jobGroup.extName))) }}:
- job: ${{ replace(format('{0}{1}', jobGroup.name, jobGroup.extName), '-', '_') }}
${{ each pair in jobGroup }}:
${{ if not(in(pair.key, 'job', 'name', 'extName', 'variables', 'steps', 'script', 'scriptEnv')) }}:
@@ -50,4 +53,5 @@ jobs:
displayName: 'JobScript'
- ${{ if ne(jobGroup.steps, '') }}:
- ${{ jobGroup.steps }}
+ - ${{ parameters.buildSteps }}
- ${{ parameters.postSteps }}
diff --git a/.azure-pipelines/official-build.yml b/.azure-pipelines/official-build.yml
index 8524027d7b1..fa79be96974 100644
--- a/.azure-pipelines/official-build.yml
+++ b/.azure-pipelines/official-build.yml
@@ -9,6 +9,7 @@ schedules:
branches:
include:
- master
+ - 202012
always: true
trigger: none
@@ -16,47 +17,13 @@ pr: none
stages:
- stage: Build
-
+ pool: sonicbld
+ variables:
+ CACHE_MODE: wcache
+ ${{ if eq(variables['Build.SourceBranchName'], '202012') }}:
+ VERSION_CONTROL_OPTIONS: 'SONIC_VERSION_CONTROL_COMPONENTS=deb,py2,py3,web'
jobs:
- - template: build-template.yml
- parameters:
- platform: broadcom
- platform_short: brcm
- cache_mode: wcache
- dbg_image: true
- swi_image: true
- raw_image: true
- sync_rpc_image: true
-
- - template: build-template.yml
- parameters:
- platform: mellanox
- platform_short: mlnx
- cache_mode: wcache
- dbg_image: true
- sync_rpc_image: true
-
- - template: build-template.yml
- parameters:
- platform: vs
- platform_short: vs
- dbg_image: true
- cache_mode: wcache
-
- - template: build-template.yml
- parameters:
- timeout: 3600
- platform: marvell-armhf
- platform_arch: armhf
- platform_short: marvell-armhf
- cache_mode: wcache
- pool: sonicbld_8c
-
- - template: build-template.yml
+ - template: azure-pipelines-build.yml
parameters:
- timeout: 3600
- platform: centec-arm64
- platform_arch: arm64
- platform_short: centec-arm64
- cache_mode: wcache
- pool: sonicbld_8c
+ buildOptions: 'USERNAME=admin SONIC_BUILD_JOBS=$(nproc) ${{ variables.VERSION_CONTROL_OPTIONS }}'
+ jobFilters: none
diff --git a/build_debian.sh b/build_debian.sh
index c95710641e1..17e48b9b62d 100755
--- a/build_debian.sh
+++ b/build_debian.sh
@@ -79,6 +79,8 @@ TARGET_PATH=$TARGET_PATH scripts/build_debian_base_system.sh $CONFIGURED_ARCH $I
# Prepare buildinfo
sudo scripts/prepare_debian_image_buildinfo.sh $CONFIGURED_ARCH $IMAGE_DISTRO $FILESYSTEM_ROOT $http_proxy
+sudo chown root:root $FILESYSTEM_ROOT
+
## Config hostname and hosts, otherwise 'sudo ...' will complain 'sudo: unable to resolve host ...'
sudo LANG=C chroot $FILESYSTEM_ROOT /bin/bash -c "echo '$HOSTNAME' > /etc/hostname"
sudo LANG=C chroot $FILESYSTEM_ROOT /bin/bash -c "echo '127.0.0.1 $HOSTNAME' >> /etc/hosts"
@@ -109,7 +111,7 @@ sudo cp files/apt/apt.conf.d/{81norecommends,apt-{clean,gzip-indexes,no-language
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y update
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y upgrade
echo '[INFO] Install packages for building image'
-sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install makedev psmisc systemd-sysv
+sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install makedev psmisc
## Create device files
echo '[INFO] MAKEDEV'
@@ -244,15 +246,16 @@ sudo cp files/docker/docker.service.conf $_
## Fix systemd race between docker and containerd
sudo sed -i '/After=/s/$/ containerd.service/' $FILESYSTEM_ROOT/lib/systemd/system/docker.service
-## Create redis group
-sudo LANG=C chroot $FILESYSTEM_ROOT groupadd -f redis
-
## Create default user
## Note: user should be in the group with the same name, and also in sudo/docker/redis groups
-sudo LANG=C chroot $FILESYSTEM_ROOT useradd -G sudo,docker,redis $USERNAME -c "$DEFAULT_USERINFO" -m -s /bin/bash
+sudo LANG=C chroot $FILESYSTEM_ROOT useradd -G sudo,docker $USERNAME -c "$DEFAULT_USERINFO" -m -s /bin/bash
## Create password for the default user
echo "$USERNAME:$PASSWORD" | sudo LANG=C chroot $FILESYSTEM_ROOT chpasswd
+## Create redis group
+sudo LANG=C chroot $FILESYSTEM_ROOT groupadd -f redis
+sudo LANG=C chroot $FILESYSTEM_ROOT usermod -aG redis $USERNAME
+
if [[ $CONFIGURED_ARCH == amd64 ]]; then
## Pre-install hardware drivers
sudo LANG=C chroot $FILESYSTEM_ROOT apt-get -y install \
@@ -347,7 +350,8 @@ sudo LANG=C chroot $FILESYSTEM_ROOT bash -c "find /usr/share/i18n/locales/ ! -na
# Install certain fundamental packages from $IMAGE_DISTRO-backports in order to get
# more up-to-date (but potentially less stable) versions
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y -t $IMAGE_DISTRO-backports install \
- picocom
+ picocom \
+ systemd-sysv
if [[ $CONFIGURED_ARCH == amd64 ]]; then
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y download \
@@ -583,6 +587,12 @@ if [[ $CONFIGURED_ARCH == armhf || $CONFIGURED_ARCH == arm64 ]]; then
DOCKERFS_PATH=../dockerfs/
fi
+# Ensure admin gid is 1000
+gid_user=$(sudo LANG=C chroot $FILESYSTEM_ROOT id -g $USERNAME) || gid_user="none"
+if [ "${gid_user}" != "1000" ]; then
+ die "expect gid 1000. current:${gid_user}"
+fi
+
## Compress docker files
pushd $FILESYSTEM_ROOT && sudo tar czf $OLDPWD/$FILESYSTEM_DOCKERFS -C ${DOCKERFS_PATH}var/lib/docker .; popd
diff --git a/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-C32/port_config.ini b/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-C32/port_config.ini
index d7d83ae21e7..62482e72564 100644
--- a/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-C32/port_config.ini
+++ b/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-C32/port_config.ini
@@ -1,35 +1,35 @@
-# name lanes alias index speed
-Ethernet0 33,34,35,36 hundredGigE1/1 1 100000
-Ethernet8 41,42,43,44 hundredGigE1/2 2 100000
-Ethernet16 49,50,51,52 hundredGigE1/3 3 100000
-Ethernet24 57,58,59,60 hundredGigE1/4 4 100000
-Ethernet32 65,66,67,68 hundredGigE1/5 5 100000
-Ethernet40 73,74,75,76 hundredGigE1/6 6 100000
-Ethernet48 81,82,83,84 hundredGigE1/7 7 100000
-Ethernet56 89,90,91,92 hundredGigE1/8 8 100000
-Ethernet64 1,2,3,4 hundredGigE1/9 9 100000
-Ethernet72 9,10,11,12 hundredGigE1/10 10 100000
-Ethernet80 17,18,19,20 hundredGigE1/11 11 100000
-Ethernet88 25,26,27,28 hundredGigE1/12 12 100000
-Ethernet96 97,98,99,100 hundredGigE1/13 13 100000
-Ethernet104 105,106,107,108 hundredGigE1/14 14 100000
-Ethernet112 113,114,115,116 hundredGigE1/15 15 100000
-Ethernet120 121,122,123,124 hundredGigE1/16 16 100000
-Ethernet128 129,130,131,132 hundredGigE1/17 17 100000
-Ethernet136 137,138,139,140 hundredGigE1/18 18 100000
-Ethernet144 145,146,147,148 hundredGigE1/19 19 100000
-Ethernet152 153,154,155,156 hundredGigE1/20 20 100000
-Ethernet160 225,226,227,228 hundredGigE1/21 21 100000
-Ethernet168 233,234,235,236 hundredGigE1/22 22 100000
-Ethernet176 241,242,243,244 hundredGigE1/23 23 100000
-Ethernet184 249,250,251,252 hundredGigE1/24 24 100000
-Ethernet192 161,162,163,164 hundredGigE1/25 25 100000
-Ethernet200 169,170,171,172 hundredGigE1/26 26 100000
-Ethernet208 177,178,179,180 hundredGigE1/27 27 100000
-Ethernet216 185,186,187,188 hundredGigE1/28 28 100000
-Ethernet224 193,194,195,196 hundredGigE1/29 29 100000
-Ethernet232 201,202,203,204 hundredGigE1/30 30 100000
-Ethernet240 209,210,211,212 hundredGigE1/31 31 100000
-Ethernet248 217,218,219,220 hundredGigE1/32 32 100000
-Ethernet256 257 tenGigE1/33 33 10000
-Ethernet257 258 tenGigE1/34 34 10000
+# name lanes alias index speed
+Ethernet0 33,34,35,36 etp1 1 100000
+Ethernet8 41,42,43,44 etp2 2 100000
+Ethernet16 49,50,51,52 etp3 3 100000
+Ethernet24 57,58,59,60 etp4 4 100000
+Ethernet32 65,66,67,68 etp5 5 100000
+Ethernet40 73,74,75,76 etp6 6 100000
+Ethernet48 81,82,83,84 etp7 7 100000
+Ethernet56 89,90,91,92 etp8 8 100000
+Ethernet64 1,2,3,4 etp9 9 100000
+Ethernet72 9,10,11,12 etp10 10 100000
+Ethernet80 17,18,19,20 etp11 11 100000
+Ethernet88 25,26,27,28 etp12 12 100000
+Ethernet96 97,98,99,100 etp13 13 100000
+Ethernet104 105,106,107,108 etp14 14 100000
+Ethernet112 113,114,115,116 etp15 15 100000
+Ethernet120 121,122,123,124 etp16 16 100000
+Ethernet128 129,130,131,132 etp17 17 100000
+Ethernet136 137,138,139,140 etp18 18 100000
+Ethernet144 145,146,147,148 etp19 19 100000
+Ethernet152 153,154,155,156 etp20 20 100000
+Ethernet160 225,226,227,228 etp21 21 100000
+Ethernet168 233,234,235,236 etp22 22 100000
+Ethernet176 241,242,243,244 etp23 23 100000
+Ethernet184 249,250,251,252 etp24 24 100000
+Ethernet192 161,162,163,164 etp25 25 100000
+Ethernet200 169,170,171,172 etp26 26 100000
+Ethernet208 177,178,179,180 etp27 27 100000
+Ethernet216 185,186,187,188 etp28 28 100000
+Ethernet224 193,194,195,196 etp29 29 100000
+Ethernet232 201,202,203,204 etp30 30 100000
+Ethernet240 209,210,211,212 etp31 31 100000
+Ethernet248 217,218,219,220 etp32 32 100000
+Ethernet256 257 etp33 33 10000
+Ethernet257 258 etp34 34 10000
diff --git a/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-M-O16C64/port_config.ini b/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-M-O16C64/port_config.ini
index 101780149b5..b191e0a41a1 100644
--- a/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-M-O16C64/port_config.ini
+++ b/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-M-O16C64/port_config.ini
@@ -1,83 +1,83 @@
-# name lanes alias index speed
-Ethernet0 33,34 hundredGigE1/1/1 1 100000
-Ethernet2 35,36 hundredGigE1/1/2 1 100000
-Ethernet4 37,38 hundredGigE1/1/3 1 100000
-Ethernet6 39,40 hundredGigE1/1/4 1 100000
-Ethernet8 41,42 hundredGigE1/2/1 2 100000
-Ethernet10 43,44 hundredGigE1/2/2 2 100000
-Ethernet12 45,46 hundredGigE1/2/3 2 100000
-Ethernet14 47,48 hundredGigE1/2/4 2 100000
-Ethernet16 49,50 hundredGigE1/3/1 3 100000
-Ethernet18 51,52 hundredGigE1/3/2 3 100000
-Ethernet20 53,54 hundredGigE1/3/3 3 100000
-Ethernet22 55,56 hundredGigE1/3/4 3 100000
-Ethernet24 57,58 hundredGigE1/4/1 4 100000
-Ethernet26 59,60 hundredGigE1/4/2 4 100000
-Ethernet28 61,62 hundredGigE1/4/3 4 100000
-Ethernet30 63,64 hundredGigE1/4/4 4 100000
-Ethernet32 65,66 hundredGigE1/5/1 5 100000
-Ethernet34 67,68 hundredGigE1/5/2 5 100000
-Ethernet36 69,70 hundredGigE1/5/3 5 100000
-Ethernet38 71,72 hundredGigE1/5/4 5 100000
-Ethernet40 73,74 hundredGigE1/6/1 6 100000
-Ethernet42 75,76 hundredGigE1/6/2 6 100000
-Ethernet44 77,78 hundredGigE1/6/3 6 100000
-Ethernet46 79,80 hundredGigE1/6/4 6 100000
-Ethernet48 81,82 hundredGigE1/7/1 7 100000
-Ethernet50 83,84 hundredGigE1/7/2 7 100000
-Ethernet52 85,86 hundredGigE1/7/3 7 100000
-Ethernet54 87,88 hundredGigE1/7/4 7 100000
-Ethernet56 89,90 hundredGigE1/8/1 8 100000
-Ethernet58 91,92 hundredGigE1/8/2 8 100000
-Ethernet60 93,94 hundredGigE1/8/3 8 100000
-Ethernet62 95,96 hundredGigE1/8/4 8 100000
-Ethernet64 1,2 hundredGigE1/9/1 9 100000
-Ethernet66 3,4 hundredGigE1/9/2 9 100000
-Ethernet68 5,6 hundredGigE1/9/3 9 100000
-Ethernet70 7,8 hundredGigE1/9/4 9 100000
-Ethernet72 9,10 hundredGigE1/10/1 10 100000
-Ethernet74 11,12 hundredGigE1/10/2 10 100000
-Ethernet76 13,14 hundredGigE1/10/3 10 100000
-Ethernet78 15,16 hundredGigE1/10/4 10 100000
-Ethernet80 17,18 hundredGigE1/11/1 11 100000
-Ethernet82 19,20 hundredGigE1/11/2 11 100000
-Ethernet84 21,22 hundredGigE1/11/3 11 100000
-Ethernet86 23,24 hundredGigE1/11/4 11 100000
-Ethernet88 25,26 hundredGigE1/12/1 12 100000
-Ethernet90 27,28 hundredGigE1/12/2 12 100000
-Ethernet92 29,30 hundredGigE1/12/3 12 100000
-Ethernet94 31,32 hundredGigE1/12/4 12 100000
-Ethernet96 97,98,99,100,101,102,103,104 fourhundredGigE1/13 13 400000
-Ethernet104 105,106,107,108,109,110,111,112 fourhundredGigE1/14 14 400000
-Ethernet112 113,114,115,116,117,118,119,120 fourhundredGigE1/15 15 400000
-Ethernet120 121,122,123,124,125,126,127,128 fourhundredGigE1/16 16 400000
-Ethernet128 129,130 hundredGigE1/17/1 17 100000
-Ethernet130 131,132 hundredGigE1/17/2 17 100000
-Ethernet132 133,134 hundredGigE1/17/3 17 100000
-Ethernet134 135,136 hundredGigE1/17/4 17 100000
-Ethernet136 137,138 hundredGigE1/18/1 18 100000
-Ethernet138 139,140 hundredGigE1/18/2 18 100000
-Ethernet140 141,142 hundredGigE1/18/3 18 100000
-Ethernet142 143,144 hundredGigE1/18/4 18 100000
-Ethernet144 145,146 hundredGigE1/19/1 19 100000
-Ethernet146 147,148 hundredGigE1/19/2 19 100000
-Ethernet148 149,150 hundredGigE1/19/3 19 100000
-Ethernet150 151,152 hundredGigE1/19/4 19 100000
-Ethernet152 153,154 hundredGigE1/20/1 20 100000
-Ethernet154 155,156 hundredGigE1/20/2 20 100000
-Ethernet156 157,158 hundredGigE1/20/3 20 100000
-Ethernet158 159,160 hundredGigE1/20/4 20 100000
-Ethernet160 225,226,227,228,229,230,231,232 fourhundredGigE1/21 21 400000
-Ethernet168 233,234,235,236,237,238,239,240 fourhundredGigE1/22 22 400000
-Ethernet176 241,242,243,244,245,246,247,248 fourhundredGigE1/23 23 400000
-Ethernet184 249,250,251,252,253,254,255,256 fourhundredGigE1/24 24 400000
-Ethernet192 161,162,163,164,165,166,167,168 fourhundredGigE1/25 25 400000
-Ethernet200 169,170,171,172,173,174,175,176 fourhundredGigE1/26 26 400000
-Ethernet208 177,178,179,180,181,182,183,184 fourhundredGigE1/27 27 400000
-Ethernet216 185,186,187,188,189,190,191,192 fourhundredGigE1/28 28 400000
-Ethernet224 193,194,195,196,197,198,199,200 fourhundredGigE1/29 29 400000
-Ethernet232 201,202,203,204,205,206,207,208 fourhundredGigE1/30 30 400000
-Ethernet240 209,210,211,212,213,214,215,216 fourhundredGigE1/31 31 400000
-Ethernet248 217,218,219,220,221,222,223,224 fourhundredGigE1/32 32 400000
-Ethernet256 257 tenGigE1/33 33 10000
-Ethernet257 258 tenGigE1/34 34 10000
+# name lanes alias index speed
+Ethernet0 33,34 etp1a 1 100000
+Ethernet2 35,36 etp1b 1 100000
+Ethernet4 37,38 etp1c 1 100000
+Ethernet6 39,40 etp1d 1 100000
+Ethernet8 41,42 etp2a 2 100000
+Ethernet10 43,44 etp2b 2 100000
+Ethernet12 45,46 etp2c 2 100000
+Ethernet14 47,48 etp2d 2 100000
+Ethernet16 49,50 etp3a 3 100000
+Ethernet18 51,52 etp3b 3 100000
+Ethernet20 53,54 etp3c 3 100000
+Ethernet22 55,56 etp3d 3 100000
+Ethernet24 57,58 etp4a 4 100000
+Ethernet26 59,60 etp4b 4 100000
+Ethernet28 61,62 etp4c 4 100000
+Ethernet30 63,64 etp4d 4 100000
+Ethernet32 65,66 etp5a 5 100000
+Ethernet34 67,68 etp5b 5 100000
+Ethernet36 69,70 etp5c 5 100000
+Ethernet38 71,72 etp5d 5 100000
+Ethernet40 73,74 etp6a 6 100000
+Ethernet42 75,76 etp6b 6 100000
+Ethernet44 77,78 etp6c 6 100000
+Ethernet46 79,80 etp6d 6 100000
+Ethernet48 81,82 etp7a 7 100000
+Ethernet50 83,84 etp7b 7 100000
+Ethernet52 85,86 etp7c 7 100000
+Ethernet54 87,88 etp7d 7 100000
+Ethernet56 89,90 etp8a 8 100000
+Ethernet58 91,92 etp8b 8 100000
+Ethernet60 93,94 etp8c 8 100000
+Ethernet62 95,96 etp8d 8 100000
+Ethernet64 1,2 etp9a 9 100000
+Ethernet66 3,4 etp9b 9 100000
+Ethernet68 5,6 etp9c 9 100000
+Ethernet70 7,8 etp9d 9 100000
+Ethernet72 9,10 etp10a 10 100000
+Ethernet74 11,12 etp10b 10 100000
+Ethernet76 13,14 etp10c 10 100000
+Ethernet78 15,16 etp10d 10 100000
+Ethernet80 17,18 etp11a 11 100000
+Ethernet82 19,20 etp11b 11 100000
+Ethernet84 21,22 etp11c 11 100000
+Ethernet86 23,24 etp11d 11 100000
+Ethernet88 25,26 etp12a 12 100000
+Ethernet90 27,28 etp12b 12 100000
+Ethernet92 29,30 etp12c 12 100000
+Ethernet94 31,32 etp12d 12 100000
+Ethernet96 97,98,99,100,101,102,103,104 etp13 13 400000
+Ethernet104 105,106,107,108,109,110,111,112 etp14 14 400000
+Ethernet112 113,114,115,116,117,118,119,120 etp15 15 400000
+Ethernet120 121,122,123,124,125,126,127,128 etp16 16 400000
+Ethernet128 129,130 etp17a 17 100000
+Ethernet130 131,132 etp17b 17 100000
+Ethernet132 133,134 etp17c 17 100000
+Ethernet134 135,136 etp17d 17 100000
+Ethernet136 137,138 etp18a 18 100000
+Ethernet138 139,140 etp18b 18 100000
+Ethernet140 141,142 etp18c 18 100000
+Ethernet142 143,144 etp18d 18 100000
+Ethernet144 145,146 etp19a 19 100000
+Ethernet146 147,148 etp19b 19 100000
+Ethernet148 149,150 etp19c 19 100000
+Ethernet150 151,152 etp19d 19 100000
+Ethernet152 153,154 etp20a 20 100000
+Ethernet154 155,156 etp20b 20 100000
+Ethernet156 157,158 etp20c 20 100000
+Ethernet158 159,160 etp20d 20 100000
+Ethernet160 225,226,227,228,229,230,231,232 etp21 21 400000
+Ethernet168 233,234,235,236,237,238,239,240 etp22 22 400000
+Ethernet176 241,242,243,244,245,246,247,248 etp23 23 400000
+Ethernet184 249,250,251,252,253,254,255,256 etp24 24 400000
+Ethernet192 161,162,163,164,165,166,167,168 etp25 25 400000
+Ethernet200 169,170,171,172,173,174,175,176 etp26 26 400000
+Ethernet208 177,178,179,180,181,182,183,184 etp27 27 400000
+Ethernet216 185,186,187,188,189,190,191,192 etp28 28 400000
+Ethernet224 193,194,195,196,197,198,199,200 etp29 29 400000
+Ethernet232 201,202,203,204,205,206,207,208 etp30 30 400000
+Ethernet240 209,210,211,212,213,214,215,216 etp31 31 400000
+Ethernet248 217,218,219,220,221,222,223,224 etp32 32 400000
+Ethernet256 257 etp33 33 10000
+Ethernet257 258 etp34 34 10000
diff --git a/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-O32/port_config.ini b/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-O32/port_config.ini
index 790fb490cfe..ea214699e06 100644
--- a/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-O32/port_config.ini
+++ b/device/dell/x86_64-dellemc_z9332f_d1508-r0/DellEMC-Z9332f-O32/port_config.ini
@@ -1,35 +1,35 @@
-# name lanes alias index speed
-Ethernet0 33,34,35,36,37,38,39,40 fourhundredGigE1/1 1 400000
-Ethernet8 41,42,43,44,45,46,47,48 fourhundredGigE1/2 2 400000
-Ethernet16 49,50,51,52,53,54,55,56 fourhundredGigE1/3 3 400000
-Ethernet24 57,58,59,60,61,62,63,64 fourhundredGigE1/4 4 400000
-Ethernet32 65,66,67,68,69,70,71,72 fourhundredGigE1/5 5 400000
-Ethernet40 73,74,75,76,77,78,79,80 fourhundredGigE1/6 6 400000
-Ethernet48 81,82,83,84,85,86,87,88 fourhundredGigE1/7 7 400000
-Ethernet56 89,90,91,92,93,94,95,96 fourhundredGigE1/8 8 400000
-Ethernet64 1,2,3,4,5,6,7,8 fourhundredGigE1/9 9 400000
-Ethernet72 9,10,11,12,13,14,15,16 fourhundredGigE1/10 10 400000
-Ethernet80 17,18,19,20,21,22,23,24 fourhundredGigE1/11 11 400000
-Ethernet88 25,26,27,28,29,30,31,32 fourhundredGigE1/12 12 400000
-Ethernet96 97,98,99,100,101,102,103,104 fourhundredGigE1/13 13 400000
-Ethernet104 105,106,107,108,109,110,111,112 fourhundredGigE1/14 14 400000
-Ethernet112 113,114,115,116,117,118,119,120 fourhundredGigE1/15 15 400000
-Ethernet120 121,122,123,124,125,126,127,128 fourhundredGigE1/16 16 400000
-Ethernet128 129,130,131,132,133,134,135,136 fourhundredGigE1/17 17 400000
-Ethernet136 137,138,139,140,141,142,143,144 fourhundredGigE1/18 18 400000
-Ethernet144 145,146,147,148,149,150,151,152 fourhundredGigE1/19 19 400000
-Ethernet152 153,154,155,156,157,158,159,160 fourhundredGigE1/20 20 400000
-Ethernet160 225,226,227,228,229,230,231,232 fourhundredGigE1/21 21 400000
-Ethernet168 233,234,235,236,237,238,239,240 fourhundredGigE1/22 22 400000
-Ethernet176 241,242,243,244,245,246,247,248 fourhundredGigE1/23 23 400000
-Ethernet184 249,250,251,252,253,254,255,256 fourhundredGigE1/24 24 400000
-Ethernet192 161,162,163,164,165,166,167,168 fourhundredGigE1/25 25 400000
-Ethernet200 169,170,171,172,173,174,175,176 fourhundredGigE1/26 26 400000
-Ethernet208 177,178,179,180,181,182,183,184 fourhundredGigE1/27 27 400000
-Ethernet216 185,186,187,188,189,190,191,192 fourhundredGigE1/28 28 400000
-Ethernet224 193,194,195,196,197,198,199,200 fourhundredGigE1/29 29 400000
-Ethernet232 201,202,203,204,205,206,207,208 fourhundredGigE1/30 30 400000
-Ethernet240 209,210,211,212,213,214,215,216 fourhundredGigE1/31 31 400000
-Ethernet248 217,218,219,220,221,222,223,224 fourhundredGigE1/32 32 400000
-Ethernet256 257 tenGigE1/33 33 10000
-Ethernet257 258 tenGigE1/34 34 10000
+# name lanes alias index speed
+Ethernet0 33,34,35,36,37,38,39,40 etp1 1 400000
+Ethernet8 41,42,43,44,45,46,47,48 etp2 2 400000
+Ethernet16 49,50,51,52,53,54,55,56 etp3 3 400000
+Ethernet24 57,58,59,60,61,62,63,64 etp4 4 400000
+Ethernet32 65,66,67,68,69,70,71,72 etp5 5 400000
+Ethernet40 73,74,75,76,77,78,79,80 etp6 6 400000
+Ethernet48 81,82,83,84,85,86,87,88 etp7 7 400000
+Ethernet56 89,90,91,92,93,94,95,96 etp8 8 400000
+Ethernet64 1,2,3,4,5,6,7,8 etp9 9 400000
+Ethernet72 9,10,11,12,13,14,15,16 etp10 10 400000
+Ethernet80 17,18,19,20,21,22,23,24 etp11 11 400000
+Ethernet88 25,26,27,28,29,30,31,32 etp12 12 400000
+Ethernet96 97,98,99,100,101,102,103,104 etp13 13 400000
+Ethernet104 105,106,107,108,109,110,111,112 etp14 14 400000
+Ethernet112 113,114,115,116,117,118,119,120 etp15 15 400000
+Ethernet120 121,122,123,124,125,126,127,128 etp16 16 400000
+Ethernet128 129,130,131,132,133,134,135,136 etp17 17 400000
+Ethernet136 137,138,139,140,141,142,143,144 etp18 18 400000
+Ethernet144 145,146,147,148,149,150,151,152 etp19 19 400000
+Ethernet152 153,154,155,156,157,158,159,160 etp20 20 400000
+Ethernet160 225,226,227,228,229,230,231,232 etp21 21 400000
+Ethernet168 233,234,235,236,237,238,239,240 etp22 22 400000
+Ethernet176 241,242,243,244,245,246,247,248 etp23 23 400000
+Ethernet184 249,250,251,252,253,254,255,256 etp24 24 400000
+Ethernet192 161,162,163,164,165,166,167,168 etp25 25 400000
+Ethernet200 169,170,171,172,173,174,175,176 etp26 26 400000
+Ethernet208 177,178,179,180,181,182,183,184 etp27 27 400000
+Ethernet216 185,186,187,188,189,190,191,192 etp28 28 400000
+Ethernet224 193,194,195,196,197,198,199,200 etp29 29 400000
+Ethernet232 201,202,203,204,205,206,207,208 etp30 30 400000
+Ethernet240 209,210,211,212,213,214,215,216 etp31 31 400000
+Ethernet248 217,218,219,220,221,222,223,224 etp32 32 400000
+Ethernet256 257 etp33 33 10000
+Ethernet257 258 etp34 34 10000
diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/plugins/sfputil.py b/device/mellanox/x86_64-mlnx_msn2700-r0/plugins/sfputil.py
index 2cac9e5e45d..952620dc22f 100644
--- a/device/mellanox/x86_64-mlnx_msn2700-r0/plugins/sfputil.py
+++ b/device/mellanox/x86_64-mlnx_msn2700-r0/plugins/sfputil.py
@@ -43,7 +43,7 @@
platform_dict = {'x86_64-mlnx_msn2700-r0': 0, 'x86_64-mlnx_msn2740-r0': 0, 'x86_64-mlnx_msn2100-r0': 1,
'x86_64-mlnx_msn2410-r0': 2, 'x86_64-mlnx_msn2010-r0': 3, 'x86_64-mlnx_msn3420-r0': 5,
'x86_64-mlnx_msn3700-r0': 0, 'x86_64-mlnx_msn3700c-r0': 0, 'x86_64-mlnx_msn3800-r0': 4,
- 'x86_64-mlnx_msn4410-r0': 0, 'x86_64-mlnx_msn4600-r0': 4, 'x86_64-mlnx_msn4600c': 4,
+ 'x86_64-mlnx_msn4410-r0': 0, 'x86_64-mlnx_msn4600-r0': 4, 'x86_64-mlnx_msn4600c-r0': 4,
'x86_64-mlnx_msn4700-r0': 0}
port_position_tuple_list = [(0, 0, 31, 32, 1), (0, 0, 15, 16, 1), (0, 48, 55, 56, 1),
(0, 18, 21, 22, 1), (0, 0, 63, 64, 1), (0, 48, 59, 60, 1)]
diff --git a/device/mellanox/x86_64-mlnx_msn4410-r0/ACS-MSN4410/buffers_dynamic.json.j2 b/device/mellanox/x86_64-mlnx_msn4410-r0/ACS-MSN4410/buffers_dynamic.json.j2
new file mode 120000
index 00000000000..8c4117c6621
--- /dev/null
+++ b/device/mellanox/x86_64-mlnx_msn4410-r0/ACS-MSN4410/buffers_dynamic.json.j2
@@ -0,0 +1 @@
+../../x86_64-mlnx_msn2700-r0/ACS-MSN2700/buffers_dynamic.json.j2
\ No newline at end of file
diff --git a/dockers/docker-fpm-frr/TS b/dockers/docker-fpm-frr/TS
index 78ba24d5db6..5057802c766 100755
--- a/dockers/docker-fpm-frr/TS
+++ b/dockers/docker-fpm-frr/TS
@@ -10,7 +10,7 @@ function check_not_installed()
{
c=0
config=$(vtysh -c "show run")
- for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p' | egrep 'V4|V6');
+ for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p' | egrep 'V4|V6' | uniq);
do
is_internal_route_map $route_map_name && continue
echo "$config" | egrep -q "^route-map $route_map_name permit 20$"
@@ -26,7 +26,7 @@ function check_installed()
c=0
e=0
config=$(vtysh -c "show run")
- for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p' | egrep 'V4|V6');
+ for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p' | egrep 'V4|V6' | uniq);
do
is_internal_route_map $route_map_name && continue
echo "$config" | egrep -q "^route-map $route_map_name permit 20$"
@@ -38,3 +38,15 @@ function check_installed()
done
return $((e-c))
}
+
+function find_num_routemap()
+{
+ c=0
+ config=$(vtysh -c "show run")
+ for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p' | egrep 'V4|V6' | uniq);
+ do
+ is_internal_route_map $route_map_name && continue
+ c=$((c+1))
+ done
+ return $c
+}
diff --git a/dockers/docker-fpm-frr/TSA b/dockers/docker-fpm-frr/TSA
index 6312bf0ba5e..f45d3bf0bcb 100755
--- a/dockers/docker-fpm-frr/TSA
+++ b/dockers/docker-fpm-frr/TSA
@@ -3,12 +3,18 @@
# Load the common functions
source /usr/bin/TS
+find_num_routemap
+routemap_count=$?
check_not_installed
not_installed=$?
-if [[ $not_installed -ne 0 ]];
+
+if [[ $routemap_count -eq 0 ]];
+then
+ echo "System Mode: No external neighbors"
+elif [[ $not_installed -ne 0 ]];
then
TSA_FILE=$(mktemp)
- for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p');
+ for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p' | uniq);
do
is_internal_route_map $route_map_name && continue
case "$route_map_name" in
diff --git a/dockers/docker-fpm-frr/TSB b/dockers/docker-fpm-frr/TSB
index 44f9b15aea2..50f1ebc3ce8 100755
--- a/dockers/docker-fpm-frr/TSB
+++ b/dockers/docker-fpm-frr/TSB
@@ -3,12 +3,18 @@
# Load the common functions
source /usr/bin/TS
+find_num_routemap
+routemap_count=$?
check_installed
installed=$?
-if [[ $installed -ne 0 ]];
+
+if [[ $routemap_count -eq 0 ]];
+then
+ echo "System Mode: No external neighbors"
+elif [[ $installed -ne 0 ]];
then
TSB_FILE=$(mktemp)
- for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p');
+ for route_map_name in $(echo "$config" | sed -ne 's/ neighbor \S* route-map \(\S*\) out/\1/p' | uniq);
do
is_internal_route_map $route_map_name && continue
case "$route_map_name" in
diff --git a/dockers/docker-fpm-frr/TSC b/dockers/docker-fpm-frr/TSC
index a0e908439e4..45d22bdf55c 100755
--- a/dockers/docker-fpm-frr/TSC
+++ b/dockers/docker-fpm-frr/TSC
@@ -3,13 +3,18 @@
# Load the common functions
source /usr/bin/TS
+find_num_routemap
+routemap_count=$?
check_not_installed
not_installed=$?
check_installed
installed=$?
-if [[ $installed -eq 0 ]];
+if [[ $routemap_count -eq 0 ]];
+then
+ echo "System Mode: No external neighbors"
+elif [[ $installed -eq 0 ]];
then
echo "System Mode: Normal"
elif [[ $not_installed -eq 0 ]];
diff --git a/dockers/docker-orchagent/ipinip.json.j2 b/dockers/docker-orchagent/ipinip.json.j2
index cbfeb784b05..9cdf6857bdd 100644
--- a/dockers/docker-orchagent/ipinip.json.j2
+++ b/dockers/docker-orchagent/ipinip.json.j2
@@ -5,7 +5,7 @@
{% if DEVICE_METADATA['localhost']['sub_role'] == 'FrontEnd' or DEVICE_METADATA['localhost']['sub_role'] == 'BackEnd'%}
{% set loopback_intf_names = ['Loopback0', 'Loopback4096'] %}
{% else %}
-{% set loopback_intf_names = ['Loopback0'] %}
+{% set loopback_intf_names = ['Loopback0', 'Loopback2', 'Loopback3'] %}
{% endif %}
{% for (name, prefix) in LOOPBACK_INTERFACE|pfx_filter %}
{%- if prefix | ipv4 and name in loopback_intf_names %}
@@ -47,11 +47,10 @@
"TUNNEL_DECAP_TABLE:IPINIP_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"{% for prefix in ipv4_addresses|sort %}{{ prefix | ip }}{% if not loop.last %},{% endif %}{% endfor %}",
-{% if "mlnx" in DEVICE_METADATA.localhost.platform %}
"dscp_mode":"uniform",
+{% if "mlnx" in DEVICE_METADATA.localhost.platform %}
"ecn_mode":"standard",
{% else %}
- "dscp_mode":"pipe",
"ecn_mode":"copy_from_outer",
{% endif %}
"ttl_mode":"pipe"
@@ -66,11 +65,10 @@
"TUNNEL_DECAP_TABLE:IPINIP_V6_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"{% for prefix in ipv6_addresses|sort %}{{ prefix | ip }}{% if not loop.last %},{% endif %}{% endfor %}",
-{% if "mlnx" in DEVICE_METADATA.localhost.platform %}
"dscp_mode":"uniform",
+{% if "mlnx" in DEVICE_METADATA.localhost.platform %}
"ecn_mode":"standard",
{% else %}
- "dscp_mode":"pipe",
"ecn_mode":"copy_from_outer",
{% endif %}
"ttl_mode":"pipe"
diff --git a/installer/x86_64/install.sh b/installer/x86_64/install.sh
index cbb81fabd02..7c0d311b1a8 100755
--- a/installer/x86_64/install.sh
+++ b/installer/x86_64/install.sh
@@ -648,6 +648,7 @@ menuentry '$demo_grub_entry' {
linux /$image_dir/boot/vmlinuz-4.19.0-12-2-amd64 root=$grub_cfg_root rw $GRUB_CMDLINE_LINUX \
net.ifnames=0 biosdevname=0 \
loop=$image_dir/$FILESYSTEM_SQUASHFS loopfstype=squashfs \
+ systemd.unified_cgroup_hierarchy=0 \
apparmor=1 security=apparmor varlog_size=$VAR_LOG_SIZE usbcore.autosuspend=-1 $ONIE_PLATFORM_EXTRA_CMDLINE_LINUX
echo 'Loading $demo_volume_label $demo_type initial ramdisk ...'
initrd /$image_dir/boot/initrd.img-4.19.0-12-2-amd64
diff --git a/platform/mellanox/asic_table.j2 b/platform/mellanox/asic_table.j2
index 4e41a416b6e..77f62e3b368 100644
--- a/platform/mellanox/asic_table.j2
+++ b/platform/mellanox/asic_table.j2
@@ -14,10 +14,12 @@
'x86_64-mlnx_msn2700-r0':'MELLANOX-SPECTRUM',
'x86_64-mlnx_msn2700_simx-r0':'MELLANOX-SPECTRUM',
'x86_64-mlnx_msn2740-r0':'MELLANOX-SPECTRUM',
+ 'x86_64-mlnx_msn3420-r0':'MELLANOX-SPECTRUM-2',
'x86_64-mlnx_msn3700c-r0':'MELLANOX-SPECTRUM-2',
'x86_64-mlnx_msn3700-r0':'MELLANOX-SPECTRUM-2',
'x86_64-mlnx_msn3700_simx-r0':'MELLANOX-SPECTRUM-2',
'x86_64-mlnx_msn3800-r0':'MELLANOX-SPECTRUM-2',
+ 'x86_64-mlnx_msn4410-r0':'MELLANOX-SPECTRUM-3',
'x86_64-mlnx_msn4700_simx-r0':'MELLANOX-SPECTRUM-3',
'x86_64-mlnx_msn4700-r0':'MELLANOX-SPECTRUM-3',
'x86_64-mlnx_msn4600c-r0':'MELLANOX-SPECTRUM-3',
diff --git a/platform/pddf/i2c/utils/pddf_util.py b/platform/pddf/i2c/utils/pddf_util.py
index 127f37d6b2f..299ec18babd 100755
--- a/platform/pddf/i2c/utils/pddf_util.py
+++ b/platform/pddf/i2c/utils/pddf_util.py
@@ -45,14 +45,14 @@
try:
pddf_obj = pddfparse.PddfParse()
except Exception as e:
- print "%s" % str(e)
+ print("%s" % str(e))
sys.exit()
if DEBUG == True:
- print sys.argv[0]
- print 'ARGV :', sys.argv[1:]
+ print(sys.argv[0])
+ print('ARGV :', sys.argv[1:])
def main():
global DEBUG
@@ -68,9 +68,9 @@ def main():
'force',
])
if DEBUG == True:
- print options
- print args
- print len(sys.argv)
+ print(options)
+ print(args)
+ print(len(sys.argv))
# generate the KOS list from pddf device JSON file
if 'std_perm_kos' in pddf_obj.data['PLATFORM'].keys():
@@ -110,12 +110,12 @@ def main():
return 0
def show_help():
- print __doc__ % {'scriptName' : sys.argv[0].split("/")[-1]}
+ print(__doc__ % {'scriptName' : sys.argv[0].split("/")[-1]})
sys.exit(0)
def my_log(txt):
if DEBUG == True:
- print "[PDDF]"+txt
+ print("[PDDF]"+txt)
return
def log_os_system(cmd, show):
@@ -180,10 +180,10 @@ def config_pddf_utils():
pddf_path = get_path_to_pddf_plugin()
# ##########################################################################
- SONIC_PLATFORM_BSP_WHL_PKG = "/".join([device_path, 'sonic_platform-1.0-py2-none-any.whl'])
- SONIC_PLATFORM_PDDF_WHL_PKG = "/".join([device_path, 'pddf', 'sonic_platform-1.0-py2-none-any.whl'])
- SONIC_PLATFORM_BSP_WHL_PKG_BK = "/".join([device_path, 'sonic_platform-1.0-py2-none-any.whl.orig'])
- status, output = log_os_system("pip show sonic-platform > /dev/null 2>&1", 1)
+ SONIC_PLATFORM_BSP_WHL_PKG = "/".join([device_path, 'sonic_platform-1.0-py3-none-any.whl'])
+ SONIC_PLATFORM_PDDF_WHL_PKG = "/".join([device_path, 'pddf', 'sonic_platform-1.0-py3-none-any.whl'])
+ SONIC_PLATFORM_BSP_WHL_PKG_BK = "/".join([device_path, 'sonic_platform-1.0-py3-none-any.whl.orig'])
+ status, output = log_os_system("pip3 show sonic-platform > /dev/null 2>&1", 1)
if status:
if os.path.exists(SONIC_PLATFORM_PDDF_WHL_PKG):
# Platform API 2.0 is supported
@@ -194,18 +194,18 @@ def config_pddf_utils():
# PDDF whl package exist ... this must be the whl package created from
# PDDF 2.0 ref API classes and some changes on top of it ... install it
shutil.copy(SONIC_PLATFORM_PDDF_WHL_PKG, SONIC_PLATFORM_BSP_WHL_PKG)
- print "Attemting to install the PDDF sonic_platform wheel package ..."
- status, output = log_os_system("pip install "+ SONIC_PLATFORM_BSP_WHL_PKG, 1)
+            print("Attempting to install the PDDF sonic_platform wheel package ...")
+ status, output = log_os_system("pip3 install "+ SONIC_PLATFORM_BSP_WHL_PKG, 1)
if status:
- print "Error: Failed to install {}".format(SONIC_PLATFORM_BSP_WHL_PKG)
+ print("Error: Failed to install {}".format(SONIC_PLATFORM_BSP_WHL_PKG))
return status
else:
- print "Successfully installed {} package".format(SONIC_PLATFORM_BSP_WHL_PKG)
+ print("Successfully installed {} package".format(SONIC_PLATFORM_BSP_WHL_PKG))
else:
# PDDF with platform APIs 1.5 must be supported
device_plugin_path = "/".join([device_path, "plugins"])
backup_path = "/".join([device_plugin_path, "orig"])
- print "Loading PDDF generic plugins (1.0)"
+ print("Loading PDDF generic plugins (1.0)")
if os.path.exists(backup_path) is False:
os.mkdir(backup_path)
log_os_system("mv "+device_plugin_path+"/*.*"+" "+backup_path, 0)
@@ -224,23 +224,23 @@ def config_pddf_utils():
log_os_system('mv '+SONIC_PLATFORM_BSP_WHL_PKG+' '+SONIC_PLATFORM_BSP_WHL_PKG_BK, 1)
shutil.copy(SONIC_PLATFORM_PDDF_WHL_PKG, SONIC_PLATFORM_BSP_WHL_PKG)
# uninstall the existing bsp whl pkg
- status, output = log_os_system("pip uninstall sonic-platform -y &> /dev/null", 1)
+ status, output = log_os_system("pip3 uninstall sonic-platform -y &> /dev/null", 1)
if status:
- print "Error: Unable to uninstall BSP sonic-platform whl package"
+ print("Error: Unable to uninstall BSP sonic-platform whl package")
return status
- print "Attemting to install the PDDF sonic_platform wheel package ..."
- status, output = log_os_system("pip install "+ SONIC_PLATFORM_BSP_WHL_PKG, 1)
+            print("Attempting to install the PDDF sonic_platform wheel package ...")
+ status, output = log_os_system("pip3 install "+ SONIC_PLATFORM_BSP_WHL_PKG, 1)
if status:
- print "Error: Failed to install {}".format(SONIC_PLATFORM_BSP_WHL_PKG)
+ print("Error: Failed to install {}".format(SONIC_PLATFORM_BSP_WHL_PKG))
return status
else:
- print "Successfully installed {} package".format(SONIC_PLATFORM_BSP_WHL_PKG)
+ print("Successfully installed {} package".format(SONIC_PLATFORM_BSP_WHL_PKG))
else:
# system rebooted in pddf mode
- print "System rebooted in PDDF mode, hence keeping the PDDF 2.0 classes"
+ print("System rebooted in PDDF mode, hence keeping the PDDF 2.0 classes")
else:
# pddf whl package doesnt exist
- print "Error: PDDF 2.0 classes doesnt exist. PDDF mode can not be enabled"
+ print("Error: PDDF 2.0 classes doesnt exist. PDDF mode can not be enabled")
sys.exit(1)
# ##########################################################################
@@ -266,11 +266,11 @@ def config_pddf_utils():
def cleanup_pddf_utils():
device_path = get_path_to_device()
- SONIC_PLATFORM_BSP_WHL_PKG = "/".join([device_path, 'sonic_platform-1.0-py2-none-any.whl'])
- SONIC_PLATFORM_PDDF_WHL_PKG = "/".join([device_path, 'pddf', 'sonic_platform-1.0-py2-none-any.whl'])
- SONIC_PLATFORM_BSP_WHL_PKG_BK = "/".join([device_path, 'sonic_platform-1.0-py2-none-any.whl.orig'])
+ SONIC_PLATFORM_BSP_WHL_PKG = "/".join([device_path, 'sonic_platform-1.0-py3-none-any.whl'])
+ SONIC_PLATFORM_PDDF_WHL_PKG = "/".join([device_path, 'pddf', 'sonic_platform-1.0-py3-none-any.whl'])
+ SONIC_PLATFORM_BSP_WHL_PKG_BK = "/".join([device_path, 'sonic_platform-1.0-py3-none-any.whl.orig'])
# ##########################################################################
- status, output = log_os_system("pip show sonic-platform > /dev/null 2>&1", 1)
+ status, output = log_os_system("pip3 show sonic-platform > /dev/null 2>&1", 1)
if status:
# PDDF Platform API 2.0 is not supported but system is in PDDF mode, hence PDDF 1.0 plugins are present
device_plugin_path = "/".join([device_path, "plugins"])
@@ -283,7 +283,7 @@ def cleanup_pddf_utils():
log_os_system("mv "+backup_path+"/*"+" "+device_plugin_path, 1)
os.rmdir(backup_path)
else:
- print "\nERR: Unable to locate original device files...\n"
+ print("\nERR: Unable to locate original device files...\n")
else:
# PDDF 2.0 apis are supported and PDDF whl package is installed
@@ -291,29 +291,29 @@ def cleanup_pddf_utils():
if os.path.exists(SONIC_PLATFORM_BSP_WHL_PKG_BK):
# platform is 2.0 compliant and original bsp 2.0 whl package exist
log_os_system('mv '+SONIC_PLATFORM_BSP_WHL_PKG_BK+' '+SONIC_PLATFORM_BSP_WHL_PKG, 1)
- status, output = log_os_system("pip uninstall sonic-platform -y &> /dev/null", 1)
+ status, output = log_os_system("pip3 uninstall sonic-platform -y &> /dev/null", 1)
if status:
- print "Error: Unable to uninstall PDDF sonic-platform whl package"
+ print("Error: Unable to uninstall PDDF sonic-platform whl package")
return status
- print "Attemting to install the BSP sonic_platform wheel package ..."
- status, output = log_os_system("pip install "+ SONIC_PLATFORM_BSP_WHL_PKG, 1)
+                print("Attempting to install the BSP sonic_platform wheel package ...")
+ status, output = log_os_system("pip3 install "+ SONIC_PLATFORM_BSP_WHL_PKG, 1)
if status:
- print "Error: Failed to install {}".format(SONIC_PLATFORM_BSP_WHL_PKG)
+ print("Error: Failed to install {}".format(SONIC_PLATFORM_BSP_WHL_PKG))
return status
else:
- print "Successfully installed {} package".format(SONIC_PLATFORM_BSP_WHL_PKG)
+ print("Successfully installed {} package".format(SONIC_PLATFORM_BSP_WHL_PKG))
else:
# platform doesnt support 2.0 APIs but PDDF is 2.0 based
# remove and uninstall the PDDF whl package
if os.path.exists(SONIC_PLATFORM_BSP_WHL_PKG):
os.remove(SONIC_PLATFORM_BSP_WHL_PKG)
- status, output = log_os_system("pip uninstall sonic-platform -y &> /dev/null", 1)
+ status, output = log_os_system("pip3 uninstall sonic-platform -y &> /dev/null", 1)
if status:
- print "Error: Unable to uninstall PDDF sonic-platform whl package"
+ print("Error: Unable to uninstall PDDF sonic-platform whl package")
return status
else:
# something seriously wrong. System is in PDDF mode but pddf whl pkg is not present
- print "Error: Fatal error as the system is in PDDF mode but the pddf .whl original is not present"
+ print("Error: Fatal error as the system is in PDDF mode but the pddf .whl original is not present")
# ################################################################################################################
if os.path.exists(device_path+"/fancontrol"):
@@ -351,26 +351,28 @@ def driver_install():
if os.path.exists('/usr/local/bin/pddf_pre_driver_install.sh'):
status, output = log_os_system('/usr/local/bin/pddf_pre_driver_install.sh', 1)
if status:
- print "Error: pddf_pre_driver_install script failed with error %d"%status
+ print("Error: pddf_pre_driver_install script failed with error %d"%status)
return status
log_os_system("depmod", 1)
for i in range(0,len(kos)):
status, output = log_os_system(kos[i], 1)
if status:
- print "driver_install() failed with error %d"%status
+ print("driver_install() failed with error %d"%status)
if FORCE == 0:
return status
output = config_pddf_utils()
if output:
- print "config_pddf_utils() failed with error %d"%output
+ print("config_pddf_utils() failed with error %d"%output)
# check for post_driver_install script
if os.path.exists('/usr/local/bin/pddf_post_driver_install.sh'):
status, output = log_os_system('/usr/local/bin/pddf_post_driver_install.sh', 1)
if status:
- print "Error: pddf_post_driver_install script failed with error %d"%status
+ print("Error: pddf_post_driver_install script failed with error %d"%status)
return status
+ # Useful for debugging
+ print(output)
return 0
@@ -380,7 +382,7 @@ def driver_uninstall():
status = cleanup_pddf_utils()
if status:
- print "cleanup_pddf_utils() failed with error %d"%status
+ print("cleanup_pddf_utils() failed with error %d"%status)
for i in range(0,len(kos)):
# if it is in perm_kos, do not remove
@@ -391,7 +393,7 @@ def driver_uninstall():
rm = rm.replace("insmod", "rmmod")
status, output = log_os_system(rm, 1)
if status:
- print "driver_uninstall() failed with error %d"%status
+ print("driver_uninstall() failed with error %d"%status)
if FORCE == 0:
return status
return 0
@@ -403,13 +405,13 @@ def device_install():
if os.path.exists('/usr/local/bin/pddf_pre_device_create.sh'):
status, output = log_os_system('/usr/local/bin/pddf_pre_device_create.sh', 1)
if status:
- print "Error: pddf_pre_device_create script failed with error %d"%status
+ print("Error: pddf_pre_device_create script failed with error %d"%status)
return status
# trigger the pddf_obj script for FAN, PSU, CPLD, MUX, etc
status = pddf_obj.create_pddf_devices()
if status:
- print "Error: create_pddf_devices() failed with error %d"%status
+ print("Error: create_pddf_devices() failed with error %d"%status)
if FORCE == 0:
return status
@@ -417,8 +419,10 @@ def device_install():
if os.path.exists('/usr/local/bin/pddf_post_device_create.sh'):
status, output = log_os_system('/usr/local/bin/pddf_post_device_create.sh', 1)
if status:
- print "Error: pddf_post_device_create script failed with error %d"%status
+ print("Error: pddf_post_device_create script failed with error %d"%status)
return status
+ # Useful for debugging
+ print(output)
return
@@ -427,28 +431,28 @@ def device_uninstall():
# Trigger the paloparse script for deletion of FAN, PSU, OPTICS, CPLD clients
status = pddf_obj.delete_pddf_devices()
if status:
- print "Error: delete_pddf_devices() failed with error %d"%status
+ print("Error: delete_pddf_devices() failed with error %d"%status)
if FORCE == 0:
return status
return
def do_install():
- print "Checking system...."
+ print("Checking system....")
if not os.path.exists('/usr/share/sonic/platform/pddf_support'):
- print PROJECT_NAME.upper() +" mode is not enabled"
+ print(PROJECT_NAME.upper() +" mode is not enabled")
return
if driver_check()== False :
- print PROJECT_NAME.upper() +" has no PDDF driver installed...."
+ print(PROJECT_NAME.upper() +" has no PDDF driver installed....")
create_pddf_log_files()
- print "Installing ..."
+ print("Installing ...")
status = driver_install()
if status:
return status
else:
- print PROJECT_NAME.upper() +" drivers detected...."
+ print(PROJECT_NAME.upper() +" drivers detected....")
- print "Creating devices ..."
+ print("Creating devices ...")
status = device_install()
if status:
return status
@@ -456,26 +460,26 @@ def do_install():
return
def do_uninstall():
- print "Checking system...."
+ print("Checking system....")
if not os.path.exists('/usr/share/sonic/platform/pddf_support'):
- print PROJECT_NAME.upper() +" mode is not enabled"
+ print(PROJECT_NAME.upper() +" mode is not enabled")
return
if os.path.exists('/var/log/pddf'):
- print "Remove pddf log files....."
+ print("Remove pddf log files.....")
log_os_system("sudo rm -rf /var/log/pddf", 1)
- print "Remove all the devices..."
+ print("Remove all the devices...")
status = device_uninstall()
if status:
return status
if driver_check()== False :
- print PROJECT_NAME.upper() +" has no PDDF driver installed...."
+ print(PROJECT_NAME.upper() +" has no PDDF driver installed....")
else:
- print "Removing installed driver...."
+ print("Removing installed driver....")
status = driver_uninstall()
if status:
if FORCE == 0:
@@ -486,60 +490,60 @@ def do_switch_pddf():
try:
import pddf_switch_svc
except ImportError:
- print "Unable to find pddf_switch_svc.py. PDDF might not be supported on this platform"
+ print("Unable to find pddf_switch_svc.py. PDDF might not be supported on this platform")
sys.exit()
- print "Check the pddf support..."
+ print("Check the pddf support...")
status = pddf_switch_svc.check_pddf_support()
if not status:
- print "PDDF is not supported on this platform"
+ print("PDDF is not supported on this platform")
return status
- print "Checking system...."
+ print("Checking system....")
if os.path.exists('/usr/share/sonic/platform/pddf_support'):
- print PROJECT_NAME.upper() +" system is already in pddf mode...."
+ print(PROJECT_NAME.upper() +" system is already in pddf mode....")
else:
- print "Check if the native sonic-platform whl package is installed in the pmon docker"
- status, output = log_os_system("docker exec -it pmon pip show sonic-platform", 1)
+ print("Check if the native sonic-platform whl package is installed in the pmon docker")
+ status, output = log_os_system("docker exec -it pmon pip3 show sonic-platform", 1)
if not status:
# Need to remove this whl module
- status, output = log_os_system("docker exec -it pmon pip uninstall sonic-platform -y", 1)
+ status, output = log_os_system("docker exec -it pmon pip3 uninstall sonic-platform -y", 1)
if not status:
- print "Successfully uninstalled the native sonic-platform whl pkg from pmon container"
+ print("Successfully uninstalled the native sonic-platform whl pkg from pmon container")
else:
- print "Error: Unable to uninstall the sonic-platform whl pkg from pmon container.\
- Do it manually before moving to nonpddf mode"
+ print("Error: Unable to uninstall the sonic-platform whl pkg from pmon container."
+ " Do it manually before moving to nonpddf mode")
return status
- print "Stopping the pmon service ..."
+ print("Stopping the pmon service ...")
status, output = log_os_system("systemctl stop pmon.service", 1)
if status:
- print "Pmon stop failed"
+ print("Pmon stop failed")
if FORCE==0:
return status
- print "Stopping the platform services.."
+ print("Stopping the platform services..")
status = pddf_switch_svc.stop_platform_svc()
if not status:
if FORCE==0:
return status
- print "Creating the pddf_support file..."
+ print("Creating the pddf_support file...")
if os.path.exists('/usr/share/sonic/platform'):
log_os_system("touch /usr/share/sonic/platform/pddf_support", 1)
else:
- print "/usr/share/sonic/platform path doesn't exist. Unable to set pddf mode"
+ print("/usr/share/sonic/platform path doesn't exist. Unable to set pddf mode")
return -1
- print "Starting the PDDF platform service..."
+ print("Starting the PDDF platform service...")
status = pddf_switch_svc.start_platform_pddf()
if not status:
if FORCE==0:
return status
- print "Restart the pmon service ..."
+ print("Restart the pmon service ...")
status, output = log_os_system("systemctl start pmon.service", 1)
if status:
- print "Pmon restart failed"
+ print("Pmon restart failed")
if FORCE==0:
return status
@@ -549,53 +553,53 @@ def do_switch_nonpddf():
try:
import pddf_switch_svc
except ImportError:
- print "Unable to find pddf_switch_svc.py. PDDF might not be supported on this platform"
+ print("Unable to find pddf_switch_svc.py. PDDF might not be supported on this platform")
sys.exit()
- print "Checking system...."
+ print("Checking system....")
if not os.path.exists('/usr/share/sonic/platform/pddf_support'):
- print PROJECT_NAME.upper() +" system is already in non-pddf mode...."
+ print(PROJECT_NAME.upper() +" system is already in non-pddf mode....")
else:
- print "Check if the sonic-platform whl package is installed in the pmon docker"
- status, output = log_os_system("docker exec -it pmon pip show sonic-platform", 1)
+ print("Check if the sonic-platform whl package is installed in the pmon docker")
+ status, output = log_os_system("docker exec -it pmon pip3 show sonic-platform", 1)
if not status:
# Need to remove this whl module
- status, output = log_os_system("docker exec -it pmon pip uninstall sonic-platform -y", 1)
+ status, output = log_os_system("docker exec -it pmon pip3 uninstall sonic-platform -y", 1)
if not status:
- print "Successfully uninstalled the sonic-platform whl pkg from pmon container"
+ print("Successfully uninstalled the sonic-platform whl pkg from pmon container")
else:
- print "Error: Unable to uninstall the sonic-platform whl pkg from pmon container.\
- Do it manually before moving to nonpddf mode"
+ print("Error: Unable to uninstall the sonic-platform whl pkg from pmon container."
+ " Do it manually before moving to nonpddf mode")
return status
- print "Stopping the pmon service ..."
+ print("Stopping the pmon service ...")
status, output = log_os_system("systemctl stop pmon.service", 1)
if status:
- print "Stopping pmon service failed"
+ print("Stopping pmon service failed")
if FORCE==0:
return status
- print "Stopping the PDDF platform service..."
+ print("Stopping the PDDF platform service...")
status = pddf_switch_svc.stop_platform_pddf()
if not status:
if FORCE==0:
return status
- print "Removing the pddf_support file..."
+ print("Removing the pddf_support file...")
if os.path.exists('/usr/share/sonic/platform'):
log_os_system("rm -f /usr/share/sonic/platform/pddf_support", 1)
else:
- print "/usr/share/sonic/platform path doesnt exist. Unable to set non-pddf mode"
+ print("/usr/share/sonic/platform path doesnt exist. Unable to set non-pddf mode")
return -1
- print "Starting the platform services..."
+ print("Starting the platform services...")
status = pddf_switch_svc.start_platform_svc()
if not status:
if FORCE==0:
return status
- print "Restart the pmon service ..."
+ print("Restart the pmon service ...")
status, output = log_os_system("systemctl start pmon.service", 1)
if status:
- print "Restarting pmon service failed"
+ print("Restarting pmon service failed")
if FORCE==0:
return status
diff --git a/rules/ethtool.mk b/rules/ethtool.mk
index b79eaad7afa..11c857811a1 100644
--- a/rules/ethtool.mk
+++ b/rules/ethtool.mk
@@ -3,11 +3,11 @@
ETHTOOL_VERSION_BASE = 5.9
export ETHTOOL_VERSION_BASE
-ETHTOOL = ethtool_$(ETHTOOL_VERSION_BASE)-1_amd64.deb
+ETHTOOL = ethtool_$(ETHTOOL_VERSION_BASE)-1_$(CONFIGURED_ARCH).deb
$(ETHTOOL)_SRC_PATH = $(SRC_PATH)/ethtool
SONIC_MAKE_DEBS += $(ETHTOOL)
-ETHTOOL_DBG = ethtool-dbgsym_$(ETHTOOL_VERSION_BASE)-1_amd64.deb
+ETHTOOL_DBG = ethtool-dbgsym_$(ETHTOOL_VERSION_BASE)-1_$(CONFIGURED_ARCH).deb
$(eval $(call add_extra_package,$(ETHTOOL),$(ETHTOOL_DBG)))
export ETHTOOL ETHTOOL_DBG
diff --git a/rules/sonic-syseepromd.mk b/rules/sonic-syseepromd.mk
index 0732e3b531c..03cc20a38f7 100644
--- a/rules/sonic-syseepromd.mk
+++ b/rules/sonic-syseepromd.mk
@@ -4,7 +4,7 @@
SONIC_SYSEEPROMD_PY2 = sonic_syseepromd-1.0-py2-none-any.whl
$(SONIC_SYSEEPROMD_PY2)_SRC_PATH = $(SRC_PATH)/sonic-platform-daemons/sonic-syseepromd
-$(SONIC_SYSEEPROMD_PY2)_DEPENDS = $(SONIC_PY_COMMON_PY2)
+$(SONIC_SYSEEPROMD_PY2)_DEPENDS = $(SONIC_PY_COMMON_PY2) $(SONIC_PLATFORM_COMMON_PY2)
$(SONIC_SYSEEPROMD_PY2)_DEBS_DEPENDS = $(LIBSWSSCOMMON) $(PYTHON_SWSSCOMMON)
$(SONIC_SYSEEPROMD_PY2)_PYTHON_VERSION = 2
SONIC_PYTHON_WHEELS += $(SONIC_SYSEEPROMD_PY2)
@@ -13,7 +13,7 @@ SONIC_PYTHON_WHEELS += $(SONIC_SYSEEPROMD_PY2)
SONIC_SYSEEPROMD_PY3 = sonic_syseepromd-1.0-py3-none-any.whl
$(SONIC_SYSEEPROMD_PY3)_SRC_PATH = $(SRC_PATH)/sonic-platform-daemons/sonic-syseepromd
-$(SONIC_SYSEEPROMD_PY3)_DEPENDS = $(SONIC_PY_COMMON_PY3) $(SONIC_SYSEEPROMD_PY2)
+$(SONIC_SYSEEPROMD_PY3)_DEPENDS = $(SONIC_PY_COMMON_PY3) $(SONIC_PLATFORM_COMMON_PY3) $(SONIC_SYSEEPROMD_PY2)
$(SONIC_SYSEEPROMD_PY3)_DEBS_DEPENDS = $(LIBSWSSCOMMON) $(PYTHON3_SWSSCOMMON)
$(SONIC_SYSEEPROMD_PY3)_PYTHON_VERSION = 3
SONIC_PYTHON_WHEELS += $(SONIC_SYSEEPROMD_PY3)
diff --git a/sonic-slave-buster/Dockerfile.j2 b/sonic-slave-buster/Dockerfile.j2
index ed541ec5356..7598dd96e9f 100644
--- a/sonic-slave-buster/Dockerfile.j2
+++ b/sonic-slave-buster/Dockerfile.j2
@@ -411,6 +411,8 @@ RUN pip3 install fastentrypoints
# For running Python unit tests
RUN pip2 install pytest-runner==4.4
RUN pip3 install pytest-runner==5.2
+RUN pip2 install nose==1.3.7
+RUN pip3 install nose==1.3.7
RUN pip2 install mockredispy==2.9.3
RUN pip3 install mockredispy==2.9.3
diff --git a/sonic-slave-stretch/Dockerfile.j2 b/sonic-slave-stretch/Dockerfile.j2
index 103a9cd7783..afb69af16b2 100644
--- a/sonic-slave-stretch/Dockerfile.j2
+++ b/sonic-slave-stretch/Dockerfile.j2
@@ -343,6 +343,7 @@ RUN pip3 uninstall -y enum34
RUN pip2 install j2cli==0.3.10
# For sonic snmpagent mock testing
+RUN pip3 install nose==1.3.7
RUN pip3 install mockredispy==2.9.3
# For sonic-mgmt-framework
@@ -359,6 +360,7 @@ RUN pip3 install redis
RUN pip2 install pexpect==4.6.0
# For sonic-utilities build
+RUN pip2 install nose==1.3.7
RUN pip2 install mockredispy==2.9.3
RUN pip2 install pytest-runner==4.4
RUN pip2 install setuptools==40.8.0
diff --git a/src/ethtool/Makefile b/src/ethtool/Makefile
index 2f5a3acc94d..071429121f6 100644
--- a/src/ethtool/Makefile
+++ b/src/ethtool/Makefile
@@ -12,7 +12,12 @@ $(addprefix $(DEST)/, $(MAIN_TARGET)): $(DEST)/% :
pushd ethtool
git checkout tags/debian/1%$(ETHTOOL_VERSION_BASE)-1
# Build package
+ifneq (,$(filter $(CONFIGURED_ARCH),arm64 armhf))
+ DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage -rfakeroot -b -us -uc -j$(SONIC_CONFIG_MAKE_JOBS) --admindir $(SONIC_DPKG_ADMINDIR)
+else
dpkg-buildpackage -rfakeroot -b -us -uc -j$(SONIC_CONFIG_MAKE_JOBS) --admindir $(SONIC_DPKG_ADMINDIR)
+endif
+
popd
mv $(DERIVED_TARGET) $* $(DEST)/
diff --git a/src/sonic-bgpcfgd/bgpcfgd/main.py b/src/sonic-bgpcfgd/bgpcfgd/main.py
index 360b54dc11a..64d2edbed17 100644
--- a/src/sonic-bgpcfgd/bgpcfgd/main.py
+++ b/src/sonic-bgpcfgd/bgpcfgd/main.py
@@ -15,6 +15,7 @@
from .managers_db import BGPDataBaseMgr
from .managers_intf import InterfaceMgr
from .managers_setsrc import ZebraSetSrc
+from .managers_static_rt import StaticRouteMgr
from .runner import Runner, signal_handler
from .template import TemplateFabric
from .utils import read_constants
@@ -53,6 +54,8 @@ def do_work():
BGPAllowListMgr(common_objs, "CONFIG_DB", "BGP_ALLOWED_PREFIXES"),
# BBR Manager
BBRMgr(common_objs, "CONFIG_DB", "BGP_BBR"),
+ # Static Route Managers
+ StaticRouteMgr(common_objs, "CONFIG_DB", "STATIC_ROUTE"),
]
runner = Runner(common_objs['cfg_mgr'])
for mgr in managers:
diff --git a/src/sonic-bgpcfgd/bgpcfgd/managers_static_rt.py b/src/sonic-bgpcfgd/bgpcfgd/managers_static_rt.py
new file mode 100644
index 00000000000..f6dd77a520d
--- /dev/null
+++ b/src/sonic-bgpcfgd/bgpcfgd/managers_static_rt.py
@@ -0,0 +1,173 @@
+import traceback
+from .log import log_crit, log_err, log_debug
+from .manager import Manager
+from .template import TemplateFabric
+import socket
+
+class StaticRouteMgr(Manager):
+ """ This class updates static routes when STATIC_ROUTE table is updated """
+ def __init__(self, common_objs, db, table):
+ """
+ Initialize the object
+ :param common_objs: common object dictionary
+ :param db: name of the db
+ :param table: name of the table in the db
+ """
+ super(StaticRouteMgr, self).__init__(
+ common_objs,
+ [],
+ db,
+ table,
+ )
+
+ self.static_routes = {}
+
+ OP_DELETE = 'DELETE'
+ OP_ADD = 'ADD'
+
+ def set_handler(self, key, data):
+ vrf, ip_prefix = self.split_key(key)
+ is_ipv6 = TemplateFabric.is_ipv6(ip_prefix)
+
+ arg_list = lambda v: v.split(',') if len(v.strip()) != 0 else None
+ bkh_list = arg_list(data['blackhole']) if 'blackhole' in data else None
+ nh_list = arg_list(data['nexthop']) if 'nexthop' in data else None
+ intf_list = arg_list(data['ifname']) if 'ifname' in data else None
+ dist_list = arg_list(data['distance']) if 'distance' in data else None
+ nh_vrf_list = arg_list(data['nexthop-vrf']) if 'nexthop-vrf' in data else None
+
+ try:
+ ip_nh_set = IpNextHopSet(is_ipv6, bkh_list, nh_list, intf_list, dist_list, nh_vrf_list)
+ cur_nh_set = self.static_routes.get(vrf, {}).get(ip_prefix, IpNextHopSet(is_ipv6))
+ cmd_list = self.static_route_commands(ip_nh_set, cur_nh_set, ip_prefix, vrf)
+ except Exception as exc:
+ log_crit("Got an exception %s: Traceback: %s" % (str(exc), traceback.format_exc()))
+ return False
+
+ if cmd_list:
+ self.cfg_mgr.push_list(cmd_list)
+ log_debug("Static route {} is scheduled for updates".format(key))
+ else:
+ log_debug("Nothing to update for static route {}".format(key))
+
+ self.static_routes.setdefault(vrf, {})[ip_prefix] = ip_nh_set
+
+ return True
+
+
+ def del_handler(self, key):
+ vrf, ip_prefix = self.split_key(key)
+ is_ipv6 = TemplateFabric.is_ipv6(ip_prefix)
+
+ ip_nh_set = IpNextHopSet(is_ipv6)
+ cur_nh_set = self.static_routes.get(vrf, {}).get(ip_prefix, IpNextHopSet(is_ipv6))
+ cmd_list = self.static_route_commands(ip_nh_set, cur_nh_set, ip_prefix, vrf)
+
+ if cmd_list:
+ self.cfg_mgr.push_list(cmd_list)
+ log_debug("Static route {} is scheduled for updates".format(key))
+ else:
+ log_debug("Nothing to update for static route {}".format(key))
+
+ self.static_routes.setdefault(vrf, {}).pop(ip_prefix, None)
+
+ @staticmethod
+ def split_key(key):
+ """
+ Split key into vrf name and prefix.
+ :param key: key to split
+ :return: vrf name extracted from the key, ip prefix extracted from the key
+ """
+ if '|' not in key:
+ return 'default', key
+ else:
+ return tuple(key.split('|', 1))
+
+ def static_route_commands(self, ip_nh_set, cur_nh_set, ip_prefix, vrf):
+ diff_set = ip_nh_set.symmetric_difference(cur_nh_set)
+
+ op_cmd_list = {}
+ for ip_nh in diff_set:
+ if ip_nh in cur_nh_set:
+ op = self.OP_DELETE
+ else:
+ op = self.OP_ADD
+
+ op_cmds = op_cmd_list.setdefault(op, [])
+ op_cmds.append(self.generate_command(op, ip_nh, ip_prefix, vrf))
+
+ cmd_list = op_cmd_list.get(self.OP_DELETE, [])
+ cmd_list += op_cmd_list.get(self.OP_ADD, [])
+
+ return cmd_list
+
+ def generate_command(self, op, ip_nh, ip_prefix, vrf):
+ return '{}{} route {}{}{}'.format(
+ 'no ' if op == self.OP_DELETE else '',
+ 'ipv6' if ip_nh.af == socket.AF_INET6 else 'ip',
+ ip_prefix,
+ ip_nh,
+ ' vrf {}'.format(vrf) if vrf != 'default' else ''
+ )
+
+class IpNextHop:
+ def __init__(self, af_id, blackhole, dst_ip, if_name, dist, vrf):
+ zero_ip = lambda af: '0.0.0.0' if af == socket.AF_INET else '::'
+ self.af = af_id
+ self.blackhole = 'false' if blackhole is None or blackhole == '' else blackhole
+ self.distance = 0 if dist is None else int(dist)
+ if self.blackhole == 'true':
+ dst_ip = if_name = vrf = None
+ self.ip = zero_ip(af_id) if dst_ip is None or dst_ip == '' else dst_ip
+ self.interface = '' if if_name is None else if_name
+ self.nh_vrf = '' if vrf is None else vrf
+ if self.blackhole != 'true' and self.is_zero_ip() and len(self.interface.strip()) == 0:
+ log_err('Mandatory attribute not found for nexthop')
+ raise ValueError
+ def __eq__(self, other):
+ return (self.af == other.af and self.blackhole == other.blackhole and
+ self.ip == other.ip and self.interface == other.interface and
+ self.distance == other.distance and self.nh_vrf == other.nh_vrf)
+ def __ne__(self, other):
+ return (self.af != other.af or self.blackhole != other.blackhole or
+ self.ip != other.ip or self.interface != other.interface or
+ self.distance != other.distance or self.nh_vrf != other.nh_vrf)
+ def __hash__(self):
+ return hash((self.af, self.blackhole, self.ip, self.interface, self.distance, self.nh_vrf))
+ def is_zero_ip(self):
+ return sum([x for x in socket.inet_pton(self.af, self.ip)]) == 0
+ def __format__(self, format):
+ ret_val = ''
+ if self.blackhole == 'true':
+ ret_val += ' blackhole'
+ if not (self.ip is None or self.is_zero_ip()):
+ ret_val += ' %s' % self.ip
+ if not (self.interface is None or self.interface == ''):
+ ret_val += ' %s' % self.interface
+ if not (self.distance is None or self.distance == 0):
+ ret_val += ' %d' % self.distance
+ if not (self.nh_vrf is None or self.nh_vrf == ''):
+ ret_val += ' nexthop-vrf %s' % self.nh_vrf
+ return ret_val
+
+class IpNextHopSet(set):
+ def __init__(self, is_ipv6, bkh_list = None, ip_list = None, intf_list = None, dist_list = None, vrf_list = None):
+ super(IpNextHopSet, self).__init__()
+ af = socket.AF_INET6 if is_ipv6 else socket.AF_INET
+ if bkh_list is None and ip_list is None and intf_list is None:
+ # empty set, for delete case
+ return
+ nums = {len(x) for x in [bkh_list, ip_list, intf_list, dist_list, vrf_list] if x is not None}
+ if len(nums) != 1:
+ log_err("Lists of next-hop attribute have different sizes: %s" % nums)
+ for x in [bkh_list, ip_list, intf_list, dist_list, vrf_list]:
+ log_debug("List: %s" % x)
+ raise ValueError
+ nh_cnt = nums.pop()
+ item = lambda lst, i: lst[i] if lst is not None else None
+ for idx in range(nh_cnt):
+ try:
+ self.add(IpNextHop(af, item(bkh_list, idx), item(ip_list, idx), item(intf_list, idx),
+ item(dist_list, idx), item(vrf_list, idx), ))
+ except ValueError:
+ continue
diff --git a/src/sonic-bgpcfgd/tests/test_static_rt.py b/src/sonic-bgpcfgd/tests/test_static_rt.py
new file mode 100644
index 00000000000..c6a3e40fab4
--- /dev/null
+++ b/src/sonic-bgpcfgd/tests/test_static_rt.py
@@ -0,0 +1,487 @@
+from unittest.mock import MagicMock, patch
+
+from bgpcfgd.directory import Directory
+from bgpcfgd.template import TemplateFabric
+from bgpcfgd.managers_static_rt import StaticRouteMgr
+from collections import Counter
+
+def constructor():
+ cfg_mgr = MagicMock()
+
+ common_objs = {
+ 'directory': Directory(),
+ 'cfg_mgr': cfg_mgr,
+ 'tf': TemplateFabric(),
+ 'constants': {},
+ }
+
+ mgr = StaticRouteMgr(common_objs, "CONFIG_DB", "STATIC_ROUTE")
+ assert len(mgr.static_routes) == 0
+
+ return mgr
+
+def set_del_test(mgr, op, args, expected_ret, expected_cmds):
+ set_del_test.push_list_called = False
+ def push_list(cmds):
+ set_del_test.push_list_called = True
+ assert Counter(cmds) == Counter(expected_cmds) # check if commands are expected (regardless of the order)
+ max_del_idx = -1
+ min_set_idx = len(cmds)
+ for idx in range(len(cmds)):
+ if cmds[idx].startswith('no') and idx > max_del_idx:
+ max_del_idx = idx
+ if not cmds[idx].startswith('no') and idx < min_set_idx:
+ min_set_idx = idx
+ assert max_del_idx < min_set_idx, "DEL command comes after SET command" # DEL commands should be done first
+ return True
+ mgr.cfg_mgr.push_list = push_list
+
+ if op == "SET":
+ ret = mgr.set_handler(*args)
+ assert ret == expected_ret
+ elif op == "DEL":
+ mgr.del_handler(*args)
+ else:
+ assert False, "Wrong operation"
+
+ if expected_cmds:
+ assert set_del_test.push_list_called, "cfg_mgr.push_list wasn't called"
+ else:
+ assert not set_del_test.push_list_called, "cfg_mgr.push_list was called"
+
+def test_set():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("10.1.0.0/24", {
+ "nexthop": "10.0.0.57",
+ }),
+ True,
+ [
+ "ip route 10.1.0.0/24 10.0.0.57"
+ ]
+ )
+
+def test_set_nhvrf():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|10.1.1.0/24", {
+ "nexthop": "10.0.0.57",
+ "ifname": "PortChannel0001",
+ "distance": "10",
+ "nexthop-vrf": "nh_vrf",
+ "blackhole": "false",
+ }),
+ True,
+ [
+ "ip route 10.1.1.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf"
+ ]
+ )
+
+def test_set_blackhole():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|10.1.2.0/24", {
+ "nexthop": "10.0.0.57",
+ "ifname": "PortChannel0001",
+ "distance": "10",
+ "nexthop-vrf": "nh_vrf",
+ "blackhole": "true",
+ }),
+ True,
+ [
+ "ip route 10.1.2.0/24 blackhole 10"
+ ]
+ )
+
+def test_set_vrf():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57",
+ "ifname": "PortChannel0001",
+ "distance": "10",
+ "nexthop-vrf": "nh_vrf",
+ "blackhole": "false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED"
+ ]
+ )
+
+def test_set_ipv6():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|fc00:10::/64", {
+ "nexthop": "fc00::72",
+ "ifname": "PortChannel0001",
+ "distance": "10",
+ "nexthop-vrf": "",
+ "blackhole": "false",
+ }),
+ True,
+ [
+ "ipv6 route fc00:10::/64 fc00::72 PortChannel0001 10"
+ ]
+ )
+
+def test_set_nh_only():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.59 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.61 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+
+def test_set_ifname_only():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 PortChannel0002 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+
+def test_set_with_empty_ifname():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61",
+ "ifname": "PortChannel0001,,PortChannel0003",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.59 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+
+def test_set_with_empty_nh():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,,",
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 PortChannel0002 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+
+def test_set_del():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61",
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.59 PortChannel0002 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+ set_del_test(
+ mgr,
+ "DEL",
+ ("vrfRED|10.1.3.0/24",),
+ True,
+ [
+ "no ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "no ip route 10.1.3.0/24 10.0.0.59 PortChannel0002 20 vrf vrfRED",
+ "no ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61",
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.59 PortChannel0002 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+
+def test_set_same_route():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61",
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.59 PortChannel0002 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61",
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003",
+ "distance": "40,50,60",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "no ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "no ip route 10.1.3.0/24 10.0.0.59 PortChannel0002 20 vrf vrfRED",
+ "no ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 40 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.59 PortChannel0002 50 vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 60 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+
+def test_set_add_del_nh():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61",
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003",
+ "distance": "10,20,30",
+ "nexthop-vrf": "nh_vrf,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.57 PortChannel0001 10 nexthop-vrf nh_vrf vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.59 PortChannel0002 20 vrf vrfRED",
+ "ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED"
+ ]
+ )
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59,10.0.0.61,10.0.0.63",
+ "ifname": "PortChannel0001,PortChannel0002,PortChannel0003,PortChannel0004",
+ "distance": "10,20,30,30",
+ "nexthop-vrf": "nh_vrf,,default,",
+ "blackhole": "false,false,false,",
+ }),
+ True,
+ [
+ "ip route 10.1.3.0/24 10.0.0.63 PortChannel0004 30 vrf vrfRED",
+ ]
+ )
+ set_del_test(
+ mgr,
+ "SET",
+ ("vrfRED|10.1.3.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59",
+ "ifname": "PortChannel0001,PortChannel0002",
+ "distance": "10,20",
+ "nexthop-vrf": "nh_vrf,",
+ "blackhole": "false,false",
+ }),
+ True,
+ [
+ "no ip route 10.1.3.0/24 10.0.0.61 PortChannel0003 30 nexthop-vrf default vrf vrfRED",
+ "no ip route 10.1.3.0/24 10.0.0.63 PortChannel0004 30 vrf vrfRED",
+ ]
+ )
+
+def test_set_add_del_nh_ethernet():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|20.1.3.0/24", {
+ "nexthop": "20.0.0.57,20.0.0.59,20.0.0.61",
+ "ifname": "Ethernet4,Ethernet8,Ethernet12",
+ "distance": "10,20,30",
+ "nexthop-vrf": "default,,default",
+ "blackhole": "false,false,false",
+ }),
+ True,
+ [
+ "ip route 20.1.3.0/24 20.0.0.57 Ethernet4 10 nexthop-vrf default",
+ "ip route 20.1.3.0/24 20.0.0.59 Ethernet8 20",
+ "ip route 20.1.3.0/24 20.0.0.61 Ethernet12 30 nexthop-vrf default"
+ ]
+ )
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|20.1.3.0/24", {
+ "nexthop": "20.0.0.57,20.0.0.59,20.0.0.61,20.0.0.63",
+ "ifname": "Ethernet4,Ethernet8,Ethernet12,Ethernet16",
+ "distance": "10,20,30,30",
+ "nexthop-vrf": "default,,default,",
+ "blackhole": "false,false,false,",
+ }),
+ True,
+ [
+ "ip route 20.1.3.0/24 20.0.0.63 Ethernet16 30",
+ ]
+ )
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|20.1.3.0/24", {
+ "nexthop": "20.0.0.57,20.0.0.59",
+ "ifname": "Ethernet4,Ethernet8",
+ "distance": "10,20",
+ "nexthop-vrf": "default,",
+ "blackhole": "false,false",
+ }),
+ True,
+ [
+ "no ip route 20.1.3.0/24 20.0.0.61 Ethernet12 30 nexthop-vrf default",
+ "no ip route 20.1.3.0/24 20.0.0.63 Ethernet16 30",
+ ]
+ )
+
+@patch('bgpcfgd.managers_static_rt.log_debug')
+def test_set_no_action(mocked_log_debug):
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|10.1.1.0/24", {
+ "nexthop": "10.0.0.57",
+ "ifname": "PortChannel0001",
+ "blackhole": "true",
+ }),
+ True,
+ [
+ "ip route 10.1.1.0/24 blackhole"
+ ]
+ )
+
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|10.1.1.0/24", {
+ "nexthop": "10.0.0.59",
+ "ifname": "PortChannel0002",
+ "blackhole": "true",
+ }),
+ True,
+ []
+ )
+ mocked_log_debug.assert_called_with("Nothing to update for static route default|10.1.1.0/24")
+
+@patch('bgpcfgd.managers_static_rt.log_debug')
+def test_del_no_action(mocked_log_debug):
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "DEL",
+ ("default|10.1.1.0/24",),
+ True,
+ []
+ )
+ mocked_log_debug.assert_called_with("Nothing to update for static route default|10.1.1.0/24")
+
+def test_set_invalid_arg():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|10.1.1.0/24", {
+ "nexthop": "10.0.0.57,10.0.0.59",
+ "ifname": "PortChannel0001",
+ }),
+ False,
+ []
+ )
+
+@patch('bgpcfgd.managers_static_rt.log_err')
+def test_set_invalid_blackhole(mocked_log_err):
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("default|10.1.1.0/24", {
+ "nexthop": "",
+ "ifname": "",
+ "blackhole": "false",
+ }),
+ True,
+ []
+ )
+ mocked_log_err.assert_called_with("Mandatory attribute not found for nexthop")
+
+def test_set_invalid_ipaddr():
+ mgr = constructor()
+ set_del_test(
+ mgr,
+ "SET",
+ ("10.1.0.0/24", {
+ "nexthop": "invalid_ipaddress",
+ }),
+ False,
+ []
+ )
diff --git a/src/sonic-config-engine/minigraph.py b/src/sonic-config-engine/minigraph.py
index 0b99ad07f32..83a0e2af4aa 100644
--- a/src/sonic-config-engine/minigraph.py
+++ b/src/sonic-config-engine/minigraph.py
@@ -606,7 +606,11 @@ def parse_dpg(dpg, hname):
if panel_port not in intfs_inpc and panel_port not in acl_intfs:
acl_intfs.append(panel_port)
break
- if acl_intfs:
+ # If the ACL is classified as a mirror (erspan) ACL, or ACL
+ # interfaces are bound, then do not classify it as Control plane.
+ # For multi-asic platforms it is possible that no interfaces
+ # are bound to everflow in the host namespace.
+ if acl_intfs or is_mirror_v6 or is_mirror:
# Remove duplications
dedup_intfs = []
for intf in acl_intfs:
diff --git a/src/sonic-config-engine/tests/multi_npu_data/py2/ipinip.json b/src/sonic-config-engine/tests/multi_npu_data/py2/ipinip.json
index 3e8d4429d77..82d583de607 100644
--- a/src/sonic-config-engine/tests/multi_npu_data/py2/ipinip.json
+++ b/src/sonic-config-engine/tests/multi_npu_data/py2/ipinip.json
@@ -3,7 +3,7 @@
"TUNNEL_DECAP_TABLE:IPINIP_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"10.0.0.0,10.1.0.1,10.1.0.3,10.1.0.32,8.0.0.0",
- "dscp_mode":"pipe",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
@@ -14,7 +14,7 @@
"TUNNEL_DECAP_TABLE:IPINIP_V6_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"fc00:1::32,fc00::1,fd00:1::32",
- "dscp_mode":"pipe",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
diff --git a/src/sonic-config-engine/tests/multi_npu_data/py3/ipinip.json b/src/sonic-config-engine/tests/multi_npu_data/py3/ipinip.json
index 3e8d4429d77..82d583de607 100644
--- a/src/sonic-config-engine/tests/multi_npu_data/py3/ipinip.json
+++ b/src/sonic-config-engine/tests/multi_npu_data/py3/ipinip.json
@@ -3,7 +3,7 @@
"TUNNEL_DECAP_TABLE:IPINIP_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"10.0.0.0,10.1.0.1,10.1.0.3,10.1.0.32,8.0.0.0",
- "dscp_mode":"pipe",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
@@ -14,7 +14,7 @@
"TUNNEL_DECAP_TABLE:IPINIP_V6_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"fc00:1::32,fc00::1,fd00:1::32",
- "dscp_mode":"pipe",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
diff --git a/src/sonic-config-engine/tests/multi_npu_data/sample-minigraph-noportchannel.xml b/src/sonic-config-engine/tests/multi_npu_data/sample-minigraph-noportchannel.xml
new file mode 100644
index 00000000000..460f71e21c2
--- /dev/null
+++ b/src/sonic-config-engine/tests/multi_npu_data/sample-minigraph-noportchannel.xml
@@ -0,0 +1,1299 @@
+
+
+
+
+
+ false
+ multi_npu_platform_01
+ 10.0.0.0
+ 01T2
+ 10.0.0.1
+ 1
+ 10
+ 3
+
+
+ multi_npu_platform_01
+ FC00::1
+ 01T2
+ FC00::2
+ 1
+ 10
+ 3
+
+
+ false
+ multi_npu_platform_01
+ 10.0.0.8
+ 05T2
+ 10.0.0.9
+ 1
+ 10
+ 3
+
+
+ multi_npu_platform_01
+ FC00::9
+ 05T2
+ FC00::A
+ 1
+ 10
+ 3
+
+
+ BGPSession
+ false
+ ASIC2
+ 10.1.0.0
+ ASIC0
+ 10.1.0.1
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC2
+ 10.1.0.4
+ ASIC1
+ 10.1.0.5
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC3
+ 10.1.0.2
+ ASIC0
+ 10.1.0.3
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC3
+ 10.1.0.6
+ ASIC1
+ 10.1.0.7
+ 1
+ 0
+ 0
+
+
+ false
+ ASIC0
+ 10.0.0.0
+ 01T2
+ 10.0.0.1
+ 1
+ 10
+ 3
+
+
+ ASIC0
+ FC00::1
+ 01T2
+ FC00::2
+ 1
+ 10
+ 3
+
+
+ false
+ ASIC1
+ 10.0.0.8
+ 05T2
+ 10.0.0.9
+ 1
+ 10
+ 3
+
+
+ ASIC1
+ FC00::9
+ 05T2
+ FC00::A
+ 1
+ 10
+ 3
+
+
+
+
+ 65100
+ multi_npu_platform_01
+
+
+ 10.0.0.1
+
+
+
+
+
+ 10.0.0.9
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC0
+
+
+ BGPPeer
+ 10.1.0.1
+
+
+
+
+
+ BGPPeer
+ 10.1.0.3
+
+
+
+
+
+ BGPPeer
+ 10.0.0.1
+
+
+
+
+
+ BGPPeer
+ FC00::1
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC1
+
+
+ BGPPeer
+ 10.1.0.5
+
+
+
+
+
+ BGPPeer
+ 10.1.0.7
+
+
+
+
+
+ BGPPeer
+ 10.0.0.9
+
+
+
+
+
+ BGPPeer
+ FC00::A
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC2
+
+
+ BGPPeer
+ 10.1.0.0
+
+
+
+
+
+ BGPPeer
+ 10.1.0.4
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC3
+
+
+ BGPPeer
+ 10.1.0.2
+
+
+
+
+
+ BGPPeer
+ 10.1.0.6
+
+
+
+
+
+
+
+
+ 65200
+ 01T2
+
+
+
+ 65200
+ 05T2
+
+
+
+
+
+
+
+
+
+ HostIP
+ Loopback0
+
+ 10.1.0.32/32
+
+ 10.1.0.32/32
+
+
+ HostIP1
+ Loopback0
+
+ FC00:1::32/128
+
+ FC00:1::32/128
+
+
+
+
+ HostIP
+ eth0
+
+ 3.10.147.150/23
+
+ 3.10.147.150/23
+
+
+ V6HostIP
+ eth0
+
+ FC00:2::32/64
+
+ FC00:2::32/64
+
+
+
+
+
+
+ multi_npu_platform_01
+
+
+
+
+
+
+ Ethernet1/1
+ 10.0.0.0/31
+
+
+
+ Ethernet1/1
+ FC00::1/126
+
+
+
+ Ethernet1/2
+ 10.0.0.2/31
+
+
+
+ Ethernet1/2
+ FC00::2/126
+
+
+
+ Ethernet1/5
+ 10.0.0.8/31
+
+
+
+ Ethernet1/5
+ FC00::9/126
+
+
+
+ Ethernet1/8
+ 10.0.0.10/31
+
+
+
+ Ethernet1/8
+ FC00::10/126
+
+
+
+
+
+ SNMP_ACL
+ SNMP
+ SNMP
+
+
+ ERSPAN
+ Everflow
+ Everflow
+
+
+ ERSPANV6
+ EverflowV6
+ EverflowV6
+
+
+ VTY_LINE
+ ssh-only
+ SSH
+
+
+
+
+
+
+
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.0/32
+
+ 8.0.0.0/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:1::32/128
+
+ FD00:1::32/128
+
+
+
+
+
+
+
+ ASIC0
+
+
+ PortChannelInterface
+ PortChannel4001
+ Eth4-ASIC0;Eth5-ASIC0
+
+
+
+ PortChannelInterface
+ PortChannel4002
+ Eth6-ASIC0;Eth7-ASIC0
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4001
+ 10.1.0.1/31
+
+
+ IPInterface
+
+ PortChannel4002
+ 10.1.0.3/31
+
+
+
+ Ethernet1/1
+ 10.0.0.0/31
+
+
+
+ Ethernet1/1
+ FC00::1/126
+
+
+
+ Ethernet1/2
+ 10.0.0.2/31
+
+
+
+ Ethernet1/2
+ FC00::2/126
+
+
+
+
+
+
+
+
+
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.1/32
+
+ 8.0.0.1/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:2::32/128
+
+ FD00:2::32/128
+
+
+
+
+
+
+
+ ASIC1
+
+
+ PortChannelInterface
+ PortChannel4003
+ Eth4-ASIC1;Eth5-ASIC1
+
+
+
+ PortChannelInterface
+ PortChannel4004
+ Eth6-ASIC1;Eth7-ASIC1
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4003
+ 10.1.0.5/31
+
+
+ IPInterface
+
+ PortChannel4004
+ 10.1.0.7/31
+
+
+
+ Ethernet1/5
+ 10.0.0.8/31
+
+
+
+ Ethernet1/5
+ FC00::9/126
+
+
+
+ Ethernet1/8
+ 10.0.0.10/31
+
+
+
+ Ethernet1/8
+ FC00::10/126
+
+
+
+
+
+
+
+
+
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.4/32
+
+ 8.0.0.4/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:3::32/128
+
+ FD00:3::32/128
+
+
+
+
+
+
+
+ ASIC2
+
+
+ PortChannelInterface
+ PortChannel4009
+ Eth0-ASIC2;Eth1-ASIC2
+
+
+
+ PortChannelInterface
+ PortChannel4010
+ Eth2-ASIC2;Eth3-ASIC2
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4009
+ 10.1.0.0/31
+
+
+ IPInterface
+
+ PortChannel4010
+ 10.1.0.4/31
+
+
+
+
+
+
+
+
+
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.5/32
+
+ 8.0.0.5/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:4::32/128
+
+ FD00:4::32/128
+
+
+
+
+
+
+
+ ASIC3
+
+
+ PortChannelInterface
+ PortChannel4013
+ Eth0-ASIC3;Eth1-ASIC3
+
+
+
+ PortChannelInterface
+ PortChannel4014
+ Eth2-ASIC3;Eth3-ASIC3
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4013
+ 10.1.0.2/31
+
+
+ IPInterface
+
+ PortChannel4014
+ 10.1.0.6/31
+
+
+
+
+
+
+
+
+
+
+
+ DeviceInterfaceLink
+ 01T2
+ Ethernet1
+ multi_npu_platform_01
+ Ethernet1/1
+
+
+ DeviceInterfaceLink
+ 01T2
+ Ethernet2
+ multi_npu_platform_01
+ Ethernet1/2
+
+
+ DeviceInterfaceLink
+ 05T2
+ Ethernet1
+ multi_npu_platform_01
+ Ethernet1/5
+
+
+ DeviceInterfaceLink
+ 05T2
+ Ethernet2
+ multi_npu_platform_01
+ Ethernet1/6
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth0-ASIC2
+ true
+ ASIC0
+ Eth4-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth1-ASIC2
+ true
+ ASIC0
+ Eth5-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth0-ASIC3
+ true
+ ASIC0
+ Eth6-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth1-ASIC3
+ true
+ ASIC0
+ Eth7-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth2-ASIC2
+ true
+ ASIC1
+ Eth4-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth3-ASIC2
+ true
+ ASIC1
+ Eth5-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth2-ASIC3
+ true
+ ASIC1
+ Eth6-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth3-ASIC3
+ true
+ ASIC1
+ Eth7-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth0-ASIC0
+ true
+ multi_npu_platform_01
+ Ethernet1/1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth1-ASIC0
+ true
+ multi_npu_platform_01
+ Ethernet1/2
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth2-ASIC0
+ true
+ multi_npu_platform_01
+ Ethernet1/3
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth3-ASIC0
+ true
+ multi_npu_platform_01
+ Ethernet1/4
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth0-ASIC1
+ true
+ multi_npu_platform_01
+ Ethernet1/5
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth1-ASIC1
+ true
+ multi_npu_platform_01
+ Ethernet1/6
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth2-ASIC1
+ true
+ multi_npu_platform_01
+ Ethernet1/7
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth3-ASIC1
+ true
+ multi_npu_platform_01
+ Ethernet1/8
+ true
+
+
+
+
+ multi_npu_platform_01
+ multi-npu-01
+
+ 3.10.147.150
+
+
+
+ 07T2
+
+ 89.139.132.43
+
+ VM
+
+
+ 01T2
+
+ 89.139.132.40
+
+ VM
+
+
+ 05T2
+
+ 89.139.132.42
+
+ VM
+
+
+ 03T2
+
+ 89.139.132.41
+
+ VM
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC0
+ multi-npu-asic
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC1
+ multi-npu-asic
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC2
+ multi-npu-asic
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC3
+ multi-npu-asic
+
+
+
+
+
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/1
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/2
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/3
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/4
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/5
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/6
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/7
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ 1
+ Ethernet1/8
+
+ false
+ 0
+ 0
+ 40000
+
+
+ true
+ 0
+ multi-npu-01
+
+
+
+
+
+
+ multi_npu_platform_01
+
+
+ DeploymentId
+
+ 1
+
+
+ QosProfile
+
+ Profile0
+
+
+ DhcpResources
+
+ 169.118.23.1;169.118.23.2;169.118.23.3;169.118.23.4;169.118.23.5;169.118.23.6;169.118.23.7;169.118.23.8;169.118.23.9;169.118.23.10;169.118.23.11;169.118.23.12;169.118.23.13;169.118.23.14;169.118.23.15;169.118.23.16;169.118.23.17;169.118.23.18;169.118.23.19;169.118.23.20;169.118.23.21;169.118.23.22;169.118.23.23;169.118.23.24;169.118.23.25;169.118.23.26;169.118.23.27;169.118.23.28;169.118.23.29;169.118.23.30;169.118.23.31;169.118.23.32;169.118.23.33;169.118.23.34;169.118.23.35;169.118.23.36;169.118.23.37;169.118.23.38;169.118.23.39;169.118.23.40;169.118.23.41;169.118.23.42;169.118.23.43;169.118.23.44;169.118.23.45;169.118.23.46;169.118.23.47;169.118.23.48
+
+
+ NtpResources
+
+ 17.39.1.129;17.39.1.130
+
+
+ SnmpResources
+
+ 71.49.219.98
+
+
+ SyslogResources
+
+ 71.49.219.8;123.46.98.21
+
+
+ TacacsGroup
+
+ Starlab
+
+
+ TacacsServer
+
+ 123.46.98.21
+
+
+ ForcedMgmtRoutes
+
+ 71.49.219.98/31;71.49.219.8;123.46.98.16/28;10.3.149.170/31;40.122.216.24;13.91.48.226;71.49.219.14
+
+
+ ErspanDestinationIpv4
+
+ 10.20.6.16
+
+
+
+
+ ASIC0
+
+
+ SubRole
+
+ FrontEnd
+
+
+
+
+ ASIC1
+
+
+ SubRole
+
+ FrontEnd
+
+
+
+
+ ASIC2
+
+
+ SubRole
+
+ FrontEnd
+
+
+
+
+ ASIC3
+
+
+ SubRole
+
+ FrontEnd
+
+
+
+
+ ASIC2
+
+
+ SubRole
+
+ BackEnd
+
+
+
+
+ ASIC3
+
+
+ SubRole
+
+ BackEnd
+
+
+
+
+
+
+
+
+
+
+
+
+ AutoNegotiation
+
+ True
+
+
+ multi_npu_platform_01:Ethernet1/1;01T2:Ethernet1
+
+
+
+
+
+ AutoNegotiation
+
+ True
+
+
+ multi_npu_platform_01:Ethernet1/2;01T2:Ethernet2
+
+
+
+
+
+ AutoNegotiation
+
+ True
+
+
+ multi_npu_platform_01:Ethernet1/5;05T2:Ethernet1
+
+
+
+
+
+ AutoNegotiation
+
+ True
+
+
+ multi_npu_platform_01:Ethernet1/6;05T2:Ethernet2
+
+
+
+ multi_npu_platform_01
+ multi-npu-01
+
diff --git a/src/sonic-config-engine/tests/sample_output/py2/bgpd_quagga.conf b/src/sonic-config-engine/tests/sample_output/py2/bgpd_quagga.conf
index f5a4b22f0ee..43e8ade6a30 100644
--- a/src/sonic-config-engine/tests/sample_output/py2/bgpd_quagga.conf
+++ b/src/sonic-config-engine/tests/sample_output/py2/bgpd_quagga.conf
@@ -23,10 +23,10 @@ router bgp 65100
bgp graceful-restart restart-time 240
bgp graceful-restart
bgp router-id 10.1.0.32
+ network 10.1.0.32/32
address-family ipv6
network fc00:1::32/64
exit-address-family
- network 10.1.0.32/32
network 192.168.200.1/27
network 192.168.0.1/27
neighbor 10.0.0.59 remote-as 64600
diff --git a/src/sonic-config-engine/tests/sample_output/py2/ipinip.json b/src/sonic-config-engine/tests/sample_output/py2/ipinip.json
index 62346451af0..0a01058463b 100644
--- a/src/sonic-config-engine/tests/sample_output/py2/ipinip.json
+++ b/src/sonic-config-engine/tests/sample_output/py2/ipinip.json
@@ -2,8 +2,8 @@
{
"TUNNEL_DECAP_TABLE:IPINIP_TUNNEL" : {
"tunnel_type":"IPINIP",
- "dst_ip":"10.0.0.56,10.0.0.58,10.0.0.60,10.0.0.62,10.1.0.32,192.168.0.1,192.168.200.1",
- "dscp_mode":"pipe",
+ "dst_ip":"10.0.0.56,10.0.0.58,10.0.0.60,10.0.0.62,10.1.0.32,10.21.0.64,10.21.64.2,192.168.0.1,192.168.200.1",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
@@ -14,7 +14,7 @@
"TUNNEL_DECAP_TABLE:IPINIP_V6_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"fc00:1::32,fc00::71,fc00::75,fc00::79,fc00::7d",
- "dscp_mode":"pipe",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
diff --git a/src/sonic-config-engine/tests/sample_output/py3/ipinip.json b/src/sonic-config-engine/tests/sample_output/py3/ipinip.json
index 62346451af0..0a01058463b 100644
--- a/src/sonic-config-engine/tests/sample_output/py3/ipinip.json
+++ b/src/sonic-config-engine/tests/sample_output/py3/ipinip.json
@@ -2,8 +2,8 @@
{
"TUNNEL_DECAP_TABLE:IPINIP_TUNNEL" : {
"tunnel_type":"IPINIP",
- "dst_ip":"10.0.0.56,10.0.0.58,10.0.0.60,10.0.0.62,10.1.0.32,192.168.0.1,192.168.200.1",
- "dscp_mode":"pipe",
+ "dst_ip":"10.0.0.56,10.0.0.58,10.0.0.60,10.0.0.62,10.1.0.32,10.21.0.64,10.21.64.2,192.168.0.1,192.168.200.1",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
@@ -14,7 +14,7 @@
"TUNNEL_DECAP_TABLE:IPINIP_V6_TUNNEL" : {
"tunnel_type":"IPINIP",
"dst_ip":"fc00:1::32,fc00::71,fc00::75,fc00::79,fc00::7d",
- "dscp_mode":"pipe",
+ "dscp_mode":"uniform",
"ecn_mode":"copy_from_outer",
"ttl_mode":"pipe"
},
diff --git a/src/sonic-config-engine/tests/t0-sample-graph.xml b/src/sonic-config-engine/tests/t0-sample-graph.xml
index f847af1f90c..266554dd331 100644
--- a/src/sonic-config-engine/tests/t0-sample-graph.xml
+++ b/src/sonic-config-engine/tests/t0-sample-graph.xml
@@ -185,6 +185,22 @@
10.10.0.99/32
+
+ LoopbackIP2
+ Loopback2
+
+ 10.21.0.64/32
+
+ 10.21.0.64/32
+
+
+ LoopbackIP3
+ Loopback3
+
+ 10.21.64.2/32
+
+ 10.21.64.2/32
+
diff --git a/src/sonic-config-engine/tests/test_multinpu_cfggen.py b/src/sonic-config-engine/tests/test_multinpu_cfggen.py
index fbcddb65a1f..2b77d1c6aa7 100644
--- a/src/sonic-config-engine/tests/test_multinpu_cfggen.py
+++ b/src/sonic-config-engine/tests/test_multinpu_cfggen.py
@@ -24,6 +24,7 @@ def setUp(self):
self.test_data_dir = os.path.join(self.test_dir, 'multi_npu_data')
self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen')
self.sample_graph = os.path.join(self.test_data_dir, 'sample-minigraph.xml')
+ self.sample_graph1 = os.path.join(self.test_data_dir, 'sample-minigraph-noportchannel.xml')
self.port_config = []
for asic in range(NUM_ASIC):
self.port_config.append(os.path.join(self.test_data_dir, "sample_port_config-{}.ini".format(asic)))
@@ -183,6 +184,14 @@ def test_frontend_asic_portchannel_intf(self):
utils.liststr_to_dict("['PortChannel4001|10.1.0.1/31', 'PortChannel0002|FC00::1/126', 'PortChannel4002|10.1.0.3/31', 'PortChannel0002', 'PortChannel0002|10.0.0.0/31', 'PortChannel4001', 'PortChannel4002']")
)
+ def test_frontend_asic_routerport_intf(self):
+ argument = "-m {} -p {} -n asic0 -v \"INTERFACE.keys()|list\"".format(self.sample_graph1, self.port_config[0])
+ output = self.run_script(argument)
+ self.assertEqual(
+ utils.liststr_to_dict(output.strip()),
+ utils.liststr_to_dict("['Ethernet0', ('Ethernet0', '10.0.0.0/31'), 'Ethernet4', ('Ethernet0', 'FC00::1/126'), ('Ethernet4', 'FC00::2/126'), ('Ethernet4', '10.0.0.2/31')]")
+ )
+
def test_backend_asic_portchannel_intf(self):
argument = "-m {} -p {} -n asic3 -v \"PORTCHANNEL_INTERFACE.keys()|list\"".format(self.sample_graph, self.port_config[3])
output = self.run_script(argument)
@@ -282,6 +291,15 @@ def test_global_asic_acl(self):
'EVERFLOWV6':{'policy_desc': 'EVERFLOWV6', 'ports': ['PortChannel0002','PortChannel0008'], 'stage': 'ingress', 'type': 'MIRRORV6'},
'SNMP_ACL': {'policy_desc': 'SNMP_ACL', 'services': ['SNMP'], 'stage': 'ingress', 'type': 'CTRLPLANE'},
'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}})
+ def test_global_asic_acl1(self):
+ argument = "-m {} --var-json \"ACL_TABLE\"".format(self.sample_graph1)
+ output = json.loads(self.run_script(argument))
+ self.assertDictEqual(output, {\
+ 'EVERFLOW': {'policy_desc': 'EVERFLOW', 'ports': [], 'stage': 'ingress', 'type': 'MIRROR'},
+ 'EVERFLOWV6':{'policy_desc': 'EVERFLOWV6', 'ports': [], 'stage': 'ingress', 'type': 'MIRRORV6'},
+ 'SNMP_ACL': {'policy_desc': 'SNMP_ACL', 'services': ['SNMP'], 'stage': 'ingress', 'type': 'CTRLPLANE'},
+ 'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}})
+
def test_front_end_asic_acl(self):
argument = "-m {} -p {} -n asic0 --var-json \"ACL_TABLE\"".format(self.sample_graph, self.port_config[0])
@@ -293,11 +311,27 @@ def test_front_end_asic_acl(self):
'SNMP_ACL': {'policy_desc': 'SNMP_ACL', 'services': ['SNMP'], 'stage': 'ingress', 'type': 'CTRLPLANE'},
'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}})
+ def test_front_end_asic_acl1(self):
+ argument = "-m {} -p {} -n asic0 --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.port_config[0])
+ output = json.loads(self.run_script(argument))
+ self.assertDictEqual(output, {\
+ 'EVERFLOW': {'policy_desc': 'EVERFLOW', 'ports': ['Ethernet0','Ethernet4'], 'stage': 'ingress', 'type': 'MIRROR'},
+ 'EVERFLOWV6':{'policy_desc': 'EVERFLOWV6', 'ports': ['Ethernet0','Ethernet4'], 'stage': 'ingress', 'type': 'MIRRORV6'},
+ 'SNMP_ACL': {'policy_desc': 'SNMP_ACL', 'services': ['SNMP'], 'stage': 'ingress', 'type': 'CTRLPLANE'},
+ 'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}})
+
+
def test_back_end_asic_acl(self):
argument = "-m {} -p {} -n asic3 --var-json \"ACL_TABLE\"".format(self.sample_graph, self.port_config[3])
output = json.loads(self.run_script(argument))
self.assertDictEqual(output, {})
+ def test_back_end_asic_acl1(self):
+ argument = "-m {} -p {} -n asic3 --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.port_config[3])
+ output = json.loads(self.run_script(argument))
+ self.assertDictEqual(output, {})
+
+
def test_loopback_intfs(self):
argument = "-m {} --var-json \"LOOPBACK_INTERFACE\"".format(self.sample_graph)
output = json.loads(self.run_script(argument))
diff --git a/src/sonic-ctrmgrd/setup.py b/src/sonic-ctrmgrd/setup.py
index b53efc7615d..85e02cae12e 100644
--- a/src/sonic-ctrmgrd/setup.py
+++ b/src/sonic-ctrmgrd/setup.py
@@ -24,6 +24,7 @@
tests_require=[
'pytest',
'pytest-cov',
+ 'sonic-py-common',
],
install_requires=['netaddr', 'pyyaml'],
license="GNU General Public License v3",
diff --git a/src/sonic-ctrmgrd/tests/common_test.py b/src/sonic-ctrmgrd/tests/common_test.py
index 5b3eae151cb..54c877f1136 100755
--- a/src/sonic-ctrmgrd/tests/common_test.py
+++ b/src/sonic-ctrmgrd/tests/common_test.py
@@ -1,6 +1,4 @@
import copy
-import importlib.machinery
-import importlib.util
import json
import os
import subprocess
@@ -655,13 +653,3 @@ def create_remote_ctr_config_json():
s.write(str_conf)
return fname
-
-
-def load_mod_from_file(modname, fpath):
- spec = importlib.util.spec_from_loader(modname,
- importlib.machinery.SourceFileLoader(modname, fpath))
- mod = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(mod)
- sys.modules[modname] = mod
- return mod
-
diff --git a/src/sonic-ctrmgrd/tests/container_test.py b/src/sonic-ctrmgrd/tests/container_test.py
index 4d28a7761d2..146d41386de 100755
--- a/src/sonic-ctrmgrd/tests/container_test.py
+++ b/src/sonic-ctrmgrd/tests/container_test.py
@@ -2,12 +2,14 @@
from unittest.mock import MagicMock, patch
import pytest
+from sonic_py_common.general import load_module_from_source
from . import common_test
-common_test.load_mod_from_file("docker",
+
+load_module_from_source("docker",
os.path.join(os.path.dirname(os.path.realpath(__file__)), "mock_docker.py"))
-container = common_test.load_mod_from_file("container",
+container = load_module_from_source("container",
os.path.join(os.path.dirname(os.path.realpath(__file__)), "../ctrmgr/container"))
diff --git a/src/sonic-ctrmgrd/tests/ctrmgr_tools_test.py b/src/sonic-ctrmgrd/tests/ctrmgr_tools_test.py
index d6ffa15ed68..8ea5a5195fd 100755
--- a/src/sonic-ctrmgrd/tests/ctrmgr_tools_test.py
+++ b/src/sonic-ctrmgrd/tests/ctrmgr_tools_test.py
@@ -3,10 +3,11 @@
from unittest.mock import MagicMock, patch
import pytest
+from sonic_py_common.general import load_module_from_source
from . import common_test
-common_test.load_mod_from_file("docker",
+load_module_from_source("docker",
os.path.join(os.path.dirname(os.path.realpath(__file__)), "mock_docker.py"))
sys.path.append("ctrmgr")
diff --git a/src/sonic-host-services/setup.py b/src/sonic-host-services/setup.py
index e5431d034d9..057ae6f1859 100644
--- a/src/sonic-host-services/setup.py
+++ b/src/sonic-host-services/setup.py
@@ -35,6 +35,7 @@
],
tests_require = [
'pytest',
+ 'sonic-py-common'
],
classifiers = [
'Development Status :: 3 - Alpha',
diff --git a/src/sonic-host-services/tests/determine-reboot-cause_test.py b/src/sonic-host-services/tests/determine-reboot-cause_test.py
index 4eb95ee219e..d9e999a5ce2 100644
--- a/src/sonic-host-services/tests/determine-reboot-cause_test.py
+++ b/src/sonic-host-services/tests/determine-reboot-cause_test.py
@@ -1,10 +1,9 @@
-import importlib.machinery
-import importlib.util
import sys
import os
import pytest
import swsssdk
+from sonic_py_common.general import load_module_from_source
# TODO: Remove this if/else block once we no longer support Python 2
if sys.version_info.major == 3:
@@ -31,11 +30,7 @@
# Load the file under test
determine_reboot_cause_path = os.path.join(scripts_path, 'determine-reboot-cause')
-loader = importlib.machinery.SourceFileLoader('determine_reboot_cause', determine_reboot_cause_path)
-spec = importlib.util.spec_from_loader(loader.name, loader)
-determine_reboot_cause = importlib.util.module_from_spec(spec)
-loader.exec_module(determine_reboot_cause)
-sys.modules['determine_reboot_cause'] = determine_reboot_cause
+determine_reboot_cause = load_module_from_source('determine_reboot_cause', determine_reboot_cause_path)
PROC_CMDLINE_CONTENTS = """\
diff --git a/src/sonic-host-services/tests/hostcfgd/hostcfgd_test.py b/src/sonic-host-services/tests/hostcfgd/hostcfgd_test.py
index 57e7215715d..2abbb7f7cad 100644
--- a/src/sonic-host-services/tests/hostcfgd/hostcfgd_test.py
+++ b/src/sonic-host-services/tests/hostcfgd/hostcfgd_test.py
@@ -1,13 +1,13 @@
-import importlib.machinery
-import importlib.util
import os
import sys
import swsssdk
from parameterized import parameterized
+from sonic_py_common.general import load_module_from_source
from unittest import TestCase, mock
-from tests.hostcfgd.test_vectors import HOSTCFGD_TEST_VECTOR
-from tests.hostcfgd.mock_configdb import MockConfigDb
+
+from .test_vectors import HOSTCFGD_TEST_VECTOR
+from .mock_configdb import MockConfigDb
swsssdk.ConfigDBConnector = MockConfigDb
@@ -18,11 +18,7 @@
# Load the file under test
hostcfgd_path = os.path.join(scripts_path, 'hostcfgd')
-loader = importlib.machinery.SourceFileLoader('hostcfgd', hostcfgd_path)
-spec = importlib.util.spec_from_loader(loader.name, loader)
-hostcfgd = importlib.util.module_from_spec(spec)
-loader.exec_module(hostcfgd)
-sys.modules['hostcfgd'] = hostcfgd
+hostcfgd = load_module_from_source('hostcfgd', hostcfgd_path)
class TestHostcfgd(TestCase):
diff --git a/src/sonic-host-services/tests/procdockerstatsd_test.py b/src/sonic-host-services/tests/procdockerstatsd_test.py
index bb218e52ce2..65c5a738ca6 100644
--- a/src/sonic-host-services/tests/procdockerstatsd_test.py
+++ b/src/sonic-host-services/tests/procdockerstatsd_test.py
@@ -1,10 +1,9 @@
-import importlib.machinery
-import importlib.util
import sys
import os
import pytest
import swsssdk
+from sonic_py_common.general import load_module_from_source
from .mock_connector import MockConnector
@@ -17,11 +16,7 @@
# Load the file under test
procdockerstatsd_path = os.path.join(scripts_path, 'procdockerstatsd')
-loader = importlib.machinery.SourceFileLoader('procdockerstatsd', procdockerstatsd_path)
-spec = importlib.util.spec_from_loader(loader.name, loader)
-procdockerstatsd = importlib.util.module_from_spec(spec)
-loader.exec_module(procdockerstatsd)
-sys.modules['procdockerstatsd'] = procdockerstatsd
+procdockerstatsd = load_module_from_source('procdockerstatsd', procdockerstatsd_path)
class TestProcDockerStatsDaemon(object):
def test_convert_to_bytes(self):
diff --git a/src/sonic-platform-common b/src/sonic-platform-common
index 1673d259484..872f0a306bb 160000
--- a/src/sonic-platform-common
+++ b/src/sonic-platform-common
@@ -1 +1 @@
-Subproject commit 1673d25948428c10305674553095199b25cf89e5
+Subproject commit 872f0a306bb8508b1974cebdf62482a25ce7cf1a
diff --git a/src/sonic-platform-daemons b/src/sonic-platform-daemons
index 450b7d783e2..0bd9f698f75 160000
--- a/src/sonic-platform-daemons
+++ b/src/sonic-platform-daemons
@@ -1 +1 @@
-Subproject commit 450b7d783e2705677127dca711c73105170635ba
+Subproject commit 0bd9f698f7550f498af54a52199cb5ad5a45e306
diff --git a/src/sonic-py-common/sonic_py_common/daemon_base.py b/src/sonic-py-common/sonic_py_common/daemon_base.py
index 745b3c55ba9..86870ba1fdc 100644
--- a/src/sonic-py-common/sonic_py_common/daemon_base.py
+++ b/src/sonic-py-common/sonic_py_common/daemon_base.py
@@ -2,6 +2,7 @@
import sys
from . import device_info
+from .general import load_module_from_source
from .logger import Logger
#
@@ -25,29 +26,6 @@ def db_connect(db_name, namespace=EMPTY_NAMESPACE):
return swsscommon.DBConnector(db_name, REDIS_TIMEOUT_MSECS, True, namespace)
-# TODO: Consider moving this logic out of daemon_base and into antoher file
-# so that it can be used by non-daemons. We can simply call that function here
-# to retain backward compatibility.
-def _load_module_from_file(module_name, file_path):
- module = None
-
- # TODO: Remove this check once we no longer support Python 2
- if sys.version_info.major == 3:
- import importlib.machinery
- import importlib.util
- loader = importlib.machinery.SourceFileLoader(module_name, file_path)
- spec = importlib.util.spec_from_loader(loader.name, loader)
- module = importlib.util.module_from_spec(spec)
- loader.exec_module(module)
- else:
- import imp
- module = imp.load_source(module_name, file_path)
-
- sys.modules[module_name] = module
-
- return module
-
-
#
# DaemonBase ===================================================================
#
@@ -92,7 +70,7 @@ def load_platform_util(self, module_name, class_name):
try:
module_file = "/".join([platform_path, "plugins", module_name + ".py"])
- module = _load_module_from_file(module_name, module_file)
+ module = load_module_from_source(module_name, module_file)
except IOError as e:
raise IOError("Failed to load platform module '%s': %s" % (module_name, str(e)))
diff --git a/src/sonic-py-common/sonic_py_common/general.py b/src/sonic-py-common/sonic_py_common/general.py
new file mode 100644
index 00000000000..9e04f3e214e
--- /dev/null
+++ b/src/sonic-py-common/sonic_py_common/general.py
@@ -0,0 +1,25 @@
+import sys
+
+
+def load_module_from_source(module_name, file_path):
+ """
+ This function will load the Python source file specified by <file_path>
+ as a module named <module_name> and return an instance of the module
+ """
+ module = None
+
+ # TODO: Remove this check once we no longer support Python 2
+ if sys.version_info.major == 3:
+ import importlib.machinery
+ import importlib.util
+ loader = importlib.machinery.SourceFileLoader(module_name, file_path)
+ spec = importlib.util.spec_from_loader(loader.name, loader)
+ module = importlib.util.module_from_spec(spec)
+ loader.exec_module(module)
+ else:
+ import imp
+ module = imp.load_source(module_name, file_path)
+
+ sys.modules[module_name] = module
+
+ return module
diff --git a/src/sonic-py-common/sonic_py_common/task_base.py b/src/sonic-py-common/sonic_py_common/task_base.py
index e1738ffba21..afb0ccf699f 100644
--- a/src/sonic-py-common/sonic_py_common/task_base.py
+++ b/src/sonic-py-common/sonic_py_common/task_base.py
@@ -7,44 +7,96 @@
#
# ProcessTaskBase =====================================================================
#
-class ProcessTaskBase(object): # TODO: put this class to swss-platform-common
- def __init__(self):
- self.task_process = None
+class ProcessTaskBase(object):
+ """
+ Base class for creating an object that gets spawned as a separate process
+
+ Child class needs to implement the task_worker method, which should be
+ designed to return if task_stopping_event is set
+
+ """
+ def __init__(self, stop_timeout_secs=1):
+ """
+ Initializer
+
+ Args:
+ stop_timeout_secs (int): Number of seconds to wait for process to exit
+ upon calling task_stop(). If the process fails to stop before the
+ specified timeout, it will attempt to kill the process via brute
+ force. If you would like to wait indefinitely, pass in `None`.
+ """
+ self._stop_timeout_secs = stop_timeout_secs
+ self._task_process = None
self.task_stopping_event = multiprocessing.Event()
def task_worker(self):
- pass
+ raise NotImplementedError
def task_run(self):
if self.task_stopping_event.is_set():
return
- self.task_process = multiprocessing.Process(target=self.task_worker)
- self.task_process.start()
+ self._task_process = multiprocessing.Process(target=self.task_worker)
+ self._task_process.start()
def task_stop(self):
+ # Signal the process to stop
self.task_stopping_event.set()
- os.kill(self.task_process.pid, signal.SIGKILL)
+
+ # Wait for the process to exit
+ self._task_process.join(self._stop_timeout_secs)
+
+ # If the process didn't exit, attempt to kill it
+ if self._task_process.is_alive():
+ os.kill(self._task_process.pid, signal.SIGKILL)
+
+ if self._task_process.is_alive():
+ return False
+
+ return True
#
# ThreadTaskBase =====================================================================
#
-class ThreadTaskBase(object): # TODO: put this class to swss-platform-common;
- def __init__(self):
- self.task_thread = None
+class ThreadTaskBase(object):
+ """
+ Base class for creating an object that gets spawned as a separate thread
+
+ Child class needs to implement the task_worker method, which should be
+ designed to return if task_stopping_event is set
+ """
+ def __init__(self, stop_timeout_secs=None):
+ """
+ Initializer
+
+ Args:
+ stop_timeout_secs (int): Number of seconds to wait for thread to exit
+ upon calling task_stop(). If you would like to wait indefinitely,
+ pass in None.
+ """
+ self._stop_timeout_secs = stop_timeout_secs
+ self._task_thread = None
self.task_stopping_event = threading.Event()
def task_worker(self):
- pass
+ raise NotImplementedError
def task_run(self):
if self.task_stopping_event.is_set():
return
- self.task_thread = threading.Thread(target=self.task_worker)
- self.task_thread.start()
+ self._task_thread = threading.Thread(target=self.task_worker)
+ self._task_thread.start()
def task_stop(self):
+ # Signal the thread to stop
self.task_stopping_event.set()
- self.task_thread.join()
+
+ # Wait for the thread to exit
+ self._task_thread.join(self._stop_timeout_secs)
+
+ if self._task_thread.is_alive():
+ return False
+
+ return True
diff --git a/src/sonic-snmpagent b/src/sonic-snmpagent
index c20bf604361..4e063e4ade8 160000
--- a/src/sonic-snmpagent
+++ b/src/sonic-snmpagent
@@ -1 +1 @@
-Subproject commit c20bf604361c7a1ea47f0815897df6dbb2239a01
+Subproject commit 4e063e4ade89943f2413a767f24564aecfa2cd1c
diff --git a/src/sonic-swss b/src/sonic-swss
index fa983d2e2be..5c63670cd28 160000
--- a/src/sonic-swss
+++ b/src/sonic-swss
@@ -1 +1 @@
-Subproject commit fa983d2e2be9fa007ac56dc3a6ef0c40e72e2ec9
+Subproject commit 5c63670cd284d3c02dff49f2fb0476a40d052c28
diff --git a/src/sonic-telemetry b/src/sonic-telemetry
index 7cd86c5a6bc..1c3f75e0d2b 160000
--- a/src/sonic-telemetry
+++ b/src/sonic-telemetry
@@ -1 +1 @@
-Subproject commit 7cd86c5a6bc38b94f0d7e87c9c278b9d20779a2b
+Subproject commit 1c3f75e0d2b635dcff7d1bdff5cc9a03ed4bf7c5
diff --git a/src/sonic-utilities b/src/sonic-utilities
index 3f2a2964aa5..597639943a5 160000
--- a/src/sonic-utilities
+++ b/src/sonic-utilities
@@ -1 +1 @@
-Subproject commit 3f2a2964aa5b3704d19665c5b6982a6a90213960
+Subproject commit 597639943a5fdd21183ac50428e74f160288b4ae