diff --git a/README.md b/README.md index f08df0a82e6..27c094cd193 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Software for Open Networking in the Cloud - SONiC # Management - +# cls test # Description Tools for managing, configuring and monitoring SONiC diff --git a/ansible/README.deploy.md b/ansible/README.deploy.md index 05105a8e56c..1ee2dd8a336 100644 --- a/ansible/README.deploy.md +++ b/ansible/README.deploy.md @@ -14,7 +14,7 @@ and public [sonicdev Docker registry](https://sonicdev-microsoft.azurecr.io/). - Update [inventory](/ansible/inventory/) file with correct information for your environment. - ansible_host = management ip address - sonic_hwsku = Supported Hardware SKU, e.g. Force10-S6000, ACS-MSN2700 -- Update [group_vars/sonic/variables](/ansible/group_vars/sonic/variables/) file with: +- Update [group_vars/sonic/vars](/ansible/group_vars/sonic/vars/) file with: - Replace `sonicadmin_user` and `ansible_ssh_user` with the username you built into the baseimage - Replace `sonicadmin_initial_password` with the password you built into baseimage. - Update `[ntp,syslog,dns]_servers` with a list of your server IPs for these services. diff --git a/ansible/README.test.md b/ansible/README.test.md index b05dc486c81..d6fb9d8b9dd 100644 --- a/ansible/README.test.md +++ b/ansible/README.test.md @@ -12,306 +12,162 @@ ### **Run test by test case name** -All test cases name and calling variables and applied topologies are specified in [ansible/roles/test/vars/testcases.yml](roles/test/vars/testcases.yml) +This is going to be the supported method to call individual test case going forward. All test cases name and calling variables and applied topologies are specified in [ansible/roles/test/vars/testcases.yml](roles/test/vars/testcases.yml) -When calling test, testbed_name is the main entry to pickup/understand the testbed information associated with the test (ie. ptf_host and testbed_type, VMs info...). 
testbed_name is inherited from your own `ansible/testbed.csv` file. The first column of each line(one testbed topology definition) is the unique name of the testbed and will be used in testbed_name option when calling test. +When calling test, testbed_name is the main entry to pickup/understand the testbed information associated with the test (ie. ptf_host and tyestbed_type, VMs info...). testbed_name is inherited from your own `ansible/testbed.csv` file. The first column of each line(one testbed topology definition) is the unique name of the testbed and will be used in testbed_name option when calling test. ***Example of running a test case:*** - `ansible-playbook -i {INVENTORY} -l {DUT_NAME} test_sonic.yml -e testbed_name={TESTBED_NAME} -e testcase_name={TESTCASE_NAME}` + `ansible-playbook -i lab -l str-s6000-acs-1 test_sonic.yml -e testbed_name={TESTBED_NAME} -e testcase_name={TESTCASE_NAME}` Where: `testcase_name=bgp_fact` `testbed_name=vms-t1-lag` -- Replace {INVENTORY} in each command line with the inventory file name -- Replace {DUT_NAME} in each command line with the host name of switch under test -- Replace {TESTBED_NAME} in each command line with the first column in your 'ansible/testbed.csv' file associated with the DUT_NAME -- Replace {TESTCASE_NAME} in each command line with the testcase tag from 'roles/test/vars/testcases.yml' --- +### **Run test by test case tag `(DEPRECATING)`** + +When Ansible running playbooks by tag, it first include all tasks(all test cases) within test role shich not relate to specific tag. It's very slow along with adding more test cases and it occupied too much resource other than just run one test case using specific tag. It does not scale. + +We newly added a run test case by test name option(see above setion). Running test by tag option won’t be actively maintained going forward, but will backward compatible for all already working test cases, and eventually will be phaseout. 
There still going to be more improvement after the initial check in. + +- Replace {DUT_NAME} in each command line with the host name of switch under test +- Replace {PTF_HOST} in each command line with the host name or IP of the PTF testbed host +- Replace {TESTBED_TYPE} in each command line with the type of the testbed being used ##### ACL tests ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=acl -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags acl --extra-vars "testbed_type={TESTBED_TYPE} ptf_host={PTF_HOST}" ``` -- Requires switch connected to a VM set testbed +- Requires switch connected to a t1 or t1-lag testbed ##### ARP tests ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=arp -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags arp --extra-vars "ptf_host={PTF_HOST}" ``` - Requires switch connected to a PTF testbed ##### BGP facts verification test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=bgp_fact -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a VM set testbed - -##### BGP Multipath Relax test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=bgp_multipath_relax -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a VM set testbed -- This test only works for T1 related topologies(t1, t1-lag, ...) 
-- You might need to redeploy your VMs before you run this test due to the change for ToR VM router configuration changes - `./testbed-cli.sh config-vm your-topo-name(vms1-1) your-vm-name(VM0108)` will do this for you - -##### Config test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=config -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags bgp_fact ``` - Requires switch connected to a VM set testbed -##### Continuous Reboot test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=continuous_reboot -e testbed_name={TESTBED_NAME} -e repeat_count={REPEAT_COUNT} -``` -- Requires switch connected to a VM set testbed -- Replace {REPEAT_COUNT} with the number of times the reboot has to be done. Default: 3 - ##### CoPP test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=copp -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed - -##### CRM test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=crm -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a VM set testbed - -##### DECAP test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=decap -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags copp --extra-vars "ptf_host={PTF_HOST}" ``` - Requires switch connected to a PTF testbed ##### DHCP relay test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=dhcp_relay -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags dhcp_relay --extra-vars "ptf_host={PTF_HOST}" ``` - Requires switch connected to a PTF testbed -##### DIP SIP test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=dip_sip 
-e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed - -##### DIR BCAST test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=dir_bcast -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed - -##### ECMP test -``` -ansible-playbook test_sonic_by_tag.yml -i inventory --limit {DUT_NAME}, --become --tags ecmp --extra-vars "testbed_type={TESTBED_TYPE} vm_hosts=[DESTINATION_VMS] vm_source={SOURCE_VM} [ipv6=True]" -``` -- Requires switch connected to a VM testbed (t1); default IPv4 - ##### ECN WRED test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=ecn_wred -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags ecn_wred ``` - Requires switch connected to a VM testbed ##### Everflow_testbed test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=everflow_testbed -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags everflow_testbed --extra-vars "testbed_type={TESTBED_TYPE} ptf_host={PTF_HOST}" ``` -- Requires switch connected to a PTF testbed - -##### Fast reboot link util test -``` -ansible-playbook -i linkstate/testbed_inv.py -e target_host={TESTBED_NAME} linkstate/{STATE}.yml -``` -- Requires switch connected to a PTF testbed -- Replace {STATE} with up or down -- This test is run before running the fast-reboot/warm-reboot tests and is used to enable link state propagation from fanout to the VMS - -##### FAST REBOOT test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=fast-reboot -e testbed_name={TESTBED_NAME} -e new_sonic_image={IMAGE_URL} --stay_in_target_image={VALUE1} --cleanup_old_sonic_images={VALUE2} -``` -- Requires switch connected to PTF testbed -- Replace {IMAGE_URL} with the link pointing to the next image to fast-reboot into -- Replace 
{VALUE1} and {VALUE2} with true/false. Default: false -- stay_in_target_image parameter decides if the DUT should be reverted back to the old image after fast-reboot -- cleanup_old_sonic_images parameter will decide if all the images on the DUT should be cleaned up except for the current and the next images +- Requires switch connected to a VM testbed ##### FDB test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=fdb -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags fdb --extra-vars "testbed_type={TESTBED_TYPE} ptf_host={PTF_HOST} [ipv6=True]" ``` -- Requires switch connected to a PTF testbed +- Requires switch connected to a VM testbed(t0); default IPv4 -##### FDB Mac expire test +##### FIB test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=fdb_mac_expire -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags fib --extra-vars "testbed_type={TESTBED_TYPE} ptf_host={PTF_HOST} [ipv4=Flase]" ``` -- Requires switch connected to a PTF testbed +- Requires switch connected to a VM testbed; default IPv4 -##### FIB v4 test +##### MTU test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=fib -e testbed_name={TESTBED_NAME} -e ipv6=False +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags mtu --extra-vars "testbed_type={TESTBED_TYPE} ptf_host={PTF_HOST}" ``` -- Requires switch connected to a PTF testbed +- Requires switch connected to a t1 or t1-lag testbed -##### FIB v6 test +##### Fast-Reboot test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=fib -e testbed_name={TESTBED_NAME} -e ipv4=False +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags fast_reboot --extra-vars "ptf_host={PTF_HOST}" ``` - Requires switch connected to a PTF testbed -##### LAG test +##### IPDecap 
Test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=lag_2 -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} -tags decap --extra-vars "testbed_type={TESTBED_TYPE} ptf_host={PTF_HOST} dscp_mode=pipe|uniform" ``` -- Requires switch connected to a VM set testbed with lags configured +- Require VM testbed +- dscp_mode=pipe: if your ASIC type is Broadcom; +- dscp_mode=uniform: if your ASIC type is Mellanox -##### Link Flap test +##### Lag-2 test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=link_flap -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags lag-2 --extra-vars "testbed_type={TESTBED_TYPE} ptf_host={PTF_HOST}" ``` -- Requires switch connected to fanout switch. VM or PTF testbed +- Requires switch connected to a VM testbed with lag configured (t0, t1-lag) ##### LLDP test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME},lldp_neighbors -e testcase_name=lldp -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME},lldp_neighbors --become --tags lldp ``` - Requires switch connected to a VM set testbed -##### MAC read test +##### Link flap test ``` -ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} -e testbed_name={TESTBED_NAME} -e testbed_type={TESTBED_TYPE} -e testcase_name=read_mac -e iterations={ITERATIONS} -e image1={IMAGE1} -e image2={IMAGE2} -``` -- Replace {ITERATIONS} with the integer number of image flipping iterations. -- Replace {IMAGE1} and {IMAGE2} with URLs to the specific SONiC binary images. 
-- Requires switch connected to a VM set testbed - -##### Mem check test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=mem_check -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a VM set or PTF testbed - -##### MTU test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=mtu -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to PTF testbed - -##### Neighbor Mac test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=neighbor -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed - -##### Neighbor Mac address change test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=neighbor_mac_noptf -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a VM set or PTF testbed - -### SNMP memory test -``` -ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME}, --become --tags snmp_memory -e "tolerance=0.05" -e "min_memory_size=512000" +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME}, --become --tags link_flap ``` +- Requires switch connected to fanout switch. 
VM or PTF testbed ##### NTP test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=ntp -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a VM set or PTF testbed - -##### PFC watchdog test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=pfc_wd -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed - -##### Portstat test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=portstat -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags ntp ``` -##### Port Toggle test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=port_toggle -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a VM set or PTF testbed - -##### Reboot test +##### SNMP tests ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=reboot -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags snmp,snmp_cpu,snmp_interfaces ``` -- Requires switch connected to a VM set or PTF testbed +- Require to run Anisble-playbook from docker-sonic-mgmt container. 
##### Sensors test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=sensors -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags sensors ``` -- Requires switch connected to a VM set or PTF testbed -##### Service ACL test +##### Syslog test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=service_acl -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags syslog ``` -- Requires switch connected to a VM set or PTF testbed -##### SNMP tests +##### PFC WD test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=snmp -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags pfc_wd --extra-vars "testbed_type={TESTBED_TYPE}" ``` -- Requires switch connected to a VM set or PTF testbed -##### Syslog test +##### BGP multipath relax test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=syslog -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --tags bgp_multipath_relax --extra-vars "testbed_type={TESTBED_TYPE}" ``` -- Requires switch connected to a VM set or PTF testbed +This test only works for T1 related topologies(t1, t1-lag, ...) 
+You might need to redeploy your VMs before you run this test due to the change for ToR VM router configuration changes +`./testbed-cli.sh config-vm your-topo-name(vms1-1) your-vm-name(VM0108)` will do this for you ##### VLAN test ``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=vlan -e testbed_name={TESTBED_NAME} +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} -e "testbed_name={TESTBED_NAME} testcase_name=vlan" ``` -- Requires switch connected to a PTF testbed +- Requires switch connected to a t0 testbed - Requires switch connected to fanout switch and fanout switch need support [QinQ](https://en.wikipedia.org/wiki/IEEE_802.1ad). -##### VNET test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=vnet_vxlan -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed - -##### Vxlan decap test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=vxlan-decap -e testbed_name={TESTBED_NAME} +### CRM test ``` -- Requires switch connected to a PTF testbed - -##### Warm reboot test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=warm-reboot -e testbed_name={TESTBED_NAME} -e new_sonic_image={IMAGE_URL} -e stay_in_target_image={VALUE1} -e cleanup_old_sonic_images={VALUE2} -``` -- Requires switch connected to PTF testbed -- Replace {IMAGE_URL} with the link pointing to the next image to warm-reboot into -- Replace {VALUE1} and {VALUE2} with true/false. 
Default: false -- stay_in_target_image parameter decides if the DUT should be reverted back to the old image after warm-reboot -- cleanup_old_sonic_images parameter will decide if all the images on the DUT should be cleaned up except for the current and the next images -- parameters 'new_sonic_image', 'stay_in_target_image', 'cleanup_old_sonic_images' are optional - -##### Warm reboot FIB test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=warm-reboot-fib -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed - -##### Warm reboot Sad test -``` -ansible-playbook test_sonic.yml -i {INVENTORY} --limit {DUT_NAME} -e testcase_name=warm-reboot-sad -e testbed_name={TESTBED_NAME} -``` -- Requires switch connected to a PTF testbed +ansible-playbook test_sonic.yml -i inventory --limit {DUT_NAME} --become --tags crm diff --git a/ansible/README.testbed.md b/ansible/README.testbed.md index d2b030f2808..6fa93a1e82b 100644 --- a/ansible/README.testbed.md +++ b/ansible/README.testbed.md @@ -2,7 +2,6 @@ - [Overview](doc/README.testbed.Overview.md) - [Setup](doc/README.testbed.Setup.md) - - [Virtual Switch Testbed Setup](doc/README.testbed.VsSetup.md) - [Topology](doc/README.testbed.Topology.md) - [Configuration](doc/README.testbed.Config.md) - [Minigraph](doc/README.testbed.Minigraph.md) diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg index 866e4677e80..f43545b16ea 100644 --- a/ansible/ansible.cfg +++ b/ansible/ansible.cfg @@ -1,7 +1,7 @@ # config file for ansible -- http://ansible.com/ # ============================================== -# nearly all parameters can be overridden in ansible-playbook +# nearly all parameters can be overridden in ansible-playbook # or with command line flags. ansible will read ANSIBLE_CONFIG, # ansible.cfg in the current working directory, .ansible.cfg in # the home directory or /etc/ansible/ansible.cfg, whichever it @@ -11,7 +11,8 @@ # some basic default values... 
-inventory = /etc/ansible/hosts +#inventory = /etc/ansible/hosts +inventory = /var/clsnet/git-sw-csa/sonic-mgmt/ansible library = library:library/ixia remote_tmp = $HOME/.ansible/tmp pattern = * @@ -23,7 +24,6 @@ sudo_user = root transport = smart #remote_port = 22 module_lang = C -max_diff_size = 512000 # plays will gather facts by default, which contain information about # the remote system. @@ -56,7 +56,7 @@ timeout = 10 # logging is off by default unless this path is defined # if so defined, consider logrotate -# log_path = $HOME/ansible.log +log_path = $HOME/ansible.log # default module name for /usr/bin/ansible #module_name = command @@ -74,22 +74,22 @@ timeout = 10 # list any Jinja2 extensions to enable here: #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n -# if set, always use this private key file for authentication, same as +# if set, always use this private key file for authentication, same as # if passing --private-key to ansible or ansible-playbook #private_key_file = /path/to/file -# format of string {{ ansible_managed }} available within Jinja2 +# format of string {{ ansible_managed }} available within Jinja2 # templates indicates to users editing templates files will be replaced. # replacing {file}, {host} and {uid} and strftime codes with proper values. ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} # by default, ansible-playbook will display "Skipping [host]" if it determines a task -# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" -# messages. NOTE: the task header will still be shown regardless of whether or not the +# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" +# messages. NOTE: the task header will still be shown regardless of whether or not the # task is skipped. 
#display_skipped_hosts = True -# by default (as of 1.3), Ansible will raise errors when attempting to dereference +# by default (as of 1.3), Ansible will raise errors when attempting to dereference # Jinja2 variables that are not set in templates or action lines. Uncomment this line # to revert the behavior to pre-1.3. #error_on_undefined_vars = False @@ -108,7 +108,7 @@ deprecation_warnings = False # (as of 1.8), Ansible can optionally warn when usage of the shell and # command module appear to be simplified by using a default Ansible module # instead. These warnings can be silenced by adjusting the following -# setting or adding warn=yes or warn=no to the end of the command line +# setting or adding warn=yes or warn=no to the end of the command line # parameter string. This will for example suggest using the git module # instead of shelling out to the git command. # command_warnings = False @@ -116,7 +116,7 @@ deprecation_warnings = False # set plugin path directories here, separate with colons action_plugins = plugins/action -callback_plugins = plugins/callback +# callback_plugins = /usr/share/ansible_plugins/callback_plugins connection_plugins = plugins/connection # lookup_plugins = /usr/share/ansible_plugins/lookup_plugins # vars_plugins = /usr/share/ansible_plugins/vars_plugins @@ -124,30 +124,30 @@ filter_plugins = plugins/filter callback_whitelist = profile_tasks # by default callbacks are not loaded for /bin/ansible, enable this if you -# want, for example, a notification or logging callback to also apply to +# want, for example, a notification or logging callback to also apply to # /bin/ansible runs #bin_ansible_callbacks = False # don't like cows? that's unfortunate. -# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 +# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 #nocows = 1 # don't like colors either? 
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 #nocolor = 1 -# the CA certificate path used for validating SSL certs. This path +# the CA certificate path used for validating SSL certs. This path # should exist on the controlling node, not the target nodes # common locations: # RHEL/CentOS: /etc/pki/tls/certs/ca-bundle.crt # Fedora : /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem # Ubuntu : /usr/share/ca-certificates/cacert.org/cacert.org.crt -#ca_file_path = +#ca_file_path = # the http user-agent string to use when fetching urls. Some web server # operators block the default urllib user agent as it is frequently used -# by malicious attacks/scripts, so we set it to something unique to +# by malicious attacks/scripts, so we set it to something unique to # avoid issues. #http_user_agent = ansible-agent @@ -156,9 +156,9 @@ callback_whitelist = profile_tasks # wanting to use, for example, IP information from one group of servers # without having to talk to them in the same playbook run to get their # current IP information. -fact_caching = memory +fact_caching = jsonfile fact_caching_connection = ~/.ansible/cache -fact_caching_timeout = 86400 +fact_caching_timeout = 1200 # retry files @@ -185,33 +185,33 @@ become_method='sudo' [ssh_connection] # ssh arguments to use -# Leaving off ControlPersist will result in poor performance, so use +# Leaving off ControlPersist will result in poor performance, so use # paramiko on older platforms rather than removing it -ssh_args = -o ControlMaster=auto -o ControlPersist=120s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no +ssh_args = -o ControlMaster=auto -o ControlPersist=120s -o UserKnownHostsFile=/dev/null # The path to use for the ControlPath sockets. 
This defaults to # "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with -# very long hostnames or very long path names (caused by long user names or +# very long hostnames or very long path names (caused by long user names or # deeply nested home directories) this can exceed the character limit on -# file socket names (108 characters for most platforms). In that case, you +# file socket names (108 characters for most platforms). In that case, you # may wish to shorten the string below. -# -# Example: +# +# Example: # control_path = %(directory)s/%%h-%%r #control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r -# Enabling pipelining reduces the number of SSH operations required to -# execute a module on the remote server. This can result in a significant -# performance improvement when enabled, however when using "sudo:" you must +# Enabling pipelining reduces the number of SSH operations required to +# execute a module on the remote server. This can result in a significant +# performance improvement when enabled, however when using "sudo:" you must # first disable 'requiretty' in /etc/sudoers # # By default, this option is disabled to preserve compatibility with # sudoers configurations that have requiretty (the default on many distros). -# +# pipelining = True -# if True, make ansible use scp if the connection type is ssh +# if True, make ansible use scp if the connection type is ssh # (default is sftp) #scp_if_ssh = True @@ -222,7 +222,7 @@ accelerate_connect_timeout = 5.0 # The daemon timeout is measured in minutes. This time is measured # from the last activity to the accelerate daemon. 
-accelerate_daemon_timeout = 30 +accelerate_daemon_timeout = 30 # If set to yes, accelerate_multi_key will allow multiple # private keys to be uploaded to it, though each user must diff --git a/ansible/basic_check.yml b/ansible/basic_check.yml index 7a6835bb917..00787ac12c5 100644 --- a/ansible/basic_check.yml +++ b/ansible/basic_check.yml @@ -24,5 +24,5 @@ minigraph_facts: host={{ inventory_hostname }} tags: always - - include_tasks: roles/test/tasks/interface.yml + - include: roles/test/tasks/interface.yml diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index af2afae4b13..0498a331f10 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -76,7 +76,8 @@ remote_dut: "{{ ansible_ssh_host }}" - name: gather testbed VM informations - testbed_vm_info: base_vm={{ testbed_facts['vm_base'] }} topo={{ testbed_facts['topo'] }} vm_file={{ vm_file }} + # testbed_vm_info: base_vm={{ testbed_facts['vm_base'] }} topo={{ testbed_facts['topo'] }} vm_file={{ vm_file }} + testbed_vm_info: base_vm="{{vm_base}}" topo="{{topo}}" delegate_to: localhost when: "VM_topo | bool" @@ -93,14 +94,6 @@ with_items: "{{ host_if_indexes }}" when: "('host_interfaces' in vm_topo_config) and ('tor' in vm_topo_config['dut_type'] | lower)" - - name: find all vlan configurations for T0 topology - vlan_config: - vm_topo_config: "{{ vm_topo_config }}" - port_alias: "{{ port_alias }}" - vlan_config: "{{ vlan_config | default(None) }}" - delegate_to: localhost - when: "('host_interfaces' in vm_topo_config) and ('tor' in vm_topo_config['dut_type'] | lower)" - - name: find all interface indexes mapping connecting to VM set_fact: interface_to_vms: "{{ interface_to_vms|default({}) | combine({ item.key: item.value['interface_indexes'] }) }}" diff --git a/ansible/doc/README.new.testbed.Configuration.md b/ansible/doc/README.new.testbed.Configuration.md index e144202d731..2f7bf5722c1 100644 --- 
a/ansible/doc/README.new.testbed.Configuration.md +++ b/ansible/doc/README.new.testbed.Configuration.md @@ -95,7 +95,7 @@ Like the veos_groups section, this section contains information about the server Confirm the following: - root_path - server's root path to building the VMs - cd_image_filename - you should be able to locate "Aboot-veos-serial-8.0.0.iso" -- hdd_image_file: you should also be able to locate "vEOS-lab-4.20.15M.vmdk" +- hdd_image_file: you should also be able to locate "vEOS-lab-4.15.10M.vmdk" Define: - vm_console_base - if you are running multiple sets of sonic-mgmt VMs, define a conflict-free vm_console_base @@ -154,9 +154,11 @@ From the leaf-fanout to the server, make sure to define: ### docker_registry section: **USAGE**: /vars/docker_registry.yml -The docker registry container below information: +The docker registry container 3 pieces of information: 1. docker_registry_host +2. docker_registry_username +3. docker_registry_password If you already have this information set up, you can choose to leave this section blank and the script will skip this section. diff --git a/ansible/doc/README.testbed.Cli.md b/ansible/doc/README.testbed.Cli.md index 8c42251eeb2..dd0dabba1bc 100644 --- a/ansible/doc/README.testbed.Cli.md +++ b/ansible/doc/README.testbed.Cli.md @@ -12,7 +12,7 @@ ## Add/Remove topo ``` -# conf-name,testbed-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,owner +# uniq-name,testbed-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,owner vms1-1-t1,vms1-1,t1,docker-ptf-sai-mlnx,10.0.10.5/23,server_1,VM0100,str-msn2700-11,t1 tests vms1-1-t1-lag,vms1-1,t1-lag,docker-ptf-sai-mlnx,10.0.10.5/23,server_1,VM0100,str-msn2700-11,t1-lag tests @@ -32,7 +32,7 @@ Caveat: Have to remember what was the initial topology. 
Should be fixed in futur # Renumber topo ``` -# conf-name,testbed-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,owner +# uniq-name,testbed-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,owner vms2-2-b,vms2-2,t1,docker-ptf-sai-brcm,10.0.10.7/23,server_1,VM0100,str-d6000-05,brcm test vms2-2-m,vms2-2,t1,docker-ptf-sai-mlnx,10.0.10.7/23,server_1,VM0100,str-msn2700-5,mlnx test @@ -48,23 +48,3 @@ Goal is to use one VM set against different DUTs Feature: The VMs configuration will NOT be updated while switching from one topo to another (faster). TODO: check topo field when renumbering between topologies - -# Deploy Ixia IxNetwork API server -``` -# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment -example-ixia,vms6-1,t0-64,docker-keysight-api-server,example-ixia-ptf-1,10.0.0.30/32,,server_1,,example-s6100-dut-1,Test with Keysight API Server -``` -- To add a new testbed “example-ixia”: - - ./testbed-cli add-topo example-ixia ~/.password - -- To remove a Keysight API server docker container - - ./testbed-cli remove-topo example-ixia ~/.password - -Note that it's mandatory to name the image "docker-keysight-api-server", as that triggers the Ixia IxNetwork API server deployment. -Much like the PTF docker image, this image will be pulled from the configured docker registry. - -Also, topologies with the Keysight API server will not be using any VMs. - -The most recent IxNetwork API Server docker image can be found [here](http://downloads.ixiacom.com/support/downloads_and_updates/public/ixnetwork/9.00_Update-3/Ixia_IxNetworkWeb_Docker_9.00.100.213.tar.bz2). -See also the [Ixia software download](https://support.ixiacom.com/public/support-overview/product-support/downloads-updates/versions/68) page for any newer versions. 
- diff --git a/ansible/doc/README.testbed.Config.md b/ansible/doc/README.testbed.Config.md index eb4a697f498..9a03ae9f78e 100644 --- a/ansible/doc/README.testbed.Config.md +++ b/ansible/doc/README.testbed.Config.md @@ -44,13 +44,13 @@ vms-t1-lag,vms1-1,t1-lag,docker-ptf-sai-mlnx,10.255.0.178/24,server_1,VM0100,str ### ```testbed.csv``` consistency rules ``` -# conf-name,testbed-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,owner +# uniq-name,testbed-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,owner vms2-2-b,vms2-2,t1,docker-ptf-sai-brcm,10.0.10.7/23,server_1,VM0100,str-d6000-05,brcm test vms2-2-m,vms2-2,t1,docker-ptf-sai-mlnx,10.0.10.7/23,server_1,VM0100,str-msn2700-5,mlnx test ``` Must be strictly checked in code reviews - - conf-name must be unique + - uniq-name must be unique - All testbed records with the same testbed-name must have the same: - ptf_ip - server diff --git a/ansible/doc/README.testbed.Overview.md b/ansible/doc/README.testbed.Overview.md index 4a5030f8670..4fcf3aa1381 100644 --- a/ansible/doc/README.testbed.Overview.md +++ b/ansible/doc/README.testbed.Overview.md @@ -54,98 +54,3 @@ DUT front panel port is directly connected to one of PTF container ports. Usuall ![](img/testbed-injected.png) DUT front panel port is directly connected to one of VMs interfaces. But also we have a tap into this connection. Packets coming from the physical vlan interface are sent to both the VMs and the PTF docker. Packets from the VM and PTF docker are sent to the vlan interface. It allows us to inject packets from the PTF host to DUT and maintain a BGP session between VM and DUT at the same time. - -# SONiC Testbed with Keysight IxNetwork as Traffic Generator - -This section gives an overview of the stand-alone testbed topology where SONiC DUT is directly connected with Keysight’s protocol emulator and traffic generator (IxNetwork). 
- -## Physical Topology - -Based on test need there may be multiple topologies possible as shown below : - -- Single DUT Topology -![](img/single-dut-topology.png) - - -- Multiple DUT Topology -![](img/multiple-dut-topology.png) - - -- Multiple IxNetwork Topology -![](img/multiple-ixnetwork.PNG) - -## Topology Description - -### Ixia Chassis (IxNetwork) -Keysight IxNetwork is used as the network tester to emulate protocol sessions eg. OSPF, BGP, LACP etc. and send data traffic with various user defined traffic profiles at line rate. Based on test need this can support line cards of varying speed ranging from 1G to 400G. It also supports vlan separated logical interfaces over each physical port. - -### IxNetwork API Server Docker - -The IxNetwork API Server docker is installed in the Testbed server along with sonic-mgmt docker. It provides API server, that is used to configure the Traffic Generator (IxNetwork) using restPy APIs. It is capable of offering multiple sessions of IxNetwork API server. Each session runs independent of each other and configures IxNetwork. - -### Network connections -- IxNetwork API server is connected to IxNetwork via the management port. -- IxNetwork test ports are directly connected to single or multiple DUTs. - -## Deploy IxNetwork API Server - -### Download IxNetwork API Server docker image -1. Download IxNetwork Web Edition (Docker deployment) from [ here ](https://ks-aws-prd-itshared-opix.s3-us-west-1.amazonaws.com/IxSoftwareUpgrades/IxNetwork/9.0_Update3/Ixia_IxNetworkWeb_Docker_9.00.100.213.tar.bz2) - -2. Copy the tar.bz2 file on the testbed server. - -3. Make sure the interface has promiscuous mode enabled -``` - ifconfig ens160 promisc - ``` - -3. Decompress the file (it may take a few minutes): -``` -tar xvjf -``` -### Run IxNetwork API Server docker - -1. Load the image to docker: -``` -docker load -i Ixia_IxNetworkWeb_Docker_.tar -``` -2. Loaded image : `ixnetworkweb__image` - -3. 
Create the macvlan bridge to be used by IxNetwork Web Edition: -``` -docker network create -d macvlan -o parent=ens160 --subnet=192.168.x.0/24 --gateway=192.168.x.254 -(NOTE: Use your subnet, prefix length and gateway IP address.) -``` - -4. Verify bridge got created properly: -``` -docker network ls -docker network inspect IxNetVlanMac -``` -5. Deploy the IxNetwork Web Edition container using the following command ixnetworkweb_\_image should be as shown in step 2 above): -``` -docker run --net \ ---ip \ ---hostname \ ---name \ ---privileged \ ---restart=always \ ---cap-add=SYS_ADMIN \ ---cap-add=SYS_TIME \ ---cap-add=NET_ADMIN \ ---cap-add=SYS_PTRACE \ --i -d \ --v /sys/fs/cgroup:/sys/fs/cgroup \ --v /var/crash/=/var/crash \ --v /opt/container/one/configs:/root/.local/share/Ixia/sdmStreamManager/common \ --v /opt/container/one/results:/root/.local/share/Ixia/IxNetwork/data/result \ --v /opt/container/one/settings:/root/.local/share/IXIA/IxNetwork.Globals \ ---tmpfs /run \ -ixnetworkweb__image - -Note : The folders within /opt/container/one/ should to be created with read and write permission prior docker run. - -``` - -6. Launch IxNetworkWeb using browser `https://container ip` - diff --git a/ansible/doc/README.testbed.Setup.md b/ansible/doc/README.testbed.Setup.md index 7e66bac5bb5..e4924d826ec 100644 --- a/ansible/doc/README.testbed.Setup.md +++ b/ansible/doc/README.testbed.Setup.md @@ -2,144 +2,124 @@ This document describes the steps to setup the testbed and deploy a topology. -## Prepare Testbed Server - -- Install Ubuntu 18.04 amd64 on the server. 
-- Setup management port configuration using this sample `/etc/network/interfaces`: - ``` - root@server-1:~# cat /etc/network/interfaces - # The management network interface - auto ma0 - iface ma0 inet manual - - # Server, VM and PTF management interface - auto br1 - iface br1 inet static - bridge_ports ma0 - bridge_stp off - bridge_maxwait 0 - bridge_fd 0 - address 10.250.0.245 - netmask 255.255.255.0 - network 10.250.0.0 - broadcast 10.250.0.255 - gateway 10.250.0.1 - dns-nameservers 10.250.0.1 10.250.0.2 - # dns-* options are implemented by the resolvconf package, if installed - ``` - -- Install python 2.7 (this is required by Ansible). -- Add Docker's official GPG key: - ``` - $ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - - ``` - -## Setup Docker Registry for `docker-ptf` - -The PTF docker container is used to send and receive data plane packets to the DUT. - -1. Build `docker-ptf` image - ``` - git clone --recursive https://github.com/Azure/sonic-buildimage.git - cd sonic-buildimage - make configure PLATFORM=generic - make target/docker-ptf.gz - ``` - -2. Setup your own [Docker Registry](https://docs.docker.com/registry/) and upload `docker-ptf` to your registry. - -## Build and Run `docker-sonic-mgmt` - -Managing the testbed and running tests requires various dependencies to be installed and configured. We have built a `docker-sonic-mgmt` image that takes care of these dependencies so you can use `ansible-playbook`, `pytest`, and `spytest`. - -1. Build `docker-sonic-mgmt` image from scratch: - ``` - git clone --recursive https://github.com/Azure/sonic-buildimage.git - cd sonic-buildimage - make configure PLATFORM=generic - make target/docker-sonic-mgmt.gz - ``` - - You can also download a pre-built `docker-sonic-mgmt` image [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/bldenv/job/docker-sonic-mgmt/lastSuccessfulBuild/artifact/sonic-buildimage/target/docker-sonic-mgmt.gz). - -2. 
Clone the `sonic-mgmt` repo into your working directory: - ``` - git clone https://github.com/Azure/sonic-mgmt - ``` - -3. Create a `docker-sonic-mgmt` container. Note that you must mount your clone of `sonic-mgmt` inside the container to access the deployment and testing scripts: - ``` - docker load < docker-sonic-mgmt.gz - docker run -v $PWD:/data -it docker-sonic-mgmt bash - cd ~/sonic-mgmt - ``` - -**NOTE: From this point on, all steps are ran inside the `docker-sonic-mgmt` container.** - -## Prepare Testbed Configuration - -Once you are in the docker container, you need to modify the testbed configuration files to reflect your lab setup. +## Prepare testbed server + +- Install Ubuntu 16.04 or 17.04 amd64 server. +- Setup management port configuration using sample ```/etc/network/interfaces```. + +``` +root@server-1:~# cat /etc/network/interfaces +# The management network interface +auto ma0 +iface ma0 inet manual + +# Server, VM and PTF management interface +auto br1 +iface br1 inet static + bridge_ports ma0 + bridge_stp off + bridge_maxwait 0 + bridge_fd 0 + address 10.250.0.245 + netmask 255.255.255.0 + network 10.250.0.0 + broadcast 10.250.0.255 + gateway 10.250.0.1 + dns-nameservers 10.250.0.1 10.250.0.2 + # dns-* options are implemented by the resolvconf package, if installed +``` + +- Installed python 2.7 (required by ansible). +- Add Docker's official GPG key +``` + $ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +``` + +## Setup docker registry for *PTF* docker + +PTF docker is used to send and receive packets to test data plane. + +- Build PTF docker +``` +git clone --recursive https://github.com/Azure/sonic-buildimage.git +make configure PLATFORM=generic +make target/docker-ptf.gz +``` + +- Setup [docker registry](https://docs.docker.com/registry/) and upload *docker-ptf* to the docker registry. 
+ +## Build and run *sonic-mgmt* docker + +ansible playbook in *sonic-mgmt* repo requires to setup ansible and various dependencies. +We have built a *sonic-mgmt* docker that installs all dependencies, and you can build +the docker and run ansible playbook inside the docker. + +- Build *sonic-mgmt* docker +``` +git clone --recursive https://github.com/Azure/sonic-buildimage.git +make configure PLATFORM=generic +make target/docker-sonic-mgmt.gz +``` + +Pre-built *sonic-mgmt* can also be downloaded from [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/bldenv/job/docker-sonic-mgmt/lastSuccessfulBuild/artifact/target/docker-sonic-mgmt.gz). + +- Run *sonic-mgmt* docker +``` +docker load -i target/docker-sonic-mgmt.gz +docker run -it docker-sonic-mgmt bash +cd ~/sonic-mgmt +``` + +From now on, all steps are running inside the *sonic-mgmt* docker. + +## Prepare testbed configurations + +Latest *sonic-mgmt* repo is cloned into *sonic-mgmt* docker under '/var/[your-login-username]/sonic-mgmt`. +Once you are in the docker, you need to modify the testbed configuration files to reflect your lab setup. - Server - - Update the server management IP in [`ansible/veos`](../veos). - - - Update the testbed server credentials in [`ansible/group_vars/vm_host/creds.yml`](../group_vars/vm_host/creds.yml). - - - Update the server network configuration for the VM and PTF management interfaces in [`ansible/host_vars/STR-ACS-SERV-01.yml`](../host_vars/STR-ACS-SERV-01.yml). - - `external_port`: server trunk port name (connected to the fanout switch) - - `mgmt_gw`: ip of the gateway for the VM management interfaces - - `mgmt_prefixlen`: prefixlen for the management interfaces - - - Check that ansible can reach this host: - ``` - ansible -m ping -i veos vm_host_1 - ``` - -- VMs - - Download [vEOS image from Arista](https://www.arista.com/en/support/software-download). 
- - - Copy these image files to `~/veos-vm/images` on your testbed server: - - `Aboot-veos-serial-8.0.0.iso` - - `vEOS-lab-4.20.15M.vmdk` - - - Update the VM IP addresses in the [`ansible/veos`](../veos) inventory file. These IP addresses should be in the management subnet defined above. - - - Update the VM credentials in `ansible/group_vars/eos/creds.yml`. - -- PTF Docker - - Update the docker registry information in [`vars/docker_registry.yml`](../vars/docker_registry.yml). - -## Setup VMs on the Server - -1. Start the VMs: - ``` - ./testbed-cli.sh start-vms server_1 password.txt - ``` - Please note: `password.txt` is the ansible vault password file name/path. Ansible allows users to use `ansible-vault` to encrypt password files. By default, this shell script **requires** a password file. If you are not using `ansible-vault`, just create an empty file and pass the file name to the command line. **The file name and location is created and maintained by the user.** - -2. Check that all the VMs are up and running: - ``` - ansible -m ping -i veos server_1 - ``` - -## Deploy Fanout Switch Vlan - -You need to specify all physical connections that exist in the lab before deploying the fanout and running the tests. - -Please follow the "Testbed Physical Topology" section of the [Configuration Guide](README.testbed.Config.md) to prepare your lab connection graph file. - -We are using Arista switches as the fanout switches in our lab. So, the playbook under `roles/fanout` is for deploying fanout (leaf) switch Vlan configurations on Arista devices only. If you are using other types of fanout switches, you can manually configure the Vlan configurations on the switch, or you can deploy a regular Layer-2 switch configuration. - -Our fanout switches deploy using the Arista switch's eosadmin shell login. 
If you have an Arista switch as your fanout and you want to run `fanout/tasks/main.yml` to deploy the switch, please `scp` the `roles/fanout/template/rc.eos` file to the Arista switch flash, and make sure that you can login to the shell with `fanout_admin_user/fanout_admin_password`. - -**`TODO:`** -- Improve testbed root fanout switch configuration method. -- Update the inventory file format. Some of the early fanout definition files have duplicated fields with the inventory file. We should adopt a new inventory file and improve the lab graph. - -## Deploy Topology - -- Update `testbed.csv` with your data. At the least, you should update the PTF management interface settings. -- To deploy a topology run: ```./testbed-cli.sh add-topo vms-t1 ~/.password``` -- To remove a topology run: ```./testbed-cli.sh remove-topo vms-t1 ~/.password``` - -**NOTE:** The last step in `testbed-cli.sh` is trying to re-deploy the Vlan range in the root fanout switch to match the VLAN range specified in the topology. In other words, it's trying to change the "allowed" Vlan for the Arista switch ports. If you have a different type of switch, this may or may not work. Please review the steps and update accordingly if necessary. If you comment out the last step, you may manually swap Vlan ranges in the root fanout to make the testbed topology switch work. + - Update server management IP in [```ansible/veos```](../veos). + - Update testbed server credentials in [```ansible/group_vars/vm_host/creds.yml```](../group_vars/vm_host/creds.yml). + - Update server network configuration for VM and PTF management interface in [```ansible/host_vars/STR-ACS-SERV-01.yml```](../host_vars/STR-ACS-SERV-01.yml). + - ```external_port```: server trunk port name (connected to the fanout switch) + - ```mgmt_gw```: ip of gateway for VM mgmt interfaces + - ```mgmt_prefixlen```: prefixlen for management interfaces + - Check that ansible could reach this device by command ```ansible -m ping -i veos vm_host_1```. 
+ +- VM + - Download vEOS image from [arista](https://www.arista.com/en/support/software-download). + - Copy below image files to ```~/veos-vm/images``` on your testbed server. + - ```Aboot-veos-serial-8.0.0.iso``` + - ```vEOS-lab-4.15.9M.vmdk``` + - Update VM IP addresses [```ansible/veos```](../voes) inventory file. These IP addresses should be in the management subnet defined above. + - Update VM credentials in [```ansible/group_vars/eos/creds.yml```](../group_vars/eos/creds.yml). + +- ```PTF``` docker + - Update docker registry information in [```vars/docker_registry.yml```](../vars/docker_registry.yml). + +## Setup VMs in the server + +``` +./testbed-cli.sh start-vms server_1 password.txt +``` + - please note: Here "password.txt" is the ansible vault password file name/path. Ansible allows user use ansible vault to encrypt password files. By default, this shell script require a password file. If you are not using ansible vault, just create an empty file and pass the filename to the command line. The file name and location is created and maintained by user. + +Check that all VMs are up and running: ```ansible -m ping -i veos server_1``` + +## Deploy fanout switch Vlan + +You need to specify all lab physical connections before running fanout deployment and some of the tests. + +Please follow [Configuration](README.testbed.Config.md) 'Testbed Physical Topology' section to prepare your lab connection graph file. + +We are using Arista switches as fanout switch in our lab. So, the playbook under roles/fanout is for deploy fanout(leaf) switch Vlans configuration of Arista only. If you are using other type of fanout switches, you may manually configure Vlan configurations in switch or you have a good way to deploy regular Layer2 switch configuration in lab would also work. Our fanout switch deploy using Arista switch eosadmin shell login. 
If you do have an Arista switch as fanout and you want to run the fanout/tasks/main.yml to deploy the switch, please scp the roles/fanout/template/rc.eos file to Arista switch flash, and make sure that you can use your fanout_admin_user/fanout_admin_password to login to shell. + +TODO: Improve testbed rootfanout switch configuration method; along we are changing the inventory file format, some of the early fanout definition files has duplicated fields with inventory file, should adopt new inventory file and improve the lab graph + +## Deploy topology + +- Update ```testbed.csv``` with your data. At least update PTF mgmt interface settings +- To deploy PTF topology run: ```./testbed-cli.sh add-topo ptf1-m ~/.password``` +- To remove PTF topology run: ```./testbed-cli.sh remove-topo ptf1-m ~/.password``` +- To deploy T1 topology run: ```./testbed-cli.sh add-topo vms-t1 ~/.password``` +- The last step in testbed-cli is trying to re-deploy Vlan range in root fanout switch to match the VLAN range specified in that topology. It's trying to change the 'allowed' Vlan for Arista switch port. If you have other type of switch, it may or may not work. Please review it and change accordingly if required. If you comment out the last step, you may manually swap Vlan ranges in rootfanout to make the testbed topology switch to work. diff --git a/ansible/doc/README.testbed.VsSetup.md b/ansible/doc/README.testbed.VsSetup.md index 25391072099..627869e84b4 100644 --- a/ansible/doc/README.testbed.VsSetup.md +++ b/ansible/doc/README.testbed.VsSetup.md @@ -4,199 +4,105 @@ This document describes the steps to setup the virtual switch based testbed and ## Prepare testbed server -- Install Ubuntu 20.04 amd64 server. To setup a T0 topology, the server needs to have 10GB free memory. 
-- Setup internal management network: -``` -$ git clone https://github.com/Azure/sonic-mgmt -$ cd sonic-mgmt/ansible -$ sudo ./setup-management-network.sh -``` - -### Use vEOS image - -- Download vEOS image from [arista](https://www.arista.com/en/support/software-download). -- Copy below image files to `~/veos-vm/images` on your testbed server. - - `Aboot-veos-serial-8.0.0.iso` - - `vEOS-lab-4.20.15M.vmdk` - -### Use cEOS image (experimental) -#### Option 1, download and import cEOS image manually -- Download cEOS image from [arista](https://www.arista.com/en/support/software-download) onto your testbed server -- Import cEOS image (It will take several minutes to import, so please be patient) +- Install Ubuntu 18.04 amd64 server. To setup a T0 topology, the server needs to have 10GB free memory. +- Setup internal management network. ``` -$ docker import cEOS64-lab-4.23.2F.tar.xz ceosimage:4.23.2F -$ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -ceosimage 4.23.2F d53c28e38448 2 hours ago 1.82GB +brctl addbr br1 +ifconfig br1 10.250.0.1/24 +ifconfig br1 up ``` -#### Option 2, download and image cEOS image automatically -Alternatively, you can host the cEOS image on a http server. Specify `vm_images_url` for downloading the image [here](https://github.com/Azure/sonic-mgmt/blob/master/ansible/group_vars/vm_host/main.yml#L2). If a saskey is required for downloading cEOS image, specify `ceosimage_saskey` in `sonic-mgmt/ansible/vars/azure_storage.yml`. -If you want to skip image downloading when the cEOS image is not imported locally, set `skip_ceos_image_downloading` to `true` in `sonic-mgmt/ansible/group_vars/all/ceos.yml`. Then when cEOS image is not locally imported, the scripts will not try to download it and will fail with an error message. Please use option 1 to download and import the cEOS image manually. +- Download vEOS image from [arista](https://www.arista.com/en/support/software-download). 
+- Copy below image files to ```~/veos-vm/images``` on your testbed server. + - ```Aboot-veos-serial-8.0.0.iso``` + - ```vEOS-lab-4.15.9M.vmdk``` -## Download sonic-vs image +## Setup docker registry for *PTF* docker -- Download sonic-vs image from [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/buildimage-vs-image/lastSuccessfulBuild/artifact/target/sonic-vs.img.gz) -``` -$ wget https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/buildimage-vs-image/lastSuccessfulBuild/artifact/target/sonic-vs.img.gz -``` +PTF docker is used to send and receive packets to test data plane. -- unzip the image and move it into `~/sonic-vm/images/` +- Build PTF docker ``` -$ gzip -d sonic-vs.img.gz -$ mkdir -p ~/sonic-vm/images -$ mv sonic-vs.img ~/sonic-vm/images +git clone --recursive https://github.com/Azure/sonic-buildimage.git +make configure PLATFORM=generic +make target/docker-ptf.gz ``` -## Setup sonic-mgmt docker +- Setup [docker registry](https://docs.docker.com/registry/) and upload *docker-ptf* to the docker registry. -### Build or download *sonic-mgmt* docker image -(Note: downloading or building the sonic-mgmt image is optional) +## Build or download *sonic-mgmt* docker image ansible playbook in *sonic-mgmt* repo requires to setup ansible and various dependencies. -We have built a *sonic-mgmt* docker that installs all dependencies, and you can build +We have built a *sonic-mgmt* docker that installs all dependencies, and you can build the docker and run ansible playbook inside the docker. - Build *sonic-mgmt* docker ``` -$ git clone --recursive https://github.com/Azure/sonic-buildimage.git -$ make configure PLATFORM=generic -$ make target/docker-sonic-mgmt.gz -``` - -- Or, download pre-built *sonic-mgmt* image from [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/bldenv/job/docker-sonic-mgmt/lastSuccessfulBuild/artifact/sonic-buildimage/target/docker-sonic-mgmt.gz). 
-``` -$ wget https://sonic-jenkins.westus2.cloudapp.azure.com/job/bldenv/job/docker-sonic-mgmt/lastSuccessfulBuild/artifact/sonic-buildimage/target/docker-sonic-mgmt.gz -``` - -- Load *sonic-mgmt* image -``` -$ docker load -i docker-sonic-mgmt.gz +git clone --recursive https://github.com/Azure/sonic-buildimage.git +make configure PLATFORM=generic +make target/docker-sonic-mgmt.gz ``` -Run the `setup-container.sh` in the root directory of the sonic-mgmt repository: +Pre-built *sonic-mgmt* can also be downloaded from [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/bldenv/job/docker-sonic-mgmt/lastSuccessfulBuild/artifact/target/docker-sonic-mgmt.gz). -``` -$ cd sonic-mgmt -$ ./setup-container.sh -n -d /data -``` +## Download sonic-vs image -From now on, all steps are running inside the *sonic-mgmt* docker except where otherwise specified. +- Download sonic-vs image from [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/buildimage-vs-image/lastSuccessfulBuild/artifact/target/sonic-vs.img.gz) +- unzip the image and move it into ```~/sonic-vm/images/``` -You can enter your sonic-mgmt container with the following command: +## Clone sonic-mgmt repo ``` -$ docker exec -u -it bash +git clone https://github.com/Azure/sonic-mgmt ``` -### Setup public key to login into the linux host from sonic-mgmt docker - -- Modify veos_vtb to use the user name, e.g., `foo` to login linux host (this can be your username on the host). +- Modify veos.vtb to use the user name to login linux host. Add public key authorized\_keys for your user. +Put the private key inside the sonic-mgmt docker container. Make sure you can login into box using +```ssh yourusername@172.17.0.1``` without any password prompt inside the docker container. 
``` -lgh@gulv-vm2:/data/sonic-mgmt/ansible$ git diff -diff --git a/ansible/veos_vtb b/ansible/veos_vtb -index 3e7b3c4e..edabfc40 100644 ---- a/ansible/veos_vtb -+++ b/ansible/veos_vtb -@@ -73,7 +73,7 @@ vm_host_1: - hosts: - STR-ACS-VSERV-01: - ansible_host: 172.17.0.1 -- ansible_user: use_own_value -+ ansible_user: foo +lgh@gulv-vm2:/data/sonic/sonic-mgmt/ansible$ git diff +diff --git a/ansible/veos.vtb b/ansible/veos.vtb +index 4ea5a7a..4cfc448 100644 +--- a/ansible/veos.vtb ++++ b/ansible/veos.vtb +@@ -1,5 +1,5 @@ +[vm_host_1] +-STR-ACS-VSERV-01 ansible_host=172.17.0.1 ansible_user=use_own_value ++STR-ACS-VSERV-01 ansible_host=172.17.0.1 ansible_user=lgh - vms_1: - hosts: + [vm_host:children] +vm_host_1 ``` -- Create dummy `password.txt` under `/data/sonic-mgmt/ansible` - - Please note: Here "password.txt" is the Ansible Vault password file name/path. Ansible allows user to use Ansible Vault to encrypt password files. By default, this shell script requires a password file. If you are not using Ansible Vault, just create a file with a dummy password and pass the filename to the command line. The file name and location is created and maintained by user. - -- Add user `foo`'s public key to `/home/foo/.ssh/authorized_keys` on the host - -- On the host, run `sudo visudo` and add the following line at the end: +## Run sonic-mgmt docker ``` -foo ALL=(ALL) NOPASSWD:ALL +docker run -v $PWD:/data -it docker-sonic-mgmt bash ``` -- Add user `foo`'s private key to `$HOME/.ssh/id_rsa` inside sonic-mgmt docker container. - -- Test you can login into the host `ssh foo@172.17.0.1` without any password prompt -from the `sonic-mgmt` container. Then, test you can sudo without password prompt in the host. +From now on, all steps are running inside the *sonic-mgmt* docker. 
## Setup Arista VMs in the server -(skip this step if you use cEOS image) - -``` -$ ./testbed-cli.sh -m veos_vtb -n 4 start-vms server_1 password.txt -``` - - Please note: Here "password.txt" is the Ansible Vault password file name/path. Ansible allows user to use Ansible Vault to encrypt password files. By default, this shell script requires a password file. If you are not using Ansible Vault, just create a file with a dummy password and pass the filename to the command line. The file name and location is created and maintained by user. - -Check that all VMs are up and running, and the passwd is `123456` ``` -$ ansible -m ping -i veos_vtb server_1 -u root -k -VM0102 | SUCCESS => { - "changed": false, - "ping": "pong" -} -VM0101 | SUCCESS => { - "changed": false, - "ping": "pong" -} -STR-ACS-VSERV-01 | SUCCESS => { - "changed": false, - "ping": "pong" -} -VM0103 | SUCCESS => { - "changed": false, - "ping": "pong" -} -VM0100 | SUCCESS => { - "changed": false, - "ping": "pong" -} +./testbed-cli.sh -m veos.vtb start-vms server_1 password.txt ``` + - please note: Here "password.txt" is the ansible vault password file name/path. Ansible allows user use ansible vault to encrypt password files. By default, this shell script require a password file. If you are not using ansible vault, just create an empty file and pass the filename to the command line. The file name and location is created and maintained by user. +Check that all VMs are up and running: ```ansible -m ping -i veos server_1``` ## Deploy T0 topology -### vEOS ``` -$ cd /data/sonic-mgmt/ansible -$ ./testbed-cli.sh -t vtestbed.csv -m veos_vtb add-topo vms-kvm-t0 password.txt -``` - -### cEOS -``` -$ cd /data/sonic-mgmt/ansible -$ ./testbed-cli.sh -t vtestbed.csv -m veos_vtb -k ceos add-topo vms-kvm-t0 password.txt -``` - -Verify topology setup successfully. 
- -``` -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -575064498cbc ceosimage:4.23.2F "/sbin/init systemd.…" About a minute ago Up About a minute ceos_vms6-1_VM0103 -d71b8970bcbb debian:jessie "bash" About a minute ago Up About a minute net_vms6-1_VM0103 -3d2e5ecdd472 ceosimage:4.23.2F "/sbin/init systemd.…" About a minute ago Up About a minute ceos_vms6-1_VM0102 -28d64c74fa54 debian:jessie "bash" About a minute ago Up About a minute net_vms6-1_VM0102 -0fa067a47c7f ceosimage:4.23.2F "/sbin/init systemd.…" About a minute ago Up About a minute ceos_vms6-1_VM0101 -47066451fa4c debian:jessie "bash" About a minute ago Up About a minute net_vms6-1_VM0101 -e07bd0245bd9 ceosimage:4.23.2F "/sbin/init systemd.…" About a minute ago Up About a minute ceos_vms6-1_VM0100 -4584820bf368 debian:jessie "bash" 7 minutes ago Up 7 minutes net_vms6-1_VM0100 -c929c622232a sonicdev-microsoft.azurecr.io:443/docker-ptf:latest "/usr/local/bin/supe…" 7 minutes ago Up 7 minutes ptf_vms6-1 +./testbed-cli.sh -t vtestbed.csv -m veos.vtb add-topo vms-kvm-t0 password.txt ``` ## Deploy minigraph on the DUT ``` -$ ./testbed-cli.sh -t vtestbed.csv -m veos_vtb deploy-mg vms-kvm-t0 lab password.txt +./testbed-cli.sh -t vtestbed.csv -m veos.vtb deploy-mg vms-kvm-t0 lab password.txt ``` You should be login into the sonic kvm using IP: 10.250.0.101 using admin:password. 
@@ -215,13 +121,3 @@ Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/P 10.0.0.61 4 64600 3205 950 0 0 0 00:00:21 6400 10.0.0.63 4 64600 3204 950 0 0 0 00:00:21 6400 ``` - - - - - - - - - - diff --git a/ansible/eos.yml b/ansible/eos.yml index de1b2bab9d9..a07b94d5275 100644 --- a/ansible/eos.yml +++ b/ansible/eos.yml @@ -4,4 +4,4 @@ - hosts: eos gather_facts: no roles: - - role: eos + - role: eos diff --git a/ansible/fanout_connect.yml b/ansible/fanout_connect.yml index 3b34d8bfd9b..2406bf0d41f 100644 --- a/ansible/fanout_connect.yml +++ b/ansible/fanout_connect.yml @@ -4,7 +4,7 @@ gather_facts: no tasks: - fail: msg="Please provide VM server name and server port name, see comment line in playbook" - when: + when: - dut is not defined - block: @@ -16,7 +16,7 @@ - name: get the username running the deploy command: whoami - delegate_to: localhost + connection: local become: no register: calling_username changed_when: false @@ -25,5 +25,5 @@ - set_fact: connect_leaf=false - - include_tasks: roles/fanout/tasks/rootfanout_connect.yml - when: external_port is defined +# - include: roles/fanout/tasks/rootfanout_connect.yml +# when: external_port is defined diff --git a/ansible/files/lab_connection_graph.xml b/ansible/files/lab_connection_graph.xml index 885b21a8fd8..9cd156b795e 100644 --- a/ansible/files/lab_connection_graph.xml +++ b/ansible/files/lab_connection_graph.xml @@ -5,89 +5,192 @@ + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ansible/files/sonic_lab_devices.csv b/ansible/files/sonic_lab_devices.csv 
index b11a58d53d3..c7c1bee107e 100644 --- a/ansible/files/sonic_lab_devices.csv +++ b/ansible/files/sonic_lab_devices.csv @@ -1,4 +1,11 @@ Hostname,ManagementIp,HwSku,Type +cel-e1031-01,10.250.0.100/23,Celestica-E1031-T48S4,DevSonic +cel-seastone-01,10.251.0.100/23,Celestica-DX010-C32,DevSonic +cel-seastone-02,10.250.0.100/23,Seastone-DX010-10-50,DevSonic +cel-seastone-03,10.250.0.100/23,Seastone-DX010-50,DevSonic +cel-seastone2-01,10.251.0.100/23,Seastone_2,DevSonic +e1031-fanout,10.250.0.235/23,Celestica-E1031-T48S4,FanoutLeafSonic +seastone-fanout,10.251.0.235/23,Celestica-DX010-C32,FanoutLeafSonic str-msn2700-01,10.251.0.188/23,Mellanox-2700,DevSonic str-7260-10,10.251.0.13/23,Arista-7260QX-64,FanoutLeaf str-7260-11,10.251.0.234/23,Arista-7260QX-64,FanoutRoot diff --git a/ansible/files/sonic_lab_links.csv b/ansible/files/sonic_lab_links.csv index f82e968740f..563c9396882 100644 --- a/ansible/files/sonic_lab_links.csv +++ b/ansible/files/sonic_lab_links.csv @@ -1,35 +1,95 @@ StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode -str-msn2700-01,Ethernet0,str-7260-10,Ethernet1,40000,1681,Access -str-msn2700-01,Ethernet4,str-7260-10,Ethernet2,40000,1682,Access -str-msn2700-01,Ethernet8,str-7260-10,Ethernet3,40000,1683,Access -str-msn2700-01,Ethernet12,str-7260-10,Ethernet4,40000,1684,Access -str-msn2700-01,Ethernet16,str-7260-10,Ethernet5,40000,1685,Access -str-msn2700-01,Ethernet20,str-7260-10,Ethernet6,40000,1686,Access -str-msn2700-01,Ethernet24,str-7260-10,Ethernet7,40000,1687,Access -str-msn2700-01,Ethernet28,str-7260-10,Ethernet8,40000,1688,Access -str-msn2700-01,Ethernet32,str-7260-10,Ethernet9,40000,1689,Access -str-msn2700-01,Ethernet36,str-7260-10,Ethernet10,40000,1690,Access -str-msn2700-01,Ethernet40,str-7260-10,Ethernet11,40000,1691,Access -str-msn2700-01,Ethernet44,str-7260-10,Ethernet12,40000,1692,Access -str-msn2700-01,Ethernet48,str-7260-10,Ethernet13,40000,1693,Access -str-msn2700-01,Ethernet52,str-7260-10,Ethernet14,40000,1694,Access 
-str-msn2700-01,Ethernet56,str-7260-10,Ethernet15,40000,1695,Access -str-msn2700-01,Ethernet60,str-7260-10,Ethernet16,40000,1696,Access -str-msn2700-01,Ethernet64,str-7260-10,Ethernet17,40000,1697,Access -str-msn2700-01,Ethernet68,str-7260-10,Ethernet18,40000,1698,Access -str-msn2700-01,Ethernet72,str-7260-10,Ethernet19,40000,1699,Access -str-msn2700-01,Ethernet76,str-7260-10,Ethernet20,40000,1700,Access -str-msn2700-01,Ethernet80,str-7260-10,Ethernet21,40000,1701,Access -str-msn2700-01,Ethernet84,str-7260-10,Ethernet22,40000,1702,Access -str-msn2700-01,Ethernet88,str-7260-10,Ethernet23,40000,1703,Access -str-msn2700-01,Ethernet92,str-7260-10,Ethernet24,40000,1704,Access -str-msn2700-01,Ethernet96,str-7260-10,Ethernet25,40000,1705,Access -str-msn2700-01,Ethernet100,str-7260-10,Ethernet26,40000,1706,Access -str-msn2700-01,Ethernet104,str-7260-10,Ethernet27,40000,1707,Access -str-msn2700-01,Ethernet108,str-7260-10,Ethernet28,40000,1708,Access -str-msn2700-01,Ethernet112,str-7260-10,Ethernet29,40000,1709,Access -str-msn2700-01,Ethernet116,str-7260-10,Ethernet30,40000,1710,Access -str-msn2700-01,Ethernet120,str-7260-10,Ethernet31,40000,1711,Access -str-msn2700-01,Ethernet124,str-7260-10,Ethernet32,40000,1712,Access -str-7260-11,Ethernet19,str-acs-serv-01,p4p1,40000,,Trunk -str-7260-11,Ethernet30,str-7260-10,Ethernet64,40000,1681-1712,Trunk +cel-e1031-01,Ethernet1,e1031-fanout,Ethernet1,1000,100,Access +cel-e1031-01,Ethernet2,e1031-fanout,Ethernet2,1000,101,Access +cel-e1031-01,Ethernet3,e1031-fanout,Ethernet3,1000,102,Access +cel-e1031-01,Ethernet4,e1031-fanout,Ethernet4,1000,103,Access +cel-e1031-01,Ethernet5,e1031-fanout,Ethernet5,1000,104,Access +cel-e1031-01,Ethernet6,e1031-fanout,Ethernet6,1000,105,Access +cel-e1031-01,Ethernet7,e1031-fanout,Ethernet7,1000,106,Access +cel-e1031-01,Ethernet8,e1031-fanout,Ethernet8,1000,107,Access +cel-e1031-01,Ethernet9,e1031-fanout,Ethernet9,1000,108,Access +cel-e1031-01,Ethernet10,e1031-fanout,Ethernet10,1000,109,Access 
+cel-e1031-01,Ethernet11,e1031-fanout,Ethernet11,1000,110,Access +cel-e1031-01,Ethernet12,e1031-fanout,Ethernet12,1000,111,Access +cel-e1031-01,Ethernet13,e1031-fanout,Ethernet13,1000,112,Access +cel-e1031-01,Ethernet14,e1031-fanout,Ethernet14,1000,113,Access +cel-e1031-01,Ethernet15,e1031-fanout,Ethernet15,1000,114,Access +cel-e1031-01,Ethernet16,e1031-fanout,Ethernet16,1000,115,Access +cel-e1031-01,Ethernet17,e1031-fanout,Ethernet17,1000,116,Access +cel-e1031-01,Ethernet18,e1031-fanout,Ethernet18,1000,117,Access +cel-e1031-01,Ethernet19,e1031-fanout,Ethernet19,1000,118,Access +cel-e1031-01,Ethernet20,e1031-fanout,Ethernet20,1000,119,Access +cel-e1031-01,Ethernet21,e1031-fanout,Ethernet21,1000,120,Access +cel-e1031-01,Ethernet22,e1031-fanout,Ethernet22,1000,121,Access +cel-e1031-01,Ethernet23,e1031-fanout,Ethernet23,1000,122,Access +cel-e1031-01,Ethernet24,e1031-fanout,Ethernet24,1000,123,Access +cel-e1031-01,Ethernet25,e1031-fanout,Ethernet25,1000,124,Access +cel-e1031-01,Ethernet26,e1031-fanout,Ethernet26,1000,125,Access +cel-e1031-01,Ethernet27,e1031-fanout,Ethernet27,1000,126,Access +cel-e1031-01,Ethernet28,e1031-fanout,Ethernet28,1000,127,Access +cel-e1031-01,Ethernet29,e1031-fanout,Ethernet29,1000,128,Access +cel-e1031-01,Ethernet30,e1031-fanout,Ethernet30,1000,129,Access +cel-e1031-01,Ethernet31,e1031-fanout,Ethernet31,1000,130,Access +cel-e1031-01,Ethernet32,e1031-fanout,Ethernet32,1000,131,Access +cel-seastone-01,Ethernet0,seastone-fanout,Ethernet0,100000,100,Access +cel-seastone-01,Ethernet4,seastone-fanout,Ethernet4,100000,101,Access +cel-seastone-01,Ethernet8,seastone-fanout,Ethernet8,100000,102,Access +cel-seastone-01,Ethernet12,seastone-fanout,Ethernet12,100000,103,Access +cel-seastone-01,Ethernet16,seastone-fanout,Ethernet16,100000,104,Access +cel-seastone-01,Ethernet20,seastone-fanout,Ethernet20,100000,105,Access +cel-seastone-01,Ethernet24,seastone-fanout,Ethernet24,100000,106,Access 
+cel-seastone-01,Ethernet28,seastone-fanout,Ethernet28,100000,107,Access +cel-seastone-01,Ethernet32,seastone-fanout,Ethernet32,100000,108,Access +cel-seastone-01,Ethernet36,seastone-fanout,Ethernet36,100000,109,Access +cel-seastone-01,Ethernet40,seastone-fanout,Ethernet40,100000,110,Access +cel-seastone-01,Ethernet44,seastone-fanout,Ethernet44,100000,111,Access +cel-seastone-01,Ethernet48,seastone-fanout,Ethernet48,100000,112,Access +cel-seastone-01,Ethernet52,seastone-fanout,Ethernet52,100000,113,Access +cel-seastone-01,Ethernet56,seastone-fanout,Ethernet56,100000,114,Access +cel-seastone-01,Ethernet60,seastone-fanout,Ethernet60,100000,115,Access +cel-seastone-01,Ethernet64,seastone-fanout,Ethernet64,100000,116,Access +cel-seastone-01,Ethernet68,seastone-fanout,Ethernet68,100000,117,Access +cel-seastone-01,Ethernet72,seastone-fanout,Ethernet72,100000,118,Access +cel-seastone-01,Ethernet76,seastone-fanout,Ethernet76,100000,119,Access +cel-seastone-01,Ethernet80,seastone-fanout,Ethernet80,100000,120,Access +cel-seastone-01,Ethernet84,seastone-fanout,Ethernet84,100000,121,Access +cel-seastone-01,Ethernet88,seastone-fanout,Ethernet88,100000,122,Access +cel-seastone-01,Ethernet92,seastone-fanout,Ethernet92,100000,123,Access +cel-seastone-01,Ethernet96,seastone-fanout,Ethernet96,100000,124,Access +cel-seastone-01,Ethernet100,seastone-fanout,Ethernet100,100000,125,Access +cel-seastone-01,Ethernet104,seastone-fanout,Ethernet104,100000,126,Access +cel-seastone-01,Ethernet108,seastone-fanout,Ethernet108,100000,127,Access +cel-seastone-01,Ethernet112,seastone-fanout,Ethernet112,100000,128,Access +cel-seastone-01,Ethernet116,seastone-fanout,Ethernet116,100000,129,Access +cel-seastone-01,Ethernet120,seastone-fanout,Ethernet120,100000,130,Access +cel-seastone2-01,Ethernet0,seastone-fanout,Ethernet0,100000,100,Access +cel-seastone2-01,Ethernet4,seastone-fanout,Ethernet4,100000,101,Access +cel-seastone2-01,Ethernet8,seastone-fanout,Ethernet8,100000,102,Access 
+cel-seastone2-01,Ethernet12,seastone-fanout,Ethernet12,100000,103,Access +cel-seastone2-01,Ethernet16,seastone-fanout,Ethernet16,100000,104,Access +cel-seastone2-01,Ethernet20,seastone-fanout,Ethernet20,100000,105,Access +cel-seastone2-01,Ethernet24,seastone-fanout,Ethernet24,100000,106,Access +cel-seastone2-01,Ethernet28,seastone-fanout,Ethernet28,100000,107,Access +cel-seastone2-01,Ethernet32,seastone-fanout,Ethernet32,100000,108,Access +cel-seastone2-01,Ethernet36,seastone-fanout,Ethernet36,100000,109,Access +cel-seastone2-01,Ethernet40,seastone-fanout,Ethernet40,100000,110,Access +cel-seastone2-01,Ethernet44,seastone-fanout,Ethernet44,100000,111,Access +cel-seastone2-01,Ethernet48,seastone-fanout,Ethernet48,100000,112,Access +cel-seastone2-01,Ethernet52,seastone-fanout,Ethernet52,100000,113,Access +cel-seastone2-01,Ethernet56,seastone-fanout,Ethernet56,100000,114,Access +cel-seastone2-01,Ethernet60,seastone-fanout,Ethernet60,100000,115,Access +cel-seastone2-01,Ethernet64,seastone-fanout,Ethernet64,100000,116,Access +cel-seastone2-01,Ethernet68,seastone-fanout,Ethernet68,100000,117,Access +cel-seastone2-01,Ethernet72,seastone-fanout,Ethernet72,100000,118,Access +cel-seastone2-01,Ethernet76,seastone-fanout,Ethernet76,100000,119,Access +cel-seastone2-01,Ethernet80,seastone-fanout,Ethernet80,100000,120,Access +cel-seastone2-01,Ethernet84,seastone-fanout,Ethernet84,100000,121,Access +cel-seastone2-01,Ethernet88,seastone-fanout,Ethernet88,100000,122,Access +cel-seastone2-01,Ethernet92,seastone-fanout,Ethernet92,100000,123,Access +cel-seastone2-01,Ethernet96,seastone-fanout,Ethernet96,100000,124,Access +cel-seastone2-01,Ethernet100,seastone-fanout,Ethernet100,100000,125,Access +cel-seastone2-01,Ethernet104,seastone-fanout,Ethernet104,100000,126,Access +cel-seastone2-01,Ethernet108,seastone-fanout,Ethernet108,100000,127,Access +cel-seastone2-01,Ethernet112,seastone-fanout,Ethernet112,100000,128,Access 
+cel-seastone2-01,Ethernet116,seastone-fanout,Ethernet116,100000,129,Access +cel-seastone2-01,Ethernet120,seastone-fanout,Ethernet120,100000,130,Access diff --git a/ansible/group_vars/all/labinfo.json b/ansible/group_vars/all/labinfo.json index c279a2c4edf..4de77d7bda9 100644 --- a/ansible/group_vars/all/labinfo.json +++ b/ansible/group_vars/all/labinfo.json @@ -10,7 +10,9 @@ "Arista-VM": "Arista", "Nexus-3064-NX": "Nexus", "Force10-S6100": "Force10", - "Force10-S6000": "Force10" + "Force10-S6000": "Force10", + "Celestica-DX010-C32": "Sonic", + "Celestica-E1031-T48S4": "Sonic" }, "switch_login": { "Nexus": { @@ -20,13 +22,18 @@ }, "Arista": { "user": "admin", - "passwd": ["password", "123456"], - "enable": ["", null] + "passwd": ["root","password", "123456"], + "enable": ['', null] }, "Force10": { "user": "admin", "passwd": ["password"], "enable": ["password"] + }, + "Sonic": { + "user": "admin", + "passwd": ["root","password","123456"], + "enable": ['',null] } } } diff --git a/ansible/group_vars/eos/creds.yml b/ansible/group_vars/eos/creds.yml new file mode 100644 index 00000000000..f57c2e405c0 --- /dev/null +++ b/ansible/group_vars/eos/creds.yml @@ -0,0 +1,4 @@ + +ansible_user: root +ansible_password: 123456 + diff --git a/ansible/group_vars/eos/eos.yml b/ansible/group_vars/eos/eos.yml index 795c0763f59..a18236f8af7 100644 --- a/ansible/group_vars/eos/eos.yml +++ b/ansible/group_vars/eos/eos.yml @@ -2,5 +2,3 @@ snmp_rocommunity: strcommunity snmp_location: str bgp_gr_timer: 700 - -ceos_image_mount_dir: /data/ceos diff --git a/ansible/group_vars/fanout/secrets.yml b/ansible/group_vars/fanout/secrets.yml index 735cd95babc..49a12b8b79a 100644 --- a/ansible/group_vars/fanout/secrets.yml +++ b/ansible/group_vars/fanout/secrets.yml @@ -1,16 +1,6 @@ -# Please update the actual username and password according to your lab configuration - ansible_ssh_user: user ansible_ssh_pass: password fanout_mlnx_user: admin fanout_mlnx_password: admin fanout_sonic_user: admin 
fanout_sonic_password: password - -# Credential for accessing the network cli interface -fanout_network_user: netadmin -fanout_network_password: netpassword - -# Credential for accessing the Linux shell -fanout_shell_user: shelladmin -fanout_shell_password: shellpassword diff --git a/ansible/group_vars/lab/lab.yml b/ansible/group_vars/lab/lab.yml index f62e2f547fa..bbe1947da6e 100644 --- a/ansible/group_vars/lab/lab.yml +++ b/ansible/group_vars/lab/lab.yml @@ -3,7 +3,7 @@ # file: group_vars/lab.yml # ntp variables -ntp_servers: ['10.0.0.1', '10.0.0.2'] +ntp_servers: ['10.251.0.235'] # syslog variables syslog_servers: ['10.0.0.5', '10.0.0.6'] @@ -21,12 +21,6 @@ radius_servers: [] tacacs_servers: ['10.0.0.9', '10.0.0.8'] -tacacs_passkey: testing123 -tacacs_rw_user: test_rwuser -tacacs_rw_user_passwd: '123456' -tacacs_ro_user: test_rouser -tacacs_ro_user_passwd: '123456' - # tacacs grous tacacs_group: 'testlab' diff --git a/ansible/group_vars/sonic/sku-sensors-data.yml b/ansible/group_vars/sonic/sku-sensors-data.yml index 6997362d07c..df18f6e52c6 100644 --- a/ansible/group_vars/sonic/sku-sensors-data.yml +++ b/ansible/group_vars/sonic/sku-sensors-data.yml @@ -127,11 +127,6 @@ sensors_checks: - SMF_S6100_ON-isa-0000/PSU2 Output Power/power4_input - SMF_S6100_ON-isa-0000/XP1R0V/curr21_input - SMF_S6100_ON-isa-0000/XP1R0V_ROV/curr22_input - - SMF_S6100_ON-isa-0000/PSU1 Input Current/curr601_input - - SMF_S6100_ON-isa-0000/PSU1 Output Current/curr602_input - - SMF_S6100_ON-isa-0000/PSU2 Input Current/curr701_input - - SMF_S6100_ON-isa-0000/PSU2 Output Current/curr702_input - temp: - SMF_S6100_ON-isa-0000/CPU On-board (U2900)/temp1_input - "SMF_S6100_ON-isa-0000/BCM On-Board #1 (U44)/temp2_input" @@ -209,7 +204,6 @@ sensors_checks: - SMF_Z9100_ON-isa-0000/SW XP1R8V_FPGA_MON/in26_alarm - SMF_Z9100_ON-isa-0000/SW XP3R3V_FPGA_MON/in27_alarm - SMF_Z9100_ON-isa-0000/SW XP3R3V_EARLY_MON/in28_alarm - compares: temp: - - coretemp-isa-0000/Core 0/temp2_input @@ -255,11 +249,6 @@ 
sensors_checks: - SMF_Z9100_ON-isa-0000/PSU2 VOUT/in32_input - SMF_Z9100_ON-isa-0000/XP1R0V/curr21_input - SMF_Z9100_ON-isa-0000/XP1R0V_ROV/curr22_input - - SMF_Z9100_ON-isa-0000/PSU1 Input Current/curr601_input - - SMF_Z9100_ON-isa-0000/PSU1 Output Current/curr602_input - - SMF_Z9100_ON-isa-0000/PSU2 Input Current/curr701_input - - SMF_Z9100_ON-isa-0000/PSU2 Output Current/curr702_input - temp: - coretemp-isa-0000/Core 0/temp2_input - coretemp-isa-0000/Core 1/temp3_input @@ -441,6 +430,10 @@ sensors_checks: - - pmbus-i2c-5-27/vout2/in3_min - pmbus-i2c-5-27/vout2/in3_input temp: + - - acpitz-virtual-0/temp1/temp1_input + - acpitz-virtual-0/temp1/temp1_crit + - - acpitz-virtual-0/temp2/temp2_input + - acpitz-virtual-0/temp2/temp2_crit - - lm75-i2c-7-4a/temp1/temp1_input - lm75-i2c-7-4a/temp1/temp1_max_hyst - - lm75-i2c-17-49/temp1/temp1_input @@ -685,6 +678,10 @@ sensors_checks: - - pmbus-i2c-5-27/vout2/in3_min - pmbus-i2c-5-27/vout2/in3_input temp: + - - acpitz-virtual-0/temp1/temp1_input + - acpitz-virtual-0/temp1/temp1_crit + - - acpitz-virtual-0/temp2/temp2_input + - acpitz-virtual-0/temp2/temp2_crit - - lm75-i2c-7-4a/temp1/temp1_input - lm75-i2c-7-4a/temp1/temp1_max_hyst - - lm75-i2c-17-49/temp1/temp1_input @@ -785,22 +782,20 @@ sensors_checks: alarms: fan: [] power: - - tps53679-i2c-5-70/vin1/in1_alarm - - tps53679-i2c-5-70/vin2/in2_alarm - - tps53679-i2c-5-70/vout1/in3_lcrit_alarm - - tps53679-i2c-5-70/vout1/in3_crit_alarm - - tps53679-i2c-5-70/vout2/in4_lcrit_alarm - - tps53679-i2c-5-70/vout2/in4_crit_alarm + - tps53679-i2c-5-70/vin/in1_alarm + - tps53679-i2c-5-70/vout1/in2_lcrit_alarm + - tps53679-i2c-5-70/vout1/in2_crit_alarm + - tps53679-i2c-5-70/vout2/in3_lcrit_alarm + - tps53679-i2c-5-70/vout2/in3_crit_alarm - tps53679-i2c-5-70/iout1/curr1_max_alarm - tps53679-i2c-5-70/iout1/curr1_crit_alarm - tps53679-i2c-5-70/iout2/curr2_max_alarm - tps53679-i2c-5-70/iout2/curr2_crit_alarm - - tps53679-i2c-5-71/vin1/in1_alarm - - tps53679-i2c-5-71/vin2/in2_alarm - - 
tps53679-i2c-5-71/vout1/in3_lcrit_alarm - - tps53679-i2c-5-71/vout1/in3_crit_alarm - - tps53679-i2c-5-71/vout2/in4_lcrit_alarm - - tps53679-i2c-5-71/vout2/in4_crit_alarm + - tps53679-i2c-5-71/vin/in1_alarm + - tps53679-i2c-5-71/vout1/in2_lcrit_alarm + - tps53679-i2c-5-71/vout1/in2_crit_alarm + - tps53679-i2c-5-71/vout2/in3_lcrit_alarm + - tps53679-i2c-5-71/vout2/in3_crit_alarm - tps53679-i2c-5-71/iout1/curr1_max_alarm - tps53679-i2c-5-71/iout1/curr1_crit_alarm - tps53679-i2c-5-71/iout2/curr2_max_alarm @@ -861,41 +856,37 @@ sensors_checks: - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_lcrit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_lcrit_alarm + - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in)/in1_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_lcrit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_lcrit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail 
(out)/in3_lcrit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in)/in1_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in2_crit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in2_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in3_lcrit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_lcrit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_lcrit_alarm + - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in)/in1_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in2_crit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in2_lcrit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in3_lcrit_alarm - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in)/in1_alarm + - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail 
(out)/in2_crit_alarm + - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in2_lcrit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_crit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_max_alarm @@ -1045,184 +1036,119 @@ sensors_checks: temp: [] psu_skips: {} - x86_64-mlnx_msn3700c-r0: + x86_64-arista_7050_qx32: alarms: - fan: - - dps460-i2c-4-58/PSU-1 Fan 1/fan1_alarm - - dps460-i2c-4-58/PSU-1 Fan 1/fan1_fault - - - dps460-i2c-4-59/PSU-2 Fan 1/fan1_alarm - - dps460-i2c-4-59/PSU-2 Fan 1/fan1_fault - - - mlxreg_fan-isa-0000/Chassis Fan Drawer-1 Tach 1/fan1_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-1 Tach 2/fan2_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-2 Tach 1/fan3_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-2 Tach 2/fan4_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-3 Tach 1/fan5_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-3 Tach 2/fan6_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-4 Tach 1/fan7_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-4 Tach 2/fan8_fault + fan: [] power: - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_lcrit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_lcrit_alarm + - dps460-i2c-6-58/vin/in1_min_alarm + - dps460-i2c-6-58/vin/in1_max_alarm + - dps460-i2c-6-58/vin/in1_lcrit_alarm + - dps460-i2c-6-58/vin/in1_crit_alarm + - dps460-i2c-6-58/vout1/in3_min_alarm + - dps460-i2c-6-58/vout1/in3_max_alarm + - 
dps460-i2c-6-58/vout1/in3_lcrit_alarm + - dps460-i2c-6-58/vout1/in3_crit_alarm + - dps460-i2c-6-58/iin/curr1_max_alarm + - dps460-i2c-6-58/iin/curr1_crit_alarm + - dps460-i2c-6-58/iout1/curr2_max_alarm + - dps460-i2c-6-58/iout1/curr2_lcrit_alarm + - dps460-i2c-6-58/iout1/curr2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in3_lcrit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_lcrit_alarm + temp: + - dps460-i2c-6-58/Power supply 2 inlet temp sensor/temp1_max_alarm + - dps460-i2c-6-58/Power supply 2 inlet temp sensor/temp1_min_alarm + - dps460-i2c-6-58/Power supply 2 inlet temp sensor/temp1_crit_alarm + - dps460-i2c-6-58/Power supply 2 inlet temp sensor/temp1_lcrit_alarm + - dps460-i2c-6-58/Power supply 2 internal sensor/temp2_max_alarm + - dps460-i2c-6-58/Power supply 2 internal sensor/temp2_min_alarm + - dps460-i2c-6-58/Power supply 2 internal sensor/temp2_crit_alarm + - dps460-i2c-6-58/Power supply 2 internal sensor/temp2_lcrit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_crit_alarm - - 
tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_lcrit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_lcrit_alarm + compares: + fan: [] + power: [] + temp: + - - k10temp-pci-00c3/Cpu temp sensor/temp1_input + - k10temp-pci-00c3/Cpu temp sensor/temp1_max + - - lm73-i2c-3-48/Back panel temp sensor/temp1_input + - lm73-i2c-3-48/Back panel temp sensor/temp1_max + - - max6658-i2c-2-4c/Board temp sensor/temp1_input + - max6658-i2c-2-4c/Board temp sensor/temp1_max + - - max6658-i2c-2-4c/Front panel temp sensor/temp2_input + - max6658-i2c-2-4c/Front panel temp sensor/temp2_max + - - dps460-i2c-6-58/Power supply 2 inlet temp sensor/temp1_input + - dps460-i2c-6-58/Power supply 2 inlet temp sensor/temp1_max + - - dps460-i2c-6-58/Power supply 2 internal sensor/temp2_input + - dps460-i2c-6-58/Power supply 2 internal sensor/temp2_max - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_lcrit_alarm + non_zero: + fan: + - dps460-i2c-6-58/fan1/fan1_input + power: [] + temp: [] - - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_crit_alarm - - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_max_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Curr (out)/curr2_crit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Curr (out)/curr2_lcrit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Curr (out)/curr2_max_alarm - - dps460-i2c-4-58/PSU-1 220V Rail Pwr (in)/power1_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Pwr (out)/power2_cap_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Pwr (out)/power2_crit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Pwr (out)/power2_max_alarm - - dps460-i2c-4-58/PSU-1 220V Rail 
(in)/in1_crit_alarm - - dps460-i2c-4-58/PSU-1 220V Rail (in)/in1_lcrit_alarm - - dps460-i2c-4-58/PSU-1 220V Rail (in)/in1_max_alarm - - dps460-i2c-4-58/PSU-1 220V Rail (in)/in1_min_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_crit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_lcrit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_max_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_min_alarm + psu_skips: {} - - dps460-i2c-4-59/PSU-2 220V Rail Curr (in)/curr1_crit_alarm - - dps460-i2c-4-59/PSU-2 220V Rail Curr (in)/curr1_max_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Curr (out)/curr2_crit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Curr (out)/curr2_lcrit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Curr (out)/curr2_max_alarm - - dps460-i2c-4-59/PSU-2 220V Rail Pwr (in)/power1_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Pwr (out)/power2_cap_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Pwr (out)/power2_crit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Pwr (out)/power2_max_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_crit_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_lcrit_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_max_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_min_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_crit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_lcrit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_max_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_min_alarm + x86_64-arista_7260cx3_64: + alarms: + fan: + - pmbus-i2c-3-58/fan1/fan1_alarm + - pmbus-i2c-4-58/fan1/fan1_alarm + - pmbus-i2c-3-58/fan1/fan1_fault + - pmbus-i2c-4-58/fan1/fan1_fault + - la_cpld-i2c-85-60/fan1/fan1_fault + - la_cpld-i2c-85-60/fan2/fan2_fault + - la_cpld-i2c-85-60/fan3/fan3_fault + - la_cpld-i2c-85-60/fan4/fan4_fault + power: + - pmbus-i2c-3-58/iin/curr1_max_alarm + - pmbus-i2c-3-58/iout1/curr2_max_alarm + - pmbus-i2c-3-58/iout1/curr2_crit_alarm + - pmbus-i2c-3-58/iout2/curr3_crit_alarm + - pmbus-i2c-3-58/vin/in1_alarm + - 
pmbus-i2c-3-58/vout1/in2_lcrit_alarm + - pmbus-i2c-3-58/vout1/in2_crit_alarm + - pmbus-i2c-4-58/iin/curr1_max_alarm + - pmbus-i2c-4-58/iout1/curr2_max_alarm + - pmbus-i2c-4-58/iout1/curr2_crit_alarm + - pmbus-i2c-4-58/iout2/curr3_crit_alarm + - pmbus-i2c-4-58/vin/in1_alarm + - pmbus-i2c-4-58/vout1/in2_lcrit_alarm + - pmbus-i2c-4-58/vout1/in2_crit_alarm temp: - coretemp-isa-0000/Physical id 0/temp1_crit_alarm - coretemp-isa-0000/Core 0/temp2_crit_alarm - coretemp-isa-0000/Core 1/temp3_crit_alarm + - lm73-i2c-88-48/Front panel temp sensor/temp1_min_alarm + - lm73-i2c-88-48/Front panel temp sensor/temp1_max_alarm + - max6658-i2c-1-4c/Asic temp sensor/temp1_min_alarm + - max6658-i2c-1-4c/Asic temp sensor/temp1_max_alarm + - max6658-i2c-1-4c/Asic temp sensor/temp1_crit_alarm + - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_min_alarm + - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_max_alarm + - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_crit_alarm + - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_min_alarm + - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_max_alarm + - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_crit_alarm + - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_fault + - pmbus-i2c-3-58/Power supply 1 exhaust temp sensor/temp3_alarm + - pmbus-i2c-3-58/Power supply 1 inlet temp sensor/temp2_alarm + - pmbus-i2c-3-58/Power supply 1 hotspot sensor/temp1_alarm + - pmbus-i2c-4-58/Power supply 2 exhaust temp sensor/temp3_alarm + - pmbus-i2c-4-58/Power supply 2 inlet temp sensor/temp2_alarm + - pmbus-i2c-4-58/Power supply 2 hotspot sensor/temp1_alarm - - mlxsw-i2c-2-48/front panel 001/temp2_fault - - mlxsw-i2c-2-48/front panel 002/temp3_fault - - mlxsw-i2c-2-48/front panel 003/temp4_fault - - mlxsw-i2c-2-48/front panel 004/temp5_fault - - mlxsw-i2c-2-48/front panel 005/temp6_fault - - mlxsw-i2c-2-48/front panel 006/temp7_fault - - mlxsw-i2c-2-48/front panel 007/temp8_fault - - mlxsw-i2c-2-48/front panel 008/temp9_fault - - 
mlxsw-i2c-2-48/front panel 009/temp10_fault - - mlxsw-i2c-2-48/front panel 010/temp11_fault - - mlxsw-i2c-2-48/front panel 011/temp12_fault - - mlxsw-i2c-2-48/front panel 012/temp13_fault - - mlxsw-i2c-2-48/front panel 013/temp14_fault - - mlxsw-i2c-2-48/front panel 014/temp15_fault - - mlxsw-i2c-2-48/front panel 015/temp16_fault - - mlxsw-i2c-2-48/front panel 016/temp17_fault - - mlxsw-i2c-2-48/front panel 017/temp18_fault - - mlxsw-i2c-2-48/front panel 018/temp19_fault - - mlxsw-i2c-2-48/front panel 019/temp20_fault - - mlxsw-i2c-2-48/front panel 020/temp21_fault - - mlxsw-i2c-2-48/front panel 021/temp22_fault - - mlxsw-i2c-2-48/front panel 022/temp23_fault - - mlxsw-i2c-2-48/front panel 023/temp24_fault - - mlxsw-i2c-2-48/front panel 024/temp25_fault - - mlxsw-i2c-2-48/front panel 025/temp26_fault - - mlxsw-i2c-2-48/front panel 026/temp27_fault - - mlxsw-i2c-2-48/front panel 027/temp28_fault - - mlxsw-i2c-2-48/front panel 028/temp29_fault - - mlxsw-i2c-2-48/front panel 029/temp30_fault - - mlxsw-i2c-2-48/front panel 030/temp31_fault - - mlxsw-i2c-2-48/front panel 031/temp32_fault - - mlxsw-i2c-2-48/front panel 032/temp33_fault - - - tps53679-i2c-5-70/PMIC-1 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-70/PMIC-1 Temp 1/temp1_max_alarm - - tps53679-i2c-5-70/PMIC-1 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-70/PMIC-1 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-71/PMIC-2 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-71/PMIC-2 Temp 1/temp1_max_alarm - - tps53679-i2c-5-71/PMIC-2 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 Temp 2/temp2_max_alarm - - - tps53679-i2c-15-58/PMIC-3 Temp 1/temp1_crit_alarm - - tps53679-i2c-15-58/PMIC-3 Temp 1/temp1_max_alarm - - tps53679-i2c-15-58/PMIC-3 Temp 2/temp2_crit_alarm - - tps53679-i2c-15-58/PMIC-3 Temp 2/temp2_max_alarm - - - tps53679-i2c-15-61/PMIC-4 Temp 1/temp1_crit_alarm - - tps53679-i2c-15-61/PMIC-4 Temp 1/temp1_max_alarm - - tps53679-i2c-15-61/PMIC-4 Temp 2/temp2_crit_alarm - - tps53679-i2c-15-61/PMIC-4 Temp 
2/temp2_max_alarm - - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_crit_alarm - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_lcrit_alarm - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_max_alarm - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_min_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_crit_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_lcrit_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_max_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_min_alarm - - dps460-i2c-4-58/PSU-1 Temp 3/temp3_crit_alarm - - dps460-i2c-4-58/PSU-1 Temp 3/temp3_lcrit_alarm - - dps460-i2c-4-58/PSU-1 Temp 3/temp3_max_alarm - - dps460-i2c-4-58/PSU-1 Temp 3/temp3_min_alarm - - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_crit_alarm - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_lcrit_alarm - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_max_alarm - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_min_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_crit_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_lcrit_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_max_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_min_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_crit_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_lcrit_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_max_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_min_alarm compares: - power: [] + fan: [] + power: + - - pmbus-i2c-3-58/iin/curr1_input + - pmbus-i2c-3-58/iin/curr1_max + - - pmbus-i2c-3-58/iout1/curr2_input + - pmbus-i2c-3-58/iout1/curr2_max + - - pmbus-i2c-4-58/iin/curr1_input + - pmbus-i2c-4-58/iin/curr1_max + - - pmbus-i2c-4-58/iout1/curr2_input + - pmbus-i2c-4-58/iout1/curr2_max temp: - - coretemp-isa-0000/Physical id 0/temp1_input - coretemp-isa-0000/Physical id 0/temp1_max @@ -1230,772 +1156,29 @@ sensors_checks: - coretemp-isa-0000/Core 0/temp2_max - - coretemp-isa-0000/Core 1/temp3_input - coretemp-isa-0000/Core 1/temp3_max + - - lm73-i2c-88-48/Front panel temp sensor/temp1_input + - lm73-i2c-88-48/Front panel temp sensor/temp1_max + - - max6658-i2c-1-4c/Asic temp sensor/temp1_input + - max6658-i2c-1-4c/Asic temp 
sensor/temp1_max + - - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_input + - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_max + - - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_input + - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_max - - - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_input - - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_max - - - - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_input - - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_max - - - - tmp102-i2c-15-49/Ambient COMEX Temp/temp1_input - - tmp102-i2c-15-49/Ambient COMEX Temp/temp1_max non_zero: - fan: [] + fan: + - pmbus-i2c-3-58/fan1/fan1_input + - pmbus-i2c-4-58/fan1/fan1_input + - la_cpld-i2c-85-60/fan1/fan1_input + - la_cpld-i2c-85-60/fan2/fan2_input + - la_cpld-i2c-85-60/fan3/fan3_input + - la_cpld-i2c-85-60/fan4/fan4_input power: [] temp: [] + psu_skips: {} - x86_64-mlnx_msn3800-r0: - alarms: - fan: - - dps460-i2c-4-58/PSU-1 Fan 1/fan1_alarm - - dps460-i2c-4-58/PSU-1 Fan 1/fan1_fault - - - dps460-i2c-4-59/PSU-2 Fan 1/fan1_alarm - - dps460-i2c-4-59/PSU-2 Fan 1/fan1_fault - - - mlxreg_fan-isa-0000/Chassis Fan Drawer-1 Tach 1/fan1_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-2 Tach 1/fan2_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-3 Tach 1/fan3_fault - power: - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_lcrit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail 
(out)/in4_lcrit_alarm - - - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail (out)/in3_lcrit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail (out)/in4_crit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail (out)/in4_lcrit_alarm - - - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-72/PMIC-3 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-72/PMIC-3 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail (out)/in3_lcrit_alarm - - - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-73/PMIC-4 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-5-73/PMIC-4 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail (out)/in3_lcrit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail (out)/in4_crit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail (out)/in4_lcrit_alarm - - - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V 
Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-5 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-58/PMIC-5 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail (out)/in3_lcrit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail (out)/in4_crit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail (out)/in4_lcrit_alarm - - - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-6 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-61/PMIC-6 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail (out)/in3_lcrit_alarm - - - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_crit_alarm - - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_max_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Curr (out)/curr2_crit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Curr (out)/curr2_lcrit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Curr (out)/curr2_max_alarm - - dps460-i2c-4-58/PSU-1 220V Rail Pwr (in)/power1_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Pwr (out)/power2_cap_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Pwr (out)/power2_crit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail Pwr (out)/power2_max_alarm - - dps460-i2c-4-58/PSU-1 220V Rail (in)/in1_crit_alarm - - dps460-i2c-4-58/PSU-1 220V Rail (in)/in1_lcrit_alarm - - dps460-i2c-4-58/PSU-1 220V Rail (in)/in1_max_alarm - - dps460-i2c-4-58/PSU-1 220V Rail (in)/in1_min_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_crit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_lcrit_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_max_alarm - - dps460-i2c-4-58/PSU-1 12V Rail (out)/in3_min_alarm - - - dps460-i2c-4-59/PSU-2 220V Rail Curr (in)/curr1_crit_alarm - - dps460-i2c-4-59/PSU-2 220V Rail Curr (in)/curr1_max_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Curr 
(out)/curr2_crit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Curr (out)/curr2_lcrit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Curr (out)/curr2_max_alarm - - dps460-i2c-4-59/PSU-2 220V Rail Pwr (in)/power1_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Pwr (out)/power2_cap_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Pwr (out)/power2_crit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail Pwr (out)/power2_max_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_crit_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_lcrit_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_max_alarm - - dps460-i2c-4-59/PSU-2 220V Rail (in)/in1_min_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_crit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_lcrit_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_max_alarm - - dps460-i2c-4-59/PSU-2 12V Rail (out)/in3_min_alarm - temp: - - coretemp-isa-0000/Physical id 0/temp1_crit_alarm - - coretemp-isa-0000/Core 0/temp2_crit_alarm - - coretemp-isa-0000/Core 1/temp3_crit_alarm - - coretemp-isa-0000/Core 2/temp4_crit_alarm - - coretemp-isa-0000/Core 3/temp5_crit_alarm - - - mlxsw-i2c-2-48/front panel 001/temp2_fault - - mlxsw-i2c-2-48/front panel 002/temp3_fault - - mlxsw-i2c-2-48/front panel 003/temp4_fault - - mlxsw-i2c-2-48/front panel 004/temp5_fault - - mlxsw-i2c-2-48/front panel 005/temp6_fault - - mlxsw-i2c-2-48/front panel 006/temp7_fault - - mlxsw-i2c-2-48/front panel 007/temp8_fault - - mlxsw-i2c-2-48/front panel 008/temp9_fault - - mlxsw-i2c-2-48/front panel 009/temp10_fault - - mlxsw-i2c-2-48/front panel 010/temp11_fault - - mlxsw-i2c-2-48/front panel 011/temp12_fault - - mlxsw-i2c-2-48/front panel 012/temp13_fault - - mlxsw-i2c-2-48/front panel 013/temp14_fault - - mlxsw-i2c-2-48/front panel 014/temp15_fault - - mlxsw-i2c-2-48/front panel 015/temp16_fault - - mlxsw-i2c-2-48/front panel 016/temp17_fault - - mlxsw-i2c-2-48/front panel 017/temp18_fault - - mlxsw-i2c-2-48/front panel 018/temp19_fault - - mlxsw-i2c-2-48/front panel 019/temp20_fault - - 
mlxsw-i2c-2-48/front panel 020/temp21_fault - - mlxsw-i2c-2-48/front panel 021/temp22_fault - - mlxsw-i2c-2-48/front panel 022/temp23_fault - - mlxsw-i2c-2-48/front panel 023/temp24_fault - - mlxsw-i2c-2-48/front panel 024/temp25_fault - - mlxsw-i2c-2-48/front panel 025/temp26_fault - - mlxsw-i2c-2-48/front panel 026/temp27_fault - - mlxsw-i2c-2-48/front panel 027/temp28_fault - - mlxsw-i2c-2-48/front panel 028/temp29_fault - - mlxsw-i2c-2-48/front panel 029/temp30_fault - - mlxsw-i2c-2-48/front panel 030/temp31_fault - - mlxsw-i2c-2-48/front panel 031/temp32_fault - - mlxsw-i2c-2-48/front panel 032/temp33_fault - - mlxsw-i2c-2-48/front panel 033/temp34_fault - - mlxsw-i2c-2-48/front panel 034/temp35_fault - - mlxsw-i2c-2-48/front panel 035/temp36_fault - - mlxsw-i2c-2-48/front panel 036/temp37_fault - - mlxsw-i2c-2-48/front panel 037/temp38_fault - - mlxsw-i2c-2-48/front panel 038/temp39_fault - - mlxsw-i2c-2-48/front panel 039/temp40_fault - - mlxsw-i2c-2-48/front panel 040/temp41_fault - - mlxsw-i2c-2-48/front panel 041/temp42_fault - - mlxsw-i2c-2-48/front panel 042/temp43_fault - - mlxsw-i2c-2-48/front panel 043/temp44_fault - - mlxsw-i2c-2-48/front panel 044/temp45_fault - - mlxsw-i2c-2-48/front panel 045/temp46_fault - - mlxsw-i2c-2-48/front panel 046/temp47_fault - - mlxsw-i2c-2-48/front panel 047/temp48_fault - - mlxsw-i2c-2-48/front panel 048/temp49_fault - - mlxsw-i2c-2-48/front panel 049/temp50_fault - - mlxsw-i2c-2-48/front panel 050/temp51_fault - - mlxsw-i2c-2-48/front panel 051/temp52_fault - - mlxsw-i2c-2-48/front panel 052/temp53_fault - - mlxsw-i2c-2-48/front panel 053/temp54_fault - - mlxsw-i2c-2-48/front panel 054/temp55_fault - - mlxsw-i2c-2-48/front panel 055/temp56_fault - - mlxsw-i2c-2-48/front panel 056/temp57_fault - - mlxsw-i2c-2-48/front panel 057/temp58_fault - - mlxsw-i2c-2-48/front panel 058/temp59_fault - - mlxsw-i2c-2-48/front panel 059/temp60_fault - - mlxsw-i2c-2-48/front panel 060/temp61_fault - - mlxsw-i2c-2-48/front panel 
061/temp62_fault - - mlxsw-i2c-2-48/front panel 062/temp63_fault - - mlxsw-i2c-2-48/front panel 063/temp64_fault - - mlxsw-i2c-2-48/front panel 064/temp65_fault - - - tps53679-i2c-5-70/PMIC-1 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-70/PMIC-1 Temp 1/temp1_max_alarm - - tps53679-i2c-5-70/PMIC-1 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-70/PMIC-1 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-71/PMIC-2 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-71/PMIC-2 Temp 1/temp1_max_alarm - - tps53679-i2c-5-71/PMIC-2 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-72/PMIC-3 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-72/PMIC-3 Temp 1/temp1_max_alarm - - tps53679-i2c-5-72/PMIC-3 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-72/PMIC-3 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-73/PMIC-4 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-73/PMIC-4 Temp 1/temp1_max_alarm - - tps53679-i2c-5-73/PMIC-4 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-73/PMIC-4 Temp 2/temp2_max_alarm - - - tps53679-i2c-15-58/PMIC-5 Temp 1/temp1_crit_alarm - - tps53679-i2c-15-58/PMIC-5 Temp 1/temp1_max_alarm - - tps53679-i2c-15-58/PMIC-5 Temp 2/temp2_crit_alarm - - tps53679-i2c-15-58/PMIC-5 Temp 2/temp2_max_alarm - - - tps53679-i2c-15-61/PMIC-6 Temp 1/temp1_crit_alarm - - tps53679-i2c-15-61/PMIC-6 Temp 1/temp1_max_alarm - - tps53679-i2c-15-61/PMIC-6 Temp 2/temp2_crit_alarm - - tps53679-i2c-15-61/PMIC-6 Temp 2/temp2_max_alarm - - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_crit_alarm - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_lcrit_alarm - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_max_alarm - - dps460-i2c-4-58/PSU-1 Temp 1/temp1_min_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_crit_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_lcrit_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_max_alarm - - dps460-i2c-4-58/PSU-1 Temp 2/temp2_min_alarm - - dps460-i2c-4-58/PSU-1 Temp 3/temp3_crit_alarm - - dps460-i2c-4-58/PSU-1 Temp 3/temp3_lcrit_alarm - - dps460-i2c-4-58/PSU-1 Temp 3/temp3_max_alarm - - 
dps460-i2c-4-58/PSU-1 Temp 3/temp3_min_alarm - - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_crit_alarm - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_lcrit_alarm - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_max_alarm - - dps460-i2c-4-59/PSU-2 Temp 1/temp1_min_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_crit_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_lcrit_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_max_alarm - - dps460-i2c-4-59/PSU-2 Temp 2/temp2_min_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_crit_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_lcrit_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_max_alarm - - dps460-i2c-4-59/PSU-2 Temp 3/temp3_min_alarm - compares: - power: [] - temp: - - - coretemp-isa-0000/Physical id 0/temp1_input - - coretemp-isa-0000/Physical id 0/temp1_max - - - coretemp-isa-0000/Core 0/temp2_input - - coretemp-isa-0000/Core 0/temp2_max - - - coretemp-isa-0000/Core 1/temp3_input - - coretemp-isa-0000/Core 1/temp3_max - - - coretemp-isa-0000/Core 2/temp4_input - - coretemp-isa-0000/Core 2/temp4_max - - - coretemp-isa-0000/Core 3/temp5_input - - coretemp-isa-0000/Core 3/temp5_max - - - - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_input - - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_max - - - - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_input - - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_max - - - - tmp102-i2c-15-49/Ambient COMEX Temp/temp1_input - - tmp102-i2c-15-49/Ambient COMEX Temp/temp1_max - non_zero: - fan: [] - power: [] - temp: [] - psu_skips: - dps460-i2c-4-58: - number: 2 - side: right - skip_list: - - dps460-i2c-4-58 - dps460-i2c-4-59: - number: 1 - side: left - skip_list: - - dps460-i2c-4-59 - - x86_64-mlnx_msn4700-r0: - alarms: - fan: - - dps460-i2c-4-58/PSU-1(L) Fan 1/fan1_alarm - - dps460-i2c-4-58/PSU-1(L) Fan 1/fan1_fault - - - dps460-i2c-4-59/PSU-2(R) Fan 1/fan1_alarm - - dps460-i2c-4-59/PSU-2(R) Fan 1/fan1_fault - - - mlxreg_fan-isa-0000/Chassis Fan Drawer-1 Tach 1/fan1_fault - - 
mlxreg_fan-isa-0000/Chassis Fan Drawer-1 Tach 2/fan2_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-2 Tach 1/fan3_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-2 Tach 2/fan4_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-3 Tach 1/fan5_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-3 Tach 2/fan6_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-4 Tach 1/fan7_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-4 Tach 2/fan8_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-5 Tach 1/fan9_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-5 Tach 2/fan10_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-6 Tach 1/fan11_fault - - mlxreg_fan-isa-0000/Chassis Fan Drawer-6 Tach 2/fan12_fault - power: - - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in1)/in1_lcrit_alarm - - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in1)/in1_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in2)/in2_lcrit_alarm - - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in2)/in2_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm - - - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm - - 
xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail (in1)/in1_lcrit_alarm - - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail (in1)/in1_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail (in2)/in2_lcrit_alarm - - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail (in2)/in2_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm - - - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in1)/in1_lcrit_alarm - - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in1)/in1_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in2)/in2_lcrit_alarm - - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in2)/in2_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm - - - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm - 
- xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in1)/in1_lcrit_alarm - - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in1)/in1_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in2)/in2_lcrit_alarm - - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in2)/in2_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm - - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in1)/in1_lcrit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in1)/in1_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in2)/in2_lcrit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in2)/in2_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm - - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr 
(in)/curr1_max_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in1)/in1_lcrit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in1)/in1_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in2)/in2_lcrit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in2)/in2_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm - - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in1)/in1_lcrit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in1)/in1_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in2)/in2_lcrit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in2)/in2_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail 
(out)/in4_lcrit_alarm - - - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-8 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-58/PMIC-8 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail (out)/in3_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in4_crit_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in4_lcrit_alarm - - - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-9 PSU 12V Rail (in1)/in1_alarm - - tps53679-i2c-15-61/PMIC-9 PSU 12V Rail (in2)/in2_alarm - - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in3_lcrit_alarm - - - dps460-i2c-4-58/PSU-1(L) 220V Rail Curr (in)/curr1_crit_alarm - - dps460-i2c-4-58/PSU-1(L) 220V Rail Curr (in)/curr1_max_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail Curr (out)/curr2_crit_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail Curr (out)/curr2_max_alarm - - dps460-i2c-4-58/PSU-1(L) 220V Rail Pwr (in)/power1_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail Pwr (out)/power2_crit_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail Pwr (out)/power2_max_alarm - - dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_crit_alarm - - dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_lcrit_alarm - - dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_max_alarm - - dps460-i2c-4-58/PSU-1(L) 220V Rail (in)/in1_min_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_crit_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_lcrit_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_max_alarm - - dps460-i2c-4-58/PSU-1(L) 12V Rail (out)/in3_min_alarm - - - dps460-i2c-4-59/PSU-2(R) 
220V Rail Curr (in)/curr1_crit_alarm - - dps460-i2c-4-59/PSU-2(R) 220V Rail Curr (in)/curr1_max_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail Curr (out)/curr2_crit_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail Curr (out)/curr2_max_alarm - - dps460-i2c-4-59/PSU-2(R) 220V Rail Pwr (in)/power1_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail Pwr (out)/power2_crit_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail Pwr (out)/power2_max_alarm - - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_crit_alarm - - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_lcrit_alarm - - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_max_alarm - - dps460-i2c-4-59/PSU-2(R) 220V Rail (in)/in1_min_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_crit_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_lcrit_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_max_alarm - - dps460-i2c-4-59/PSU-2(R) 12V Rail (out)/in3_min_alarm - temp: - - coretemp-isa-0000/Physical id 0/temp1_crit_alarm - - coretemp-isa-0000/Core 0/temp2_crit_alarm - - coretemp-isa-0000/Core 1/temp3_crit_alarm - - coretemp-isa-0000/Core 2/temp4_crit_alarm - - coretemp-isa-0000/Core 3/temp5_crit_alarm - - - mlxsw-i2c-2-48/front panel 001/temp2_fault - - mlxsw-i2c-2-48/front panel 002/temp3_fault - - mlxsw-i2c-2-48/front panel 003/temp4_fault - - mlxsw-i2c-2-48/front panel 004/temp5_fault - - mlxsw-i2c-2-48/front panel 005/temp6_fault - - mlxsw-i2c-2-48/front panel 006/temp7_fault - - mlxsw-i2c-2-48/front panel 007/temp8_fault - - mlxsw-i2c-2-48/front panel 008/temp9_fault - - mlxsw-i2c-2-48/front panel 009/temp10_fault - - mlxsw-i2c-2-48/front panel 010/temp11_fault - - mlxsw-i2c-2-48/front panel 011/temp12_fault - - mlxsw-i2c-2-48/front panel 012/temp13_fault - - mlxsw-i2c-2-48/front panel 013/temp14_fault - - mlxsw-i2c-2-48/front panel 014/temp15_fault - - mlxsw-i2c-2-48/front panel 015/temp16_fault - - mlxsw-i2c-2-48/front panel 016/temp17_fault - - mlxsw-i2c-2-48/front panel 017/temp18_fault - - mlxsw-i2c-2-48/front panel 018/temp19_fault - 
- mlxsw-i2c-2-48/front panel 019/temp20_fault - - mlxsw-i2c-2-48/front panel 020/temp21_fault - - mlxsw-i2c-2-48/front panel 021/temp22_fault - - mlxsw-i2c-2-48/front panel 022/temp23_fault - - mlxsw-i2c-2-48/front panel 023/temp24_fault - - mlxsw-i2c-2-48/front panel 024/temp25_fault - - mlxsw-i2c-2-48/front panel 025/temp26_fault - - mlxsw-i2c-2-48/front panel 026/temp27_fault - - mlxsw-i2c-2-48/front panel 027/temp28_fault - - mlxsw-i2c-2-48/front panel 028/temp29_fault - - mlxsw-i2c-2-48/front panel 029/temp30_fault - - mlxsw-i2c-2-48/front panel 030/temp31_fault - - mlxsw-i2c-2-48/front panel 031/temp32_fault - - mlxsw-i2c-2-48/front panel 032/temp33_fault - - - xdpe12284-i2c-5-62/PMIC-1 Temp 1/temp1_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 Temp 1/temp1_max_alarm - - xdpe12284-i2c-5-62/PMIC-1 Temp 2/temp2_crit_alarm - - xdpe12284-i2c-5-62/PMIC-1 Temp 2/temp2_max_alarm - - - xdpe12284-i2c-5-64/PMIC-2 Temp 1/temp1_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 Temp 1/temp1_max_alarm - - xdpe12284-i2c-5-64/PMIC-2 Temp 2/temp2_crit_alarm - - xdpe12284-i2c-5-64/PMIC-2 Temp 2/temp2_max_alarm - - - xdpe12284-i2c-5-66/PMIC-3 Temp 1/temp1_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 Temp 1/temp1_max_alarm - - xdpe12284-i2c-5-66/PMIC-3 Temp 2/temp2_crit_alarm - - xdpe12284-i2c-5-66/PMIC-3 Temp 2/temp2_max_alarm - - - xdpe12284-i2c-5-68/PMIC-4 Temp 1/temp1_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 Temp 1/temp1_max_alarm - - xdpe12284-i2c-5-68/PMIC-4 Temp 2/temp2_crit_alarm - - xdpe12284-i2c-5-68/PMIC-4 Temp 2/temp2_max_alarm - - - xdpe12284-i2c-5-6a/PMIC-5 Temp 1/temp1_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 Temp 1/temp1_max_alarm - - xdpe12284-i2c-5-6a/PMIC-5 Temp 2/temp2_crit_alarm - - xdpe12284-i2c-5-6a/PMIC-5 Temp 2/temp2_max_alarm - - - xdpe12284-i2c-5-6c/PMIC-6 Temp 1/temp1_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 Temp 1/temp1_max_alarm - - xdpe12284-i2c-5-6c/PMIC-6 Temp 2/temp2_crit_alarm - - xdpe12284-i2c-5-6c/PMIC-6 Temp 2/temp2_max_alarm - - - xdpe12284-i2c-5-6e/PMIC-7 
Temp 1/temp1_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 Temp 1/temp1_max_alarm - - xdpe12284-i2c-5-6e/PMIC-7 Temp 2/temp2_crit_alarm - - xdpe12284-i2c-5-6e/PMIC-7 Temp 2/temp2_max_alarm - - - tps53679-i2c-15-58/PMIC-8 Temp 1/temp1_crit_alarm - - tps53679-i2c-15-58/PMIC-8 Temp 1/temp1_max_alarm - - tps53679-i2c-15-58/PMIC-8 Temp 2/temp2_crit_alarm - - tps53679-i2c-15-58/PMIC-8 Temp 2/temp2_max_alarm - - - tps53679-i2c-15-61/PMIC-9 Temp 1/temp1_crit_alarm - - tps53679-i2c-15-61/PMIC-9 Temp 1/temp1_max_alarm - - tps53679-i2c-15-61/PMIC-9 Temp 2/temp2_crit_alarm - - tps53679-i2c-15-61/PMIC-9 Temp 2/temp2_max_alarm - - - dps460-i2c-4-58/PSU-1(L) Temp 1/temp1_crit_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 1/temp1_max_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_crit_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_max_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_crit_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_max_alarm - - - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_crit_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_max_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_crit_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_max_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_crit_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_max_alarm - compares: - power: [] - temp: - - - coretemp-isa-0000/Physical id 0/temp1_input - - coretemp-isa-0000/Physical id 0/temp1_max - - - coretemp-isa-0000/Core 0/temp2_input - - coretemp-isa-0000/Core 0/temp2_max - - - coretemp-isa-0000/Core 1/temp3_input - - coretemp-isa-0000/Core 1/temp3_max - - - coretemp-isa-0000/Core 2/temp4_input - - coretemp-isa-0000/Core 2/temp4_max - - - coretemp-isa-0000/Core 3/temp5_input - - coretemp-isa-0000/Core 3/temp5_max - - - - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_input - - tmp102-i2c-7-49/Ambient Fan Side Temp (air intake)/temp1_max - - - - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_input - - tmp102-i2c-7-4a/Ambient Port Side Temp (air exhaust)/temp1_max - - - - 
tmp102-i2c-15-49/Ambient COMEX Temp/temp1_input - - tmp102-i2c-15-49/Ambient COMEX Temp/temp1_max - non_zero: - fan: [] - power: [] - temp: [] - psu_skips: - dps460-i2c-4-58: - number: 2 - side: right - skip_list: - - dps460-i2c-4-58 - dps460-i2c-4-59: - number: 1 - side: left - skip_list: - - dps460-i2c-4-59 - - x86_64-arista_7050_qx32: - alarms: - fan: [] - power: - - dps460-i2c-8-58/vin/in1_min_alarm - - dps460-i2c-8-58/vin/in1_max_alarm - - dps460-i2c-8-58/vin/in1_lcrit_alarm - - dps460-i2c-8-58/vin/in1_crit_alarm - - dps460-i2c-8-58/vout1/in3_min_alarm - - dps460-i2c-8-58/vout1/in3_max_alarm - - dps460-i2c-8-58/vout1/in3_lcrit_alarm - - dps460-i2c-8-58/vout1/in3_crit_alarm - - dps460-i2c-8-58/iin/curr1_max_alarm - - dps460-i2c-8-58/iin/curr1_crit_alarm - - dps460-i2c-8-58/iout1/curr2_max_alarm - - dps460-i2c-8-58/iout1/curr2_lcrit_alarm - - dps460-i2c-8-58/iout1/curr2_crit_alarm - - dps460-i2c-9-58/vin/in1_min_alarm - - dps460-i2c-9-58/vin/in1_max_alarm - - dps460-i2c-9-58/vin/in1_lcrit_alarm - - dps460-i2c-9-58/vin/in1_crit_alarm - - dps460-i2c-9-58/vout1/in3_min_alarm - - dps460-i2c-9-58/vout1/in3_max_alarm - - dps460-i2c-9-58/vout1/in3_lcrit_alarm - - dps460-i2c-9-58/vout1/in3_crit_alarm - - dps460-i2c-9-58/iin/curr1_max_alarm - - dps460-i2c-9-58/iin/curr1_crit_alarm - - dps460-i2c-9-58/iout1/curr2_max_alarm - - dps460-i2c-9-58/iout1/curr2_lcrit_alarm - - dps460-i2c-9-58/iout1/curr2_crit_alarm - - temp: - - dps460-i2c-8-58/Power supply 1 inlet temp sensor/temp1_max_alarm - - dps460-i2c-8-58/Power supply 1 inlet temp sensor/temp1_min_alarm - - dps460-i2c-8-58/Power supply 1 inlet temp sensor/temp1_crit_alarm - - dps460-i2c-8-58/Power supply 1 inlet temp sensor/temp1_lcrit_alarm - - dps460-i2c-8-58/Power supply 1 internal sensor/temp2_max_alarm - - dps460-i2c-8-58/Power supply 1 internal sensor/temp2_min_alarm - - dps460-i2c-8-58/Power supply 1 internal sensor/temp2_crit_alarm - - dps460-i2c-8-58/Power supply 1 internal sensor/temp2_lcrit_alarm - - 
dps460-i2c-9-58/Power supply 2 inlet temp sensor/temp1_max_alarm - - dps460-i2c-9-58/Power supply 2 inlet temp sensor/temp1_min_alarm - - dps460-i2c-9-58/Power supply 2 inlet temp sensor/temp1_crit_alarm - - dps460-i2c-9-58/Power supply 2 inlet temp sensor/temp1_lcrit_alarm - - dps460-i2c-9-58/Power supply 2 internal sensor/temp2_max_alarm - - dps460-i2c-9-58/Power supply 2 internal sensor/temp2_min_alarm - - dps460-i2c-9-58/Power supply 2 internal sensor/temp2_crit_alarm - - dps460-i2c-9-58/Power supply 2 internal sensor/temp2_lcrit_alarm - - compares: - fan: [] - power: [] - temp: - - - k10temp-pci-00c3/Cpu temp sensor/temp1_input - - k10temp-pci-00c3/Cpu temp sensor/temp1_max - - - lm73-i2c-6-48/Back panel temp sensor/temp1_input - - lm73-i2c-6-48/Back panel temp sensor/temp1_max - - - max6658-i2c-5-4c/Board temp sensor/temp1_input - - max6658-i2c-5-4c/Board temp sensor/temp1_max - - - max6658-i2c-5-4c/Front panel temp sensor/temp2_input - - max6658-i2c-5-4c/Front panel temp sensor/temp2_max - - - dps460-i2c-8-58/Power supply 1 inlet temp sensor/temp1_input - - dps460-i2c-8-58/Power supply 1 inlet temp sensor/temp1_max - - - dps460-i2c-8-58/Power supply 1 internal sensor/temp2_input - - dps460-i2c-8-58/Power supply 1 internal sensor/temp2_max - - - dps460-i2c-9-58/Power supply 2 inlet temp sensor/temp1_input - - dps460-i2c-9-58/Power supply 2 inlet temp sensor/temp1_max - - - dps460-i2c-9-58/Power supply 2 internal sensor/temp2_input - - dps460-i2c-9-58/Power supply 2 internal sensor/temp2_max - - non_zero: - fan: - - dps460-i2c-8-58/fan1/fan1_input - - dps460-i2c-9-58/fan1/fan1_input - power: [] - temp: [] - - psu_skips: {} - - x86_64-arista_7260cx3_64: - alarms: - fan: - - pmbus-i2c-3-58/fan1/fan1_alarm - - pmbus-i2c-4-58/fan1/fan1_alarm - - pmbus-i2c-3-58/fan1/fan1_fault - - pmbus-i2c-4-58/fan1/fan1_fault - - la_cpld-i2c-85-60/fan1/fan1_fault - - la_cpld-i2c-85-60/fan2/fan2_fault - - la_cpld-i2c-85-60/fan3/fan3_fault - - la_cpld-i2c-85-60/fan4/fan4_fault - 
power: - - pmbus-i2c-3-58/iin/curr1_max_alarm - - pmbus-i2c-3-58/iout1/curr2_max_alarm - - pmbus-i2c-3-58/iout1/curr2_crit_alarm - - pmbus-i2c-3-58/iout2/curr3_crit_alarm - - pmbus-i2c-3-58/vin/in1_alarm - - pmbus-i2c-3-58/vout1/in2_lcrit_alarm - - pmbus-i2c-3-58/vout1/in2_crit_alarm - - pmbus-i2c-4-58/iin/curr1_max_alarm - - pmbus-i2c-4-58/iout1/curr2_max_alarm - - pmbus-i2c-4-58/iout1/curr2_crit_alarm - - pmbus-i2c-4-58/iout2/curr3_crit_alarm - - pmbus-i2c-4-58/vin/in1_alarm - - pmbus-i2c-4-58/vout1/in2_lcrit_alarm - - pmbus-i2c-4-58/vout1/in2_crit_alarm - temp: - - coretemp-isa-0000/Physical id 0/temp1_crit_alarm - - coretemp-isa-0000/Core 0/temp2_crit_alarm - - coretemp-isa-0000/Core 1/temp3_crit_alarm - - lm73-i2c-88-48/Front panel temp sensor/temp1_min_alarm - - lm73-i2c-88-48/Front panel temp sensor/temp1_max_alarm - - max6658-i2c-1-4c/Asic temp sensor/temp1_min_alarm - - max6658-i2c-1-4c/Asic temp sensor/temp1_max_alarm - - max6658-i2c-1-4c/Asic temp sensor/temp1_crit_alarm - - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_min_alarm - - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_max_alarm - - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_crit_alarm - - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_min_alarm - - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_max_alarm - - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_crit_alarm - - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_fault - - pmbus-i2c-3-58/Power supply 1 exhaust temp sensor/temp3_alarm - - pmbus-i2c-3-58/Power supply 1 inlet temp sensor/temp2_alarm - - pmbus-i2c-3-58/Power supply 1 hotspot sensor/temp1_alarm - - pmbus-i2c-4-58/Power supply 2 exhaust temp sensor/temp3_alarm - - pmbus-i2c-4-58/Power supply 2 inlet temp sensor/temp2_alarm - - pmbus-i2c-4-58/Power supply 2 hotspot sensor/temp1_alarm - - compares: - fan: [] - power: - - - pmbus-i2c-3-58/iin/curr1_input - - pmbus-i2c-3-58/iin/curr1_max - - - pmbus-i2c-3-58/iout1/curr2_input - - pmbus-i2c-3-58/iout1/curr2_max - - 
- pmbus-i2c-4-58/iin/curr1_input - - pmbus-i2c-4-58/iin/curr1_max - - - pmbus-i2c-4-58/iout1/curr2_input - - pmbus-i2c-4-58/iout1/curr2_max - temp: - - - coretemp-isa-0000/Physical id 0/temp1_input - - coretemp-isa-0000/Physical id 0/temp1_max - - - coretemp-isa-0000/Core 0/temp2_input - - coretemp-isa-0000/Core 0/temp2_max - - - coretemp-isa-0000/Core 1/temp3_input - - coretemp-isa-0000/Core 1/temp3_max - - - lm73-i2c-88-48/Front panel temp sensor/temp1_input - - lm73-i2c-88-48/Front panel temp sensor/temp1_max - - - max6658-i2c-1-4c/Asic temp sensor/temp1_input - - max6658-i2c-1-4c/Asic temp sensor/temp1_max - - - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_input - - max6658-i2c-73-4c/Back panel temp sensor 1/temp1_max - - - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_input - - max6658-i2c-73-4c/Back panel temp sensor 2/temp2_max - - non_zero: - fan: - - pmbus-i2c-3-58/fan1/fan1_input - - pmbus-i2c-4-58/fan1/fan1_input - - la_cpld-i2c-85-60/fan1/fan1_input - - la_cpld-i2c-85-60/fan2/fan2_input - - la_cpld-i2c-85-60/fan3/fan3_input - - la_cpld-i2c-85-60/fan4/fan4_input - power: [] - temp: [] - - psu_skips: {} - - x86_64-ingrasys_s9100-r0: + x86_64-ingrasys_s9100-r0: alarms: fan: - w83795adg-i2c-0-2f/FANTRAY 1-A/fan1_alarm @@ -2219,128 +1402,42 @@ sensors_checks: - w83795adg-i2c-8-2f/FANTRAY 4-B/fan8_alarm power: - w83795adg-i2c-8-2f/0.9V/in0_alarm - - w83795adg-i2c-8-2f/VDD_CORE/in1_alarm - - w83795adg-i2c-8-2f/1.2V/in2_alarm - - w83795adg-i2c-8-2f/1.8V/in3_alarm - - w83795adg-i2c-8-2f/1.01V/in4_alarm - - w83795adg-i2c-8-2f/3.3VDD/in12_alarm - temp: - - coretemp-isa-0000/Physical id 0/temp1_crit_alarm - - coretemp-isa-0000/Core 0/temp2_crit_alarm - - coretemp-isa-0000/Core 1/temp3_crit_alarm - - coretemp-isa-0000/Core 2/temp4_crit_alarm - - coretemp-isa-0000/Core 3/temp5_crit_alarm - - compares: - fan: [] - power: [] - temp: - - - tmp75-i2c-12-4c/rear MAC Temp/temp1_input - - tmp75-i2c-12-4c/rear MAC Temp/temp1_max - - - tmp75-i2c-12-49/front MAC 
Temp/temp1_input - - tmp75-i2c-12-49/front MAC Temp/temp1_max - - - tmp75-i2c-0-4f/x86 CPU board Temp/temp1_input - - tmp75-i2c-0-4f/x86 CPU board Temp/temp1_max - - - tmp75-i2c-8-4a/BMC board Temp/temp1_input - - tmp75-i2c-8-4a/BMC board Temp/temp1_max - non_zero: - fan: - - w83795adg-i2c-8-2f/FANTRAY 1-A/fan1_input - - w83795adg-i2c-8-2f/FANTRAY 1-B/fan2_input - - w83795adg-i2c-8-2f/FANTRAY 2-A/fan3_input - - w83795adg-i2c-8-2f/FANTRAY 2-B/fan4_input - - w83795adg-i2c-8-2f/FANTRAY 3-A/fan5_input - - w83795adg-i2c-8-2f/FANTRAY 3-B/fan6_input - - w83795adg-i2c-8-2f/FANTRAY 4-A/fan7_input - - w83795adg-i2c-8-2f/FANTRAY 4-B/fan8_input - power: [] - temp: [] - psu_skips: {} - - x86_64-accton_wedge100bf_65x-r0: - alarms: - fan: [] - power: [] - temp: [] - - compares: - fan: [] - power: [] - temp: - - - tmp75-i2c-3-48/Outlet Middle Temp/temp1_input - - tmp75-i2c-3-48/Outlet Middle Temp/temp1_max - - - tmp75-i2c-3-49/Inlet Middle Temp/temp1_input - - tmp75-i2c-3-49/Inlet Middle Temp/temp1_max - - - tmp75-i2c-3-4a/Inlet Left Temp/temp1_input - - tmp75-i2c-3-4a/Inlet Left Temp/temp1_max - - - tmp75-i2c-3-4b/Switch Temp/temp1_input - - tmp75-i2c-3-4b/Switch Temp/temp1_max - - - tmp75-i2c-3-4c/Inlet Right Temp/temp1_input - - tmp75-i2c-3-4c/Inlet Right Temp/temp1_max - - - tmp75-i2c-8-48/Outlet Right Temp/temp1_input - - tmp75-i2c-8-48/Outlet Right Temp/temp1_max - - - tmp75-i2c-8-49/Outlet Left Temp/temp1_input - - tmp75-i2c-8-49/Outlet Left Temp/temp1_max - - non_zero: - fan: - - fancpld-i2c-8-33/Fan 1 front/fan1_input - - fancpld-i2c-8-33/Fan 1 rear/fan2_input - - fancpld-i2c-8-33/Fan 2 front/fan3_input - - fancpld-i2c-8-33/Fan 2 rear/fan4_input - - fancpld-i2c-8-33/Fan 3 front/fan5_input - - fancpld-i2c-8-33/Fan 3 rear/fan6_input - - fancpld-i2c-8-33/Fan 4 front/fan7_input - - fancpld-i2c-8-33/Fan 4 rear/fan8_input - - fancpld-i2c-8-33/Fan 5 front/fan9_input - - fancpld-i2c-8-33/Fan 5 rear/fan10_input - - power: [] - temp: [] - - psu_skips: {} - - 
x86_64-accton_wedge100bf_32x-r0: - alarms: - fan: [] - power: [] - temp: [] + - w83795adg-i2c-8-2f/VDD_CORE/in1_alarm + - w83795adg-i2c-8-2f/1.2V/in2_alarm + - w83795adg-i2c-8-2f/1.8V/in3_alarm + - w83795adg-i2c-8-2f/1.01V/in4_alarm + - w83795adg-i2c-8-2f/3.3VDD/in12_alarm + temp: + - coretemp-isa-0000/Physical id 0/temp1_crit_alarm + - coretemp-isa-0000/Core 0/temp2_crit_alarm + - coretemp-isa-0000/Core 1/temp3_crit_alarm + - coretemp-isa-0000/Core 2/temp4_crit_alarm + - coretemp-isa-0000/Core 3/temp5_crit_alarm compares: fan: [] power: [] temp: - - - tmp75-i2c-3-48/Outlet Middle Temp/temp1_input - - tmp75-i2c-3-48/Outlet Middle Temp/temp1_max - - - tmp75-i2c-3-49/Inlet Middle Temp/temp1_input - - tmp75-i2c-3-49/Inlet Middle Temp/temp1_max - - - tmp75-i2c-3-4a/Inlet Left Temp/temp1_input - - tmp75-i2c-3-4a/Inlet Left Temp/temp1_max - - - tmp75-i2c-3-4b/Switch Temp/temp1_input - - tmp75-i2c-3-4b/Switch Temp/temp1_max - - - tmp75-i2c-3-4c/Inlet Right Temp/temp1_input - - tmp75-i2c-3-4c/Inlet Right Temp/temp1_max - - - tmp75-i2c-8-48/Outlet Right Temp/temp1_input - - tmp75-i2c-8-48/Outlet Right Temp/temp1_max - - - tmp75-i2c-8-49/Outlet Left Temp/temp1_input - - tmp75-i2c-8-49/Outlet Left Temp/temp1_max - + - - tmp75-i2c-12-4c/rear MAC Temp/temp1_input + - tmp75-i2c-12-4c/rear MAC Temp/temp1_max + - - tmp75-i2c-12-49/front MAC Temp/temp1_input + - tmp75-i2c-12-49/front MAC Temp/temp1_max + - - tmp75-i2c-0-4f/x86 CPU board Temp/temp1_input + - tmp75-i2c-0-4f/x86 CPU board Temp/temp1_max + - - tmp75-i2c-8-4a/BMC board Temp/temp1_input + - tmp75-i2c-8-4a/BMC board Temp/temp1_max non_zero: fan: - - fancpld-i2c-8-33/Fan 1 front/fan1_input - - fancpld-i2c-8-33/Fan 1 rear/fan2_input - - fancpld-i2c-8-33/Fan 2 front/fan3_input - - fancpld-i2c-8-33/Fan 2 rear/fan4_input - - fancpld-i2c-8-33/Fan 3 front/fan5_input - - fancpld-i2c-8-33/Fan 3 rear/fan6_input - - fancpld-i2c-8-33/Fan 4 front/fan7_input - - fancpld-i2c-8-33/Fan 4 rear/fan8_input - - fancpld-i2c-8-33/Fan 5 
front/fan9_input - - fancpld-i2c-8-33/Fan 5 rear/fan10_input - + - w83795adg-i2c-8-2f/FANTRAY 1-A/fan1_input + - w83795adg-i2c-8-2f/FANTRAY 1-B/fan2_input + - w83795adg-i2c-8-2f/FANTRAY 2-A/fan3_input + - w83795adg-i2c-8-2f/FANTRAY 2-B/fan4_input + - w83795adg-i2c-8-2f/FANTRAY 3-A/fan5_input + - w83795adg-i2c-8-2f/FANTRAY 3-B/fan6_input + - w83795adg-i2c-8-2f/FANTRAY 4-A/fan7_input + - w83795adg-i2c-8-2f/FANTRAY 4-B/fan8_input power: [] temp: [] - psu_skips: {} x86_64-arista_7060_cx32s: @@ -2687,7 +1784,7 @@ sensors_checks: - dps1900-i2c-7-58/vout1/in2_crit_alarm - dps1900-i2c-7-58/vout1/in2_lcrit_alarm temp: - - coretemp-isa-0000/P[a-z]* id 0/temp1_crit_alarm + - coretemp-isa-0000/Physical id 0/temp1_crit_alarm - coretemp-isa-0000/Core 0/temp2_crit_alarm - coretemp-isa-0000/Core 1/temp3_crit_alarm - dps1900-i2c-6-58/PSU1 primary hotspot temp/temp1_alarm @@ -2718,8 +1815,8 @@ sensors_checks: - - dps1900-i2c-7-58/iout1/curr2_input - dps1900-i2c-7-58/iout1/curr2_max temp: - - - coretemp-isa-0000/P[a-z]* id 0/temp1_input - - coretemp-isa-0000/P[a-z]* id 0/temp1_max + - - coretemp-isa-0000/Physical id 0/temp1_input + - coretemp-isa-0000/Physical id 0/temp1_max - - coretemp-isa-0000/Core 0/temp2_input - coretemp-isa-0000/Core 0/temp2_max - - coretemp-isa-0000/Core 1/temp3_input @@ -2744,6 +1841,184 @@ sensors_checks: psu_skips: {} + Celestica-DX010-C32: + alarms: + fan: + - dps460-i2c-10-5a/fan1/fan1_alarm + - dps460-i2c-10-5a/fan1/fan1_fault + - dps460-i2c-11-5b/fan1/fan1_alarm + - dps460-i2c-11-5b/fan1/fan1_fault + - emc2305-i2c-13-2e/fan1/fan2_alarm + - emc2305-i2c-13-2e/fan1/fan2_fault + - emc2305-i2c-13-2e/fan2/fan1_alarm + - emc2305-i2c-13-2e/fan2/fan1_fault + - emc2305-i2c-13-2e/fan3/fan4_alarm + - emc2305-i2c-13-2e/fan3/fan4_fault + - emc2305-i2c-13-2e/fan4/fan5_alarm + - emc2305-i2c-13-2e/fan4/fan5_fault + - emc2305-i2c-13-2e/fan5/fan3_alarm + - emc2305-i2c-13-2e/fan5/fan3_fault + - emc2305-i2c-13-4d/fan1/fan2_alarm + - emc2305-i2c-13-4d/fan1/fan2_fault + - 
emc2305-i2c-13-4d/fan2/fan4_alarm + - emc2305-i2c-13-4d/fan2/fan4_fault + - emc2305-i2c-13-4d/fan3/fan5_alarm + - emc2305-i2c-13-4d/fan3/fan5_fault + - emc2305-i2c-13-4d/fan4/fan3_alarm + - emc2305-i2c-13-4d/fan4/fan3_fault + - emc2305-i2c-13-4d/fan5/fan1_alarm + - emc2305-i2c-13-4d/fan5/fan1_fault + power: + - dps460-i2c-10-5a/iin/curr1_crit_alarm + - dps460-i2c-10-5a/iin/curr1_max_alarm + - dps460-i2c-10-5a/iout1/curr2_crit_alarm + - dps460-i2c-10-5a/iout1/curr2_max_alarm + - dps460-i2c-10-5a/pin/power1_alarm + - dps460-i2c-10-5a/pout1/power2_cap_alarm + - dps460-i2c-10-5a/pout1/power2_crit_alarm + - dps460-i2c-10-5a/pout1/power2_max_alarm + - dps460-i2c-11-5b/iin/curr1_crit_alarm + - dps460-i2c-11-5b/iin/curr1_max_alarm + - dps460-i2c-11-5b/iout1/curr2_crit_alarm + - dps460-i2c-11-5b/iout1/curr2_max_alarm + - dps460-i2c-11-5b/pin/power1_alarm + - dps460-i2c-11-5b/pout1/power2_max_alarm + - dps460-i2c-11-5b/vout1/in3_crit_alarm + - dps460-i2c-11-5b/vout1/in3_lcrit_alarm + temp: + - coretemp-isa-0000/Core 0/temp2_crit_alarm + - coretemp-isa-0000/Core 1/temp3_crit_alarm + - coretemp-isa-0000/Core 2/temp4_crit_alarm + - coretemp-isa-0000/Core 3/temp5_crit_alarm + - dps460-i2c-10-5a/Power Supply 1 temp sensor 1/temp1_max_alarm + - dps460-i2c-10-5a/Power Supply 1 temp sensor 2/temp2_max_alarm + - dps460-i2c-10-5a/Power Supply 1 temp sensor 3/temp3_max_alarm + - dps460-i2c-11-5b/Power Supply 2 temp sensor 1/temp1_max_alarm + - dps460-i2c-11-5b/Power Supply 2 temp sensor 2/temp2_max_alarm + - dps460-i2c-11-5b/Power Supply 2 temp sensor 3/temp3_max_alarm + compares: + fan: [] + power: + - - dps460-i2c-10-5a/iin/curr1_input + - dps460-i2c-10-5a/iin/curr1_crit + - - dps460-i2c-10-5a/iin/curr1_input + - dps460-i2c-10-5a/iin/curr1_max + - - dps460-i2c-10-5a/iout1/curr2_input + - dps460-i2c-10-5a/iout1/curr2_crit + - - dps460-i2c-10-5a/iout1/curr2_input + - dps460-i2c-10-5a/iout1/curr2_max + - - dps460-i2c-10-5a/pin/power1_input + - dps460-i2c-10-5a/pin/power1_max + - - 
dps460-i2c-10-5a/pout1/power2_input + - dps460-i2c-10-5a/pout1/power2_crit + - - dps460-i2c-10-5a/pout1/power2_input + - dps460-i2c-10-5a/pout1/power2_max + - - dps460-i2c-11-5b/iin/curr1_input + - dps460-i2c-11-5b/iin/curr1_crit + - - dps460-i2c-11-5b/iin/curr1_input + - dps460-i2c-11-5b/iin/curr1_max + - - dps460-i2c-11-5b/iout1/curr2_input + - dps460-i2c-11-5b/iout1/curr2_crit + - - dps460-i2c-11-5b/iout1/curr2_input + - dps460-i2c-11-5b/iout1/curr2_max + - - dps460-i2c-11-5b/pin/power1_input + - dps460-i2c-11-5b/pin/power1_max + - - dps460-i2c-11-5b/pout1/power2_input + - dps460-i2c-11-5b/pout1/power2_crit + - - dps460-i2c-11-5b/pout1/power2_input + - dps460-i2c-11-5b/pout1/power2_max + temp: + - - coretemp-isa-0000/Core 0/temp2_input + - coretemp-isa-0000/Core 0/temp2_crit + - - coretemp-isa-0000/Core 0/temp2_input + - coretemp-isa-0000/Core 0/temp2_max + - - coretemp-isa-0000/Core 1/temp3_input + - coretemp-isa-0000/Core 1/temp3_crit + - - coretemp-isa-0000/Core 1/temp3_input + - coretemp-isa-0000/Core 1/temp3_max + - - coretemp-isa-0000/Core 2/temp4_input + - coretemp-isa-0000/Core 2/temp4_crit + - - coretemp-isa-0000/Core 2/temp4_input + - coretemp-isa-0000/Core 2/temp4_max + - - coretemp-isa-0000/Core 3/temp5_input + - coretemp-isa-0000/Core 3/temp5_crit + - - coretemp-isa-0000/Core 3/temp5_input + - coretemp-isa-0000/Core 3/temp5_max + - - dps460-i2c-10-5a/Power Supply 1 temp sensor 1/temp1_input + - dps460-i2c-10-5a/Power Supply 1 temp sensor 1/temp1_max + - - dps460-i2c-10-5a/Power Supply 1 temp sensor 2/temp2_input + - dps460-i2c-10-5a/Power Supply 1 temp sensor 2/temp2_max + - - dps460-i2c-10-5a/Power Supply 1 temp sensor 3/temp3_input + - dps460-i2c-10-5a/Power Supply 1 temp sensor 3/temp3_max + - - dps460-i2c-11-5b/Power Supply 2 temp sensor 1/temp1_input + - dps460-i2c-11-5b/Power Supply 2 temp sensor 1/temp1_max + - - dps460-i2c-11-5b/Power Supply 2 temp sensor 2/temp2_input + - dps460-i2c-11-5b/Power Supply 2 temp sensor 2/temp2_max + - - 
dps460-i2c-11-5b/Power Supply 2 temp sensor 3/temp3_input + - dps460-i2c-11-5b/Power Supply 2 temp sensor 3/temp3_max + - - lm75b-i2c-14-48/Rear-panel temp sensor 1/temp1_input + - lm75b-i2c-14-48/Rear-panel temp sensor 1/temp1_max + - - lm75b-i2c-14-48/Rear-panel temp sensor 1/temp1_input + - lm75b-i2c-14-48/Rear-panel temp sensor 1/temp1_max_hyst + - - lm75b-i2c-15-4e/Rear-panel temp sensor 2/temp1_input + - lm75b-i2c-15-4e/Rear-panel temp sensor 2/temp1_max + - - lm75b-i2c-15-4e/Rear-panel temp sensor 2/temp1_input + - lm75b-i2c-15-4e/Rear-panel temp sensor 2/temp1_max_hyst + - - lm75b-i2c-5-48/Rear-panel temp sensor 1/temp1_input + - lm75b-i2c-5-48/Rear-panel temp sensor 1/temp1_max + - - lm75b-i2c-5-48/Rear-panel temp sensor 1/temp1_input + - lm75b-i2c-5-48/Rear-panel temp sensor 1/temp1_max_hyst + - - lm75b-i2c-6-49/Front-panel temp sensor 2/temp1_input + - lm75b-i2c-6-49/Front-panel temp sensor 2/temp1_max + - - lm75b-i2c-6-49/Front-panel temp sensor 2/temp1_input + - lm75b-i2c-6-49/Front-panel temp sensor 2/temp1_max_hyst + - - lm75b-i2c-7-4a/ASIC temp sensor/temp1_input + - lm75b-i2c-7-4a/ASIC temp sensor/temp1_max + - - lm75b-i2c-7-4a/ASIC temp sensor/temp1_input + - lm75b-i2c-7-4a/ASIC temp sensor/temp1_max_hyst + non_zero: + fan: + - dps460-i2c-10-5a/fan1/fan1_input + - dps460-i2c-11-5b/fan1/fan1_input + - emc2305-i2c-13-2e/fan1/fan2_input + - emc2305-i2c-13-2e/fan2/fan1_input + - emc2305-i2c-13-2e/fan3/fan4_input + - emc2305-i2c-13-2e/fan4/fan5_input + - emc2305-i2c-13-2e/fan5/fan3_input + - emc2305-i2c-13-4d/fan1/fan2_input + - emc2305-i2c-13-4d/fan2/fan4_input + - emc2305-i2c-13-4d/fan3/fan5_input + - emc2305-i2c-13-4d/fan4/fan3_input + - emc2305-i2c-13-4d/fan5/fan1_input + power: + - dps460-i2c-10-5a/iin/curr1_input + - dps460-i2c-10-5a/iout1/curr2_input + - dps460-i2c-10-5a/pin/power1_input + - dps460-i2c-10-5a/pout1/power2_input + - dps460-i2c-10-5a/vin/in1_input + - dps460-i2c-11-5b/iin/curr1_input + - dps460-i2c-11-5b/iout1/curr2_input + - 
dps460-i2c-11-5b/pin/power1_input + - dps460-i2c-11-5b/pout1/power2_input + - dps460-i2c-11-5b/vin/in1_input + - dps460-i2c-11-5b/vout1/in3_input + temp: + - coretemp-isa-0000/Core 0/temp2_input + - coretemp-isa-0000/Core 1/temp3_input + - coretemp-isa-0000/Core 2/temp4_input + - coretemp-isa-0000/Core 3/temp5_input + - dps460-i2c-10-5a/Power Supply 1 temp sensor 1/temp1_input + - dps460-i2c-10-5a/Power Supply 1 temp sensor 2/temp2_input + - dps460-i2c-10-5a/Power Supply 1 temp sensor 3/temp3_input + - dps460-i2c-11-5b/Power Supply 2 temp sensor 1/temp1_input + - dps460-i2c-11-5b/Power Supply 2 temp sensor 2/temp2_input + - dps460-i2c-11-5b/Power Supply 2 temp sensor 3/temp3_input + - lm75b-i2c-14-48/Rear-panel temp sensor 1/temp1_input + - lm75b-i2c-15-4e/Rear-panel temp sensor 2/temp1_input + - lm75b-i2c-5-48/Rear-panel temp sensor 1/temp1_input + - lm75b-i2c-6-49/Front-panel temp sensor 2/temp1_input + - lm75b-i2c-7-4a/ASIC temp sensor/temp1_input + psu_skips: {} x86_64-cel_e1031-r0: alarms: fan: [] @@ -2851,108 +2126,3 @@ sensors_checks: - pmbus-i2c-5-58/Power supply 2 sensor/temp3_input psu_skips: {} - - et6448m: - alarms: - fan: - - adt7473-i2c-0-2e/rear fan 1/fan1_alarm - - adt7473-i2c-0-2e/rear fan 2/fan2_alarm - power: - - adt7473-i2c-0-2e/+3.3V/in2_alarm - temp: - - adt7473-i2c-0-2e/temp1/temp1_alarm - - adt7473-i2c-0-2e/temp1/temp1_fault - - adt7473-i2c-0-2e/Board Temp/temp2_alarm - - adt7473-i2c-0-2e/temp3/temp3_alarm - - adt7473-i2c-0-2e/temp3/temp3_fault - compares: - fan: [] - power: - - - adt7473-i2c-0-2e/+3.3V/in2_input - - adt7473-i2c-0-2e/+3.3V/in2_max - temp: - - - adt7473-i2c-0-2e/temp1/temp1_input - - adt7473-i2c-0-2e/temp1/temp1_crit - - - adt7473-i2c-0-2e/Board Temp/temp2_input - - adt7473-i2c-0-2e/Board Temp/temp2_crit - - - adt7473-i2c-0-2e/temp3/temp3_input - - adt7473-i2c-0-2e/temp3/temp3_crit - non_zero: - fan: - - adt7473-i2c-0-2e/rear fan 1/fan1_input - - adt7473-i2c-0-2e/rear fan 2/fan2_input - power: - - 
adt7473-i2c-0-2e/+3.3V/in2_input - temp: [] - psu_skips: {} - - x86_64-juniper_qfx5210-r0: - alarms: - fan: - - qfx5210_64x_fan-i2c-17-68/fan1/fan1_fault - - qfx5210_64x_fan-i2c-17-68/fan2/fan2_fault - - qfx5210_64x_fan-i2c-17-68/fan3/fan3_fault - - qfx5210_64x_fan-i2c-17-68/fan4/fan4_fault - - qfx5210_64x_fan-i2c-17-68/fan11/fan11_fault - - qfx5210_64x_fan-i2c-17-68/fan12/fan12_fault - - qfx5210_64x_fan-i2c-17-68/fan13/fan13_fault - - qfx5210_64x_fan-i2c-17-68/fan14/fan14_fault - power: [] - temp: - - coretemp-isa-0000/Core 0/temp2_crit_alarm - - coretemp-isa-0000/Core 1/temp3_crit_alarm - - coretemp-isa-0000/Core 2/temp4_crit_alarm - - coretemp-isa-0000/Core 3/temp5_crit_alarm - - ym2851-i2c-10-5b/temp1/temp1_fault - - ym2851-i2c-9-58/temp1/temp1_fault - compares: - fan: [] - power: [] - temp: - - - coretemp-isa-0000/Core 0/temp2_input - - coretemp-isa-0000/Core 0/temp2_crit - - - coretemp-isa-0000/Core 1/temp3_input - - coretemp-isa-0000/Core 1/temp3_crit - - - coretemp-isa-0000/Core 2/temp4_input - - coretemp-isa-0000/Core 2/temp4_crit - - - coretemp-isa-0000/Core 3/temp5_input - - coretemp-isa-0000/Core 3/temp5_crit - - - lm75-i2c-18-48/temp1/temp1_input - - lm75-i2c-18-48/temp1/temp1_max - - - lm75-i2c-18-49/temp1/temp1_input - - lm75-i2c-18-49/temp1/temp1_max - - - lm75-i2c-18-4a/temp1/temp1_input - - lm75-i2c-18-4a/temp1/temp1_max - - - lm75-i2c-18-4b/temp1/temp1_input - - lm75-i2c-18-4b/temp1/temp1_max - - - lm75-i2c-17-4d/temp1/temp1_input - - lm75-i2c-17-4d/temp1/temp1_max - - - lm75-i2c-17-4e/temp1/temp1_input - - lm75-i2c-17-4e/temp1/temp1_max - non_zero: - fan: - - qfx5210_64x_fan-i2c-17-68/fan1/fan1_input - - qfx5210_64x_fan-i2c-17-68/fan2/fan2_input - - qfx5210_64x_fan-i2c-17-68/fan3/fan3_input - - qfx5210_64x_fan-i2c-17-68/fan4/fan4_input - - qfx5210_64x_fan-i2c-17-68/fan11/fan11_input - - qfx5210_64x_fan-i2c-17-68/fan12/fan12_input - - qfx5210_64x_fan-i2c-17-68/fan13/fan13_input - - qfx5210_64x_fan-i2c-17-68/fan14/fan14_input - - 
ym2851-i2c-10-5b/fan1/fan1_input - - ym2851-i2c-9-58/fan1/fan1_input - power: - - ym2851-i2c-10-5b/power2/power2_input - - ym2851-i2c-9-58/power2/power2_input - temp: [] - psu_skips: - ym2851-i2c-10-5b: - number: 2 - side: right - skip_list: - - ym2851-i2c-10-5b - ym2851-i2c-9-58: - number: 1 - side: left - skip_list: - - ym2851-i2c-9-58 diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables index 15d7a64cacd..cd3577c097d 100644 --- a/ansible/group_vars/sonic/variables +++ b/ansible/group_vars/sonic/variables @@ -1,13 +1,11 @@ ansible_ssh_user: admin -ansible_connection: multi_passwd_ssh -ansible_altpassword: YourPaSsWoRd sonic_version: "v2" broadcom_hwskus: [ "Force10-S6000", "Accton-AS7712-32X", "Celestica-DX010-C32", "Seastone-DX010", "Celestica-E1031-T48S4"] broadcom_td2_hwskus: ['Force10-S6000', 'Force10-S6000-Q24S32', 'Arista-7050-QX32', 'Arista-7050-QX-32S'] -broadcom_th_hwskus: ['Force10-S6100', 'Arista-7060CX-32S-C32', 'Arista-7060CX-32S-C32-T1', 'Arista-7060CX-32S-D48C8', 'Celestica-DX010-C32', "Seastone-DX010" ] +broadcom_th_hwskus: ['Force10-S6100', 'Arista-7060CX-32S-C32', 'Arista-7060CX-32S-C32-T1', 'Arista-7060CX-32S-D48C8', 'Celestica-DX010-C32', "Seastone_2", "Seastone-DX010" ] broadcom_th2_hwskus: ['Arista-7260CX3-D108C8', 'Arista-7260CX3-C64', 'Arista-7260CX3-Q64'] mellanox_spc1_hwskus: [ 'ACS-MSN2700', 'ACS-MSN2740', 'ACS-MSN2100', 'ACS-MSN2410', 'ACS-MSN2010', 'Mellanox-SN2700', 'Mellanox-SN2700-D48C8' ] @@ -17,9 +15,7 @@ mellanox_hwskus: "{{ mellanox_spc1_hwskus + mellanox_spc2_hwskus + mellanox_spc3 cavium_hwskus: [ "AS7512", "XP-SIM" ] -barefoot_hwskus: [ "montara", "mavericks", "Arista-7170-64C", "newport" ] - -marvell_hwskus: [ "et6448m" ] +barefoot_hwskus: [ "montara", "mavericks", "Arista-7170-64C" ] ## Note: ## Docker volumes should be list instead of dict. 
However, if we want to keep code DRY, we diff --git a/ansible/group_vars/vm_host/creds.yml b/ansible/group_vars/vm_host/creds.yml index 029ab9a68a7..bcf80c488b8 100644 --- a/ansible/group_vars/vm_host/creds.yml +++ b/ansible/group_vars/vm_host/creds.yml @@ -1,4 +1,5 @@ --- -ansible_user: use_own_value -ansible_password: use_own_value -ansible_become_password: use_own_value +ansible_user: clsnet +ansible_password: 123456 +ansible_become_password: 123456 + diff --git a/ansible/group_vars/vm_host/main.yml b/ansible/group_vars/vm_host/main.yml index edbd360ef4c..a51df90446b 100644 --- a/ansible/group_vars/vm_host/main.yml +++ b/ansible/group_vars/vm_host/main.yml @@ -1,12 +1,9 @@ root_path: veos-vm vm_images_url: https://acsbe.blob.core.windows.net/vmimages cd_image_filename: Aboot-veos-serial-8.0.0.iso -hdd_image_filename: vEOS-lab-4.20.15M.vmdk -skip_image_downloading: false +hdd_image_filename: vEOS-lab-4.15.10M.vmdk +skip_image_downloading: True vm_console_base: 7000 memory: 2097152 max_fp_num: 4 - -ptf_bp_ip: 10.10.246.254/24 -ptf_bp_ipv6: fc0a::ff/64 diff --git a/ansible/host_vars/STR-ACS-SERV-01.yml b/ansible/host_vars/STR-ACS-SERV-01.yml index 20e5b88b6b9..9a1380011d7 100644 --- a/ansible/host_vars/STR-ACS-SERV-01.yml +++ b/ansible/host_vars/STR-ACS-SERV-01.yml @@ -1,6 +1,5 @@ mgmt_bridge: br1 mgmt_prefixlen: 23 -mgmt_gw: 10.255.0.1 -vm_mgmt_gw: 10.254.0.1 -external_port: p4p1 +mgmt_gw: 10.250.0.1 +external_port: enp59s0f0 diff --git a/ansible/host_vars/STR-ACS-SERV-02.yml b/ansible/host_vars/STR-ACS-SERV-02.yml index 43d0f89e1d4..8af81ed8629 100644 --- a/ansible/host_vars/STR-ACS-SERV-02.yml +++ b/ansible/host_vars/STR-ACS-SERV-02.yml @@ -1,4 +1,4 @@ -mgmt_bridge: br1 +mgmt_bridge: br2 mgmt_prefixlen: 23 -mgmt_gw: 10.255.0.1 -external_port: p4p1 +mgmt_gw: 10.251.0.1 +external_port: enp175s0f0 diff --git a/ansible/host_vars/STR-ACS-SERV-03.yml b/ansible/host_vars/STR-ACS-SERV-03.yml new file mode 100644 index 00000000000..8af81ed8629 --- /dev/null +++ 
b/ansible/host_vars/STR-ACS-SERV-03.yml @@ -0,0 +1,4 @@ +mgmt_bridge: br2 +mgmt_prefixlen: 23 +mgmt_gw: 10.251.0.1 +external_port: enp175s0f0 diff --git a/ansible/inventory b/ansible/inventory index 366f60d8bcf..939c8348f0b 100644 --- a/ansible/inventory +++ b/ansible/inventory @@ -1,49 +1,24 @@ -all: - children: - sonic: - children: - sonic_latest: - leaf_topo_1: - hosts: - switch1: - switch5: - ptf: - hosts: - ptf-1: - ansible_host: 10.0.0.200 - ansible_ssh_user: root - ansible_ssh_pass: password - pdu: - hosts: - pdu-1: - ansible_host: 192.168.9.2 - protocol: snmp - pdu-2: - ansible_host: 192.168.9.3 +[sonic_latest] +switch1 ansible_host=10.0.0.100 sonic_version=v2 sonic_hwsku=Force10-S6000 +switch2 ansible_host=10.0.0.101 sonic_version=v2 sonic_hwsku=ACS-MSN2700 +switch3 ansible_host=10.0.0.102 sonic_version=v2 sonic_hwsku=Force10-S6000 # LAG topo: 8 LAGs x 2 members/lag to spines; 16 ports to Tors +switch4 ansible_host=10.0.0.103 sonic_version=v2 sonic_hwsku=AS7512 sonic_portsku=32x40 +switch5 ansible_host=10.0.0.104 sonic_version=v2 sonic_hwsku=ACS-MSN2700 # LAG topo: 8 LAGs x 2 members/lag to spines; 16 ports to Tors +cel-seastone-02 ansible_host=10.250.0.100 sonic_version=v2 sonic_hwsku=Seastone-DX010 +cel-e1031-01 ansible_host=10.250.0.100 sonic_version=v2 sonic_hwsku=Celestica-E1031-T48S4 +cel-e1031-01 ansible_host=10.250.0.100 sonic_version=v2 sonic_hwsku=Celestica-E1031-T48S4 -sonic_latest: - hosts: - switch1: - ansible_host: 10.0.0.100 - sonic_version: v2 - sonic_hwsku: Force10-S6000 - pdu_host: pdu-1 - switch2: - ansible_host: 10.0.0.101 - sonic_version: v2 - sonic_hwsku: ACS-MSN2700 - pdu_host: pdu-1 - switch3: # LAG topo: 8 LAGs x 2 members/lag to spines; 16 ports to Tors - ansible_host: 10.0.0.102 - sonic_version: v2 - sonic_hwsku: Force10-S6000 - switch4: - ansible_host: 10.0.0.103 - sonic_version: v2 - sonic_hwsku: AS7512 - sonic_portsku: 32x40 - pdu_host: pdu-2 - switch5: # LAG topo: 8 LAGs x 2 members/lag to spines; 16 ports to Tors - 
ansible_host: 10.0.0.104 - sonic_version: v2 - sonic_hwsku: ACS-MSN2700 + +[sonic:children] +sonic_latest + +[leaf_topo_1] +switch1 +switch5 +cel-seastone-02 +cel-e1031-01 +cel-e1031-02 +e1031-fanout + +[ptf] +ptf-1 ansible_host=10.250.0.110 ansible_ssh_user=root ansible_ssh_pass=password diff --git a/ansible/lab b/ansible/lab old mode 100644 new mode 100755 index bbb55f1d495..cda06214719 --- a/ansible/lab +++ b/ansible/lab @@ -1,93 +1,46 @@ -all: - children: - lab: - vars: - mgmt_subnet_mask_length: 24 - children: - sonic: - children: - sonic_sn2700_40: - sonic_s6000: - sonic_s6100: - sonic_a7260: - fanout: - hosts: - str-7260-10: - ansible_host: 10.251.0.13 - str-7260-11: - ansible_host: 10.251.0.234 - str-msn2700-02: - ansible_host: 10.251.0.235 - os: sonic - ptf: - hosts: - ptf_ptf1: - ansible_host: 10.255.0.188 - ansible_ssh_user: root - ansible_ssh_pass: root - ptf_vms1-1: - ansible_host: 10.255.0.178 - ansible_ssh_user: root - ansible_ssh_pass: root - ptf_vms6-1: - ansible_host: 10.250.0.100 - ansible_ssh_user: root - ansible_ssh_pass: root +[sonic_slx] +cel-seastone-01 ansible_host=10.251.0.100 pdu_host=pdu-1 -sonic_sn2700_40: - vars: - hwsku: ACS-MSN2700 - iface_speed: 40000 - hosts: - str-msn2700-01: - ansible_host: 10.251.0.188 - serial: MT1234X56789 - base_mac: 24:8a:07:12:34:56 - syseeprom_info: - "0x21": "MSN2700" - "0x22": "MSN2700-CS2FO" - "0x23": "MT1234X56789" - "0x24": "24:8a:07:12:34:56" - "0x25": "12/07/2016" - "0x26": "0" - "0x28": "x86_64-mlnx_x86-r0" - "0x29": "2016.11-5.1.0008-9600" - "0x2A": "128" - "0x2B": "Mellanox" - "0xFE": "0xFBA1E964" +[sonic_slx:vars] +hwsku="Celestica-DX010-C32" +iface_speed='100000' -sonic_s6000: - vars: - hwsku: Force10-S6000 - iface_speed: 40000 - hosts: - lab-s6000-01: - ansible_host: 10.251.0.189 - ansible_hostv6: fec0::ffff:afa:9 - vlab-01: - ansible_host: 10.250.0.101 - ansible_hostv6: fec0::ffff:afa:1 - vlab-03: - ansible_host: 10.250.0.105 - ansible_hostv6: fec0::ffff:afa:5 - vlab-04: - ansible_host: 
10.250.0.107 - ansible_hostv6: fec0::ffff:afa:7 +[sonic_e1031] +cel-e1031-01 ansible_host=10.250.0.100 + +[sonic_e1031:vars] +hwsku="Celestica-E1031-T48S4" +iface_speed='1000' -sonic_s6100: - vars: - hwsku: Force10-S6100 - iface_speed: 40000 - hosts: - lab-s6100-01: - ansible_host: 10.251.0.190 - vlab-02: - ansible_host: 10.250.0.102 +[sonic_slx2] +cel-seastone2-01 ansible_host=10.251.0.100 pdu_host=pdu-1 + +[sonic_slx2:vars] +hwsku="Seastone_2" +iface_speed='100000' + +[sonic:children] +sonic_slx +sonic_e1031 +sonic_slx2 + +[ptf] +ptf1 ansible_host=10.250.0.110 ansible_ssh_user=root ansible_ssh_pass=root +ptf2 ansible_host=10.251.0.110 ansible_ssh_user=root ansible_ssh_pass=root +ptf_vms1-1 ansible_host=10.255.0.178 ansible_ssh_user=root ansible_ssh_pass=root +ptf_vms6-1 ansible_host=10.250.0.102 ansible_ssh_user=root ansible_ssh_pass=root + +[lab:children] +sonic +fanout + +[lab:vars] +mgmt_subnet_mask_length="24" + +[fanout] +seastone-fanout ansible_host=10.251.0.235 os=sonic ansible_ssh_user=admin ansible_ssh_pass=password +e1031-fanout ansible_host=10.250.0.235 os=sonic ansible_ssh_user=admin ansible_ssh_pass=password + +[pdu] +pdu-1 ansible_host=10.204.112.55 protocol=snmp -sonic_a7260: - vars: - iface_speed: 100000 - hosts: - lab-a7260-01: - ansible_host: 10.251.0.191 - hwsku: Arista-7260CX3-D108C8 diff --git a/ansible/library/docker.py b/ansible/library/docker.py new file mode 100644 index 00000000000..9c40ca195ef --- /dev/null +++ b/ansible/library/docker.py @@ -0,0 +1,1777 @@ +#!/usr/bin/python + +# (c) 2013, Cove Schneider +# (c) 2014, Joshua Conner +# (c) 2014, Pavel Antonov +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +DOCUMENTATION = ''' +--- +module: docker +version_added: "1.4" +short_description: manage docker containers +description: + - Manage the life cycle of docker containers. +options: + count: + description: + - Number of matching containers that should be in the desired state. + default: 1 + image: + description: + - Container image used to match and launch containers. + required: true + pull: + description: + - Control when container images are updated from the C(docker_url) registry. + If "missing," images will be pulled only when missing from the host; + if '"always," the registry will be checked for a newer version of the + image' each time the task executes. + default: missing + choices: [ "missing", "always" ] + version_added: "1.9" + command: + description: + - Command used to match and launch containers. + default: null + name: + description: + - Name used to match and uniquely name launched containers. Explicit names + are used to uniquely identify a single container or to link among + containers. Mutually exclusive with a "count" other than "1". + default: null + version_added: "1.5" + ports: + description: + - "List containing private to public port mapping specification. + Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' + where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - a host interface. + The container ports need to be exposed either in the Dockerfile or via the C(expose) option." 
+ default: null + version_added: "1.5" + expose: + description: + - List of additional container ports to expose for port mappings or links. + If the port is already exposed using EXPOSE in a Dockerfile, you don't + need to expose it again. + default: null + version_added: "1.5" + publish_all_ports: + description: + - Publish all exposed ports to the host interfaces. + default: false + version_added: "1.5" + volumes: + description: + - List of volumes to mount within the container using docker CLI-style + - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' + default: null + volumes_from: + description: + - List of names of containers to mount volumes from. + default: null + links: + description: + - List of other containers to link within this container with an optional + - 'alias. Use docker CLI-style syntax: C(redis:myredis).' + default: null + version_added: "1.5" + log_driver: + description: + - You can specify a different logging driver for the container than for the daemon. + "json-file" Default logging driver for Docker. Writes JSON messages to file. + docker logs command is available only for this logging driver. + "none" disables any logging for the container. + "syslog" Syslog logging driver for Docker. Writes log messages to syslog. + docker logs command is not available for this logging driver. + "journald" Journald logging driver for Docker. Writes log messages to "journald". + "gelf" Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. + "fluentd" Fluentd logging driver for Docker. Writes log messages to "fluentd" (forward input). + If not defined explicitly, the Docker daemon's default ("json-file") will apply. + Requires docker >= 1.6.0. 
+ required: false + default: json-file + choices: + - json-file + - none + - syslog + - journald + - gelf + - fluentd + version_added: "2.0" + log_opt: + description: + - Additional options to pass to the logging driver selected above. See Docker `log-driver + ` documentation for more information. + Requires docker >=1.7.0. + required: false + default: null + version_added: "2.0" + memory_limit: + description: + - RAM allocated to the container as a number of bytes or as a human-readable + string like "512MB". Leave as "0" to specify no limit. + default: 0 + docker_url: + description: + - URL of the host running the docker daemon. This will default to the env + var DOCKER_HOST if unspecified. + default: ${DOCKER_HOST} or unix://var/run/docker.sock + use_tls: + description: + - Whether to use tls to connect to the docker server. "no" means not to + use tls (and ignore any other tls related parameters). "encrypt" means + to use tls to encrypt the connection to the server. "verify" means to + also verify that the server's certificate is valid for the server + (this both verifies the certificate against the CA and that the + certificate was issued for that host. If this is unspecified, tls will + only be used if one of the other tls options require it. + choices: [ "no", "encrypt", "verify" ] + version_added: "1.9" + tls_client_cert: + description: + - Path to the PEM-encoded certificate used to authenticate docker client. + If specified tls_client_key must be valid + default: ${DOCKER_CERT_PATH}/cert.pem + version_added: "1.9" + tls_client_key: + description: + - Path to the PEM-encoded key used to authenticate docker client. If + specified tls_client_cert must be valid + default: ${DOCKER_CERT_PATH}/key.pem + version_added: "1.9" + tls_ca_cert: + description: + - Path to a PEM-encoded certificate authority to secure the Docker connection. + This has no effect if use_tls is encrypt. 
+ default: ${DOCKER_CERT_PATH}/ca.pem + version_added: "1.9" + tls_hostname: + description: + - A hostname to check matches what's supplied in the docker server's + certificate. If unspecified, the hostname is taken from the docker_url. + default: Taken from docker_url + version_added: "1.9" + docker_api_version: + description: + - Remote API version to use. This defaults to the current default as + specified by docker-py. + default: docker-py default remote API version + version_added: "1.8" + docker_user: + description: + - Username or UID to use within the container + required: false + default: null + version_added: "2.0" + username: + description: + - Remote API username. + default: null + password: + description: + - Remote API password. + default: null + email: + description: + - Remote API email. + default: null + hostname: + description: + - Container hostname. + default: null + domainname: + description: + - Container domain name. + default: null + env: + description: + - Pass a dict of environment variables to the container. + default: null + dns: + description: + - List of custom DNS servers for the container. + required: false + default: null + detach: + description: + - Enable detached mode to leave the container running in background. If + disabled, fail unless the process exits cleanly. + default: true + signal: + version_added: "2.0" + description: + - With the state "killed", you can alter the signal sent to the + container. + required: false + default: KILL + state: + description: + - Assert the container's desired state. "present" only asserts that the + matching containers exist. "started" asserts that the matching + containers both exist and are running, but takes no action if any + configuration has changed. "reloaded" (added in Ansible 1.9) asserts that all matching + containers are running and restarts any that have any images or + configuration out of date. "restarted" unconditionally restarts (or + starts) the matching containers. 
"stopped" and '"killed" stop and kill + all matching containers. "absent" stops and then' removes any matching + containers. + required: false + default: started + choices: + - present + - started + - reloaded + - restarted + - stopped + - killed + - absent + privileged: + description: + - Whether the container should run in privileged mode or not. + default: false + lxc_conf: + description: + - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). + default: null + stdin_open: + description: + - Keep stdin open after a container is launched. + default: false + version_added: "1.6" + tty: + description: + - Allocate a pseudo-tty within the container. + default: false + version_added: "1.6" + net: + description: + - 'Network mode for the launched container: bridge, none, container:' + - or host. Requires docker >= 0.11. + default: false + version_added: "1.8" + pid: + description: + - Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.5.0 + required: false + default: None + aliases: [] + version_added: "1.9" + registry: + description: + - Remote registry URL to pull images from. + default: DockerHub + aliases: [] + version_added: "1.8" + read_only: + description: + - Mount the container's root filesystem as read only + default: null + aliases: [] + version_added: "2.0" + restart_policy: + description: + - Container restart policy. + choices: ["no", "on-failure", "always"] + default: null + version_added: "1.9" + restart_policy_retry: + description: + - Maximum number of times to restart a container. Leave as "0" for unlimited + retries. + default: 0 + version_added: "1.9" + extra_hosts: + version_added: "2.0" + description: + - Dict of custom host-to-IP mappings to be defined in the container + insecure_registry: + description: + - Use insecure private registry by HTTP instead of HTTPS. Needed for + docker-py >= 0.5.0. 
+ default: false + version_added: "1.9" + cpu_set: + description: + - CPUs in which to allow execution. Requires docker-py >= 0.6.0. + required: false + default: null + version_added: "2.0" + cap_add: + description: + - Add capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + version_added: "2.0" + cap_drop: + description: + - Drop capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + aliases: [] + version_added: "2.0" + stop_timeout: + description: + - How many seconds to wait for the container to stop before killing it. + required: false + default: 10 + version_added: "2.0" +author: + - "Cove Schneider (@cove)" + - "Joshua Conner (@joshuaconner)" + - "Pavel Antonov (@softzilla)" + - "Ash Wilson (@smashwilson)" + - "Thomas Steinbach (@ThomasSteinbach)" + - "Philippe Jandot (@zfil)" +requirements: + - "python >= 2.6" + - "docker-py >= 0.3.0" + - "The docker server >= 0.10.0" +''' + +EXAMPLES = ''' +# Containers are matched either by name (if provided) or by an exact match of +# the image they were launched with and the command they're running. The module +# can accept either a name to target a container uniquely, or a count to operate +# on multiple containers at once when it makes sense to do so. + +# Ensure that a data container with the name "mydata" exists. If no container +# by this name exists, it will be created, but not started. + +- name: data container + docker: + name: mydata + image: busybox + state: present + volumes: + - /data + +# Ensure that a Redis server is running, using the volume from the data +# container. Expose the default Redis port. + +- name: redis container + docker: + name: myredis + image: redis + command: redis-server --appendonly yes + state: started + expose: + - 6379 + volumes_from: + - mydata + +# Ensure that a container of your application server is running. This will: +# - pull the latest version of your application image from DockerHub. 
+# - ensure that a container is running with the specified name and exact image. +# If any configuration options have changed, the existing container will be +# stopped and removed, and a new one will be launched in its place. +# - link this container to the existing redis container launched above with +# an alias. +# - bind TCP port 9000 within the container to port 8080 on all interfaces +# on the host. +# - bind UDP port 9001 within the container to port 8081 on the host, only +# listening on localhost. +# - set the environment variable SECRET_KEY to "ssssh". + +- name: application container + docker: + name: myapplication + image: someuser/appimage + state: reloaded + pull: always + links: + - "myredis:aliasedredis" + ports: + - "8080:9000" + - "127.0.0.1:8081:9001/udp" + env: + SECRET_KEY: ssssh + +# Ensure that exactly five containers of another server are running with this +# exact image and command. If fewer than five are running, more will be launched; +# if more are running, the excess will be stopped. + +- name: load-balanced containers + docker: + state: reloaded + count: 5 + image: someuser/anotherappimage + command: sleep 1d + +# Unconditionally restart a service container. This may be useful within a +# handler, for example. + +- name: application service + docker: + name: myservice + image: someuser/serviceimage + state: restarted + +# Stop all containers running the specified image. + +- name: obsolete container + docker: + image: someuser/oldandbusted + state: stopped + +# Stop and remove a container with the specified name. 
+ +- name: obsolete container + docker: + name: ohno + image: someuser/oldandbusted + state: absent + +# Example Syslogging Output + +- name: myservice container + docker: + name: myservice + image: someservice/someimage + state: reloaded + log_driver: syslog + log_opt: + syslog-address: tcp://my-syslog-server:514 + syslog-facility: daemon + syslog-tag: myservice +''' + +HAS_DOCKER_PY = True +DEFAULT_DOCKER_API_VERSION = None + +import sys +import json +import os +import shlex +from urlparse import urlparse +try: + import docker.client + import docker.utils + import docker.errors + from requests.exceptions import RequestException +except ImportError: + HAS_DOCKER_PY = False + +if HAS_DOCKER_PY: + try: + from docker.errors import APIError as DockerAPIError + except ImportError: + from docker.client import APIError as DockerAPIError + try: + # docker-py 1.2+ + import docker.constants + DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION + except (ImportError, AttributeError): + # docker-py less than 1.2 + DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION + + +def _human_to_bytes(number): + suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + + if isinstance(number, int): + return number + if isinstance(number, str) and number.isdigit(): + return int(number) + if number[-1] == suffixes[0] and number[-2].isdigit(): + return number[:-1] + + i = 1 + for each in suffixes[1:]: + if number[-len(each):] == suffixes[i]: + return int(number[:-len(each)]) * (1024 ** i) + i = i + 1 + + raise ValueError('Could not convert %s to integer' % (number,)) + + +def _ansible_facts(container_list): + return {"docker_containers": container_list} + + +def _docker_id_quirk(inspect): + # XXX: some quirk in docker + if 'ID' in inspect: + inspect['Id'] = inspect['ID'] + del inspect['ID'] + return inspect + + +def get_split_image_tag(image): + # If image contains a host or org name, omit that from our check + if '/' in image: + registry, resource = 
image.rsplit('/', 1) + else: + registry, resource = None, image + + # now we can determine if image has a tag or a digest + tag = "latest" + basename = resource + for s in ['@',':']: + if s in resource: + basename, tag = resource.split(s, 1) + break + + if registry: + fullname = '/'.join((registry, basename)) + else: + fullname = basename + + return fullname, tag + +def normalize_image(image): + """ + Normalize a Docker image name to include the implied :latest tag. + """ + + return ":".join(get_split_image_tag(image)) + + +def is_running(container): + '''Return True if an inspected container is in a state we consider "running."''' + + return container['State']['Running'] == True and not container['State'].get('Ghost', False) + + +def get_docker_py_versioninfo(): + if hasattr(docker, '__version__'): + # a '__version__' attribute was added to the module but not until + # after 0.3.0 was pushed to pypi. If it's there, use it. + version = [] + for part in docker.__version__.split('.'): + try: + version.append(int(part)) + except ValueError: + for idx, char in enumerate(part): + if not char.isdigit(): + nondigit = part[idx:] + digit = part[:idx] + break + if digit: + version.append(int(digit)) + if nondigit: + version.append(nondigit) + elif hasattr(docker.Client, '_get_raw_response_socket'): + # HACK: if '__version__' isn't there, we check for the existence of + # `_get_raw_response_socket` in the docker.Client class, which was + # added in 0.3.0 + version = (0, 3, 0) + else: + # This is untrue but this module does not function with a version less + # than 0.3.0 so it's okay to lie here. + version = (0,) + + return tuple(version) + + +def check_dependencies(module): + """ + Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a + helpful error message if it isn't. 
+ """ + if not HAS_DOCKER_PY: + module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") + else: + versioninfo = get_docker_py_versioninfo() + if versioninfo < (0, 3, 0): + module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") + + +class DockerManager(object): + + counters = dict( + created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0 + ) + reload_reasons = [] + _capabilities = set() + + # Map optional parameters to minimum (docker-py version, server APIVersion) + # docker-py version is a tuple of ints because we have to compare them + # server APIVersion is passed to a docker-py function that takes strings + _cap_ver_req = { + 'dns': ((0, 3, 0), '1.10'), + 'volumes_from': ((0, 3, 0), '1.10'), + 'restart_policy': ((0, 5, 0), '1.14'), + 'extra_hosts': ((0, 7, 0), '1.3.1'), + 'pid': ((1, 0, 0), '1.17'), + 'log_driver': ((1, 2, 0), '1.18'), + 'log_opt': ((1, 2, 0), '1.18'), + 'host_config': ((0, 7, 0), '1.15'), + 'cpu_set': ((0, 6, 0), '1.14'), + 'cap_add': ((0, 5, 0), '1.14'), + 'cap_drop': ((0, 5, 0), '1.14'), + 'read_only': ((1, 0, 0), '1.17'), + 'stop_timeout': ((0, 5, 0), '1.0'), + # Clientside only + 'insecure_registry': ((0, 5, 0), '0.0') + } + + def __init__(self, module): + self.module = module + + self.binds = None + self.volumes = None + if self.module.params.get('volumes'): + self.binds = {} + self.volumes = [] + vols = self.module.params.get('volumes') + for vol in vols: + parts = vol.split(":") + # regular volume + if len(parts) == 1: + self.volumes.append(parts[0]) + # host mount (e.g. 
/mnt:/tmp, bind mounts host's /tmp to /mnt in the container) + elif 2 <= len(parts) <= 3: + # default to read-write + ro = False + # with supplied bind mode + if len(parts) == 3: + if parts[2] not in ['ro', 'rw']: + self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + else: + ro = parts[2] == 'ro' + self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } + else: + self.module.fail_json(msg='volumes support 1 to 3 arguments') + + self.lxc_conf = None + if self.module.params.get('lxc_conf'): + self.lxc_conf = [] + options = self.module.params.get('lxc_conf') + for option in options: + parts = option.split(':', 1) + self.lxc_conf.append({"Key": parts[0], "Value": parts[1]}) + + self.exposed_ports = None + if self.module.params.get('expose'): + self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose')) + + self.port_bindings = None + if self.module.params.get('ports'): + self.port_bindings = self.get_port_bindings(self.module.params.get('ports')) + + self.links = None + if self.module.params.get('links'): + self.links = self.get_links(self.module.params.get('links')) + + self.env = self.module.params.get('env', None) + + # Connect to the docker server using any configured host and TLS settings. 
+ + env_host = os.getenv('DOCKER_HOST') + env_docker_verify = os.getenv('DOCKER_TLS_VERIFY') + env_cert_path = os.getenv('DOCKER_CERT_PATH') + env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME') + + docker_url = module.params.get('docker_url') + if not docker_url: + if env_host: + docker_url = env_host + else: + docker_url = 'unix://var/run/docker.sock' + + docker_api_version = module.params.get('docker_api_version') + + tls_client_cert = module.params.get('tls_client_cert', None) + if not tls_client_cert and env_cert_path: + tls_client_cert = os.path.join(env_cert_path, 'cert.pem') + + tls_client_key = module.params.get('tls_client_key', None) + if not tls_client_key and env_cert_path: + tls_client_key = os.path.join(env_cert_path, 'key.pem') + + tls_ca_cert = module.params.get('tls_ca_cert') + if not tls_ca_cert and env_cert_path: + tls_ca_cert = os.path.join(env_cert_path, 'ca.pem') + + tls_hostname = module.params.get('tls_hostname') + if tls_hostname is None: + if env_docker_hostname: + tls_hostname = env_docker_hostname + else: + parsed_url = urlparse(docker_url) + if ':' in parsed_url.netloc: + tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')] + else: + tls_hostname = parsed_url + if not tls_hostname: + tls_hostname = True + + # use_tls can be one of four values: + # no: Do not use tls + # encrypt: Use tls. We may do client auth. We will not verify the server + # verify: Use tls. We may do client auth. 
We will verify the server + # None: Only use tls if the parameters for client auth were specified + # or tls_ca_cert (which requests verifying the server with + # a specific ca certificate) + use_tls = module.params.get('use_tls') + if use_tls is None and env_docker_verify is not None: + use_tls = 'verify' + + tls_config = None + if use_tls != 'no': + params = {} + + # Setup client auth + if tls_client_cert and tls_client_key: + params['client_cert'] = (tls_client_cert, tls_client_key) + + # We're allowed to verify the connection to the server + if use_tls == 'verify' or (use_tls is None and tls_ca_cert): + if tls_ca_cert: + params['ca_cert'] = tls_ca_cert + params['verify'] = True + params['assert_hostname'] = tls_hostname + else: + params['verify'] = True + params['assert_hostname'] = tls_hostname + elif use_tls == 'encrypt': + params['verify'] = False + + if params: + # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 + docker_url = docker_url.replace('tcp://', 'https://') + tls_config = docker.tls.TLSConfig(**params) + + self.client = docker.Client(base_url=docker_url, + version=docker_api_version, + tls=tls_config) + + self.docker_py_versioninfo = get_docker_py_versioninfo() + + def _check_capabilities(self): + """ + Create a list of available capabilities + """ + api_version = self.client.version()['ApiVersion'] + for cap, req_vers in self._cap_ver_req.items(): + if (self.docker_py_versioninfo >= req_vers[0] and + docker.utils.compare_version(req_vers[1], api_version) >= 0): + self._capabilities.add(cap) + + def ensure_capability(self, capability, fail=True): + """ + Some of the functionality this ansible module implements are only + available in newer versions of docker. Ensure that the capability + is available here. + + If fail is set to False then return True or False depending on whether + we have the capability. Otherwise, simply fail and exit the module if + we lack the capability. 
+ """ + if not self._capabilities: + self._check_capabilities() + + if capability in self._capabilities: + return True + + if not fail: + return False + + api_version = self.client.version()['ApiVersion'] + self.module.fail_json(msg='Specifying the `%s` parameter requires' + ' docker-py: %s, docker server apiversion %s; found' + ' docker-py: %s, server: %s' % ( + capability, + '.'.join(map(str, self._cap_ver_req[capability][0])), + self._cap_ver_req[capability][1], + '.'.join(map(str, self.docker_py_versioninfo)), + api_version)) + + def get_links(self, links): + """ + Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link + """ + processed_links = {} + + for link in links: + parsed_link = link.split(':', 1) + if(len(parsed_link) == 2): + processed_links[parsed_link[0]] = parsed_link[1] + else: + processed_links[parsed_link[0]] = parsed_link[0] + + return processed_links + + def get_exposed_ports(self, expose_list): + """ + Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. 
+ """ + if expose_list: + exposed = [] + for port in expose_list: + port = str(port).strip() + if port.endswith('/tcp') or port.endswith('/udp'): + port_with_proto = tuple(port.split('/')) + else: + # assume tcp protocol if not specified + port_with_proto = (port, 'tcp') + exposed.append(port_with_proto) + return exposed + else: + return None + + def get_start_params(self): + """ + Create start params + """ + params = { + 'lxc_conf': self.lxc_conf, + 'binds': self.binds, + 'port_bindings': self.port_bindings, + 'publish_all_ports': self.module.params.get('publish_all_ports'), + 'privileged': self.module.params.get('privileged'), + 'links': self.links, + 'network_mode': self.module.params.get('net'), + } + + optionals = {} + for optional_param in ('dns', 'volumes_from', 'restart_policy', + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', + 'cap_add', 'cap_drop', 'read_only', 'log_opt'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + + # docker_py only accepts 'host' or None + if 'pid' in optionals and not optionals['pid']: + optionals['pid'] = None + + if optionals['pid'] is not None: + self.ensure_capability('pid') + params['pid_mode'] = optionals['pid'] + + if optionals['extra_hosts'] is not None: + self.ensure_capability('extra_hosts') + params['extra_hosts'] = optionals['extra_hosts'] + + if optionals['log_driver'] is not None: + self.ensure_capability('log_driver') + log_config = 
docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + if optionals['log_opt'] is not None: + for k, v in optionals['log_opt'].iteritems(): + log_config.set_config_value(k, v) + log_config.type = optionals['log_driver'] + params['log_config'] = log_config + + if optionals['cap_add'] is not None: + self.ensure_capability('cap_add') + params['cap_add'] = optionals['cap_add'] + + if optionals['cap_drop'] is not None: + self.ensure_capability('cap_drop') + params['cap_drop'] = optionals['cap_drop'] + + if optionals['read_only'] is not None: + self.ensure_capability('read_only') + params['read_only'] = optionals['read_only'] + + return params + + def create_host_config(self): + """ + Create HostConfig object + """ + params = self.get_start_params() + return docker.utils.create_host_config(**params) + + def get_port_bindings(self, ports): + """ + Parse the `ports` string into a port bindings dict for the `start_container` call. + """ + binds = {} + for port in ports: + # ports could potentially be an array like [80, 443], so we make sure they're strings + # before splitting + parts = str(port).split(':') + container_port = parts[-1] + if '/' not in container_port: + container_port = int(parts[-1]) + + p_len = len(parts) + if p_len == 1: + # Bind `container_port` of the container to a dynamically + # allocated TCP port on all available interfaces of the host + # machine. + bind = ('0.0.0.0',) + elif p_len == 2: + # Bind `container_port` of the container to port `parts[0]` on + # all available interfaces of the host machine. + bind = ('0.0.0.0', int(parts[0])) + elif p_len == 3: + # Bind `container_port` of the container to port `parts[1]` on + # IP `parts[0]` of the host machine. If `parts[1]` empty bind + # to a dynamically allocated port of IP `parts[0]`. 
+ bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) + + if container_port in binds: + old_bind = binds[container_port] + if isinstance(old_bind, list): + # append to list if it already exists + old_bind.append(bind) + else: + # otherwise create list that contains the old and new binds + binds[container_port] = [binds[container_port], bind] + else: + binds[container_port] = bind + + return binds + + def get_summary_message(self): + ''' + Generate a message that briefly describes the actions taken by this + task, in English. + ''' + + parts = [] + for k, v in self.counters.iteritems(): + if v == 0: + continue + + if v == 1: + plural = "" + else: + plural = "s" + parts.append("%s %d container%s" % (k, v, plural)) + + if parts: + return ", ".join(parts) + "." + else: + return "No action taken." + + def get_reload_reason_message(self): + ''' + Generate a message describing why any reloaded containers were reloaded. + ''' + + if self.reload_reasons: + return ", ".join(self.reload_reasons) + else: + return None + + def get_summary_counters_msg(self): + msg = "" + for k, v in self.counters.iteritems(): + msg = msg + "%s %d " % (k, v) + + return msg + + def increment_counter(self, name): + self.counters[name] = self.counters[name] + 1 + + def has_changed(self): + for k, v in self.counters.iteritems(): + if v > 0: + return True + + return False + + def get_inspect_image(self): + try: + return self.client.inspect_image(self.module.params.get('image')) + except DockerAPIError as e: + if e.response.status_code == 404: + return None + else: + raise e + + def get_image_repo_tags(self): + image, tag = get_split_image_tag(self.module.params.get('image')) + if tag is None: + tag = 'latest' + resource = '%s:%s' % (image, tag) + + for image in self.client.images(name=image): + # If image is pulled by digest, RepoTags may be None + repo_tags = image.get('RepoTags', None) + if repo_tags is not None and resource in repo_tags: + return repo_tags + repo_digests = 
image.get('RepoDigests', None) + if repo_digests is not None and resource in repo_digests: + return repo_digests + return [] + + def get_inspect_containers(self, containers): + inspect = [] + for i in containers: + details = self.client.inspect_container(i['Id']) + details = _docker_id_quirk(details) + inspect.append(details) + + return inspect + + def get_differing_containers(self): + """ + Inspect all matching, running containers, and return those that were + started with parameters that differ from the ones that are provided + during this module run. A list containing the differing + containers will be returned, and a short string describing the specific + difference encountered in each container will be appended to + reload_reasons. + + This generates the set of containers that need to be stopped and + started with new parameters with state=reloaded. + """ + + running = self.get_running_containers() + current = self.get_inspect_containers(running) + + #Get API version + api_version = self.client.version()['ApiVersion'] + + image = self.get_inspect_image() + if image is None: + # The image isn't present. Assume that we're about to pull a new + # tag and *everything* will be restarted. + # + # This will give false positives if you untag an image on the host + # and there's nothing more to pull. + return current + + differing = [] + + for container in current: + + # IMAGE + # Compare the image by ID rather than name, so that containers + # will be restarted when new versions of an existing image are + # pulled. 
+ if container['Image'] != image['Id']: + self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id'])) + differing.append(container) + continue + + # COMMAND + + expected_command = self.module.params.get('command') + if expected_command: + expected_command = shlex.split(expected_command) + actual_command = container["Config"]["Cmd"] + + if actual_command != expected_command: + self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command)) + differing.append(container) + continue + + # EXPOSED PORTS + expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys()) + for p in (self.exposed_ports or []): + expected_exposed_ports.add("/".join(p)) + + actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys()) + + if actually_exposed_ports != expected_exposed_ports: + self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports)) + differing.append(container) + continue + + # VOLUMES + + expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) + if self.volumes: + expected_volume_keys.update(self.volumes) + + actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) + + if actual_volume_keys != expected_volume_keys: + self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys)) + differing.append(container) + continue + + # MEM_LIMIT + + try: + expected_mem = _human_to_bytes(self.module.params.get('memory_limit')) + except ValueError as e: + self.module.fail_json(msg=str(e)) + + #For v1.19 API and above use HostConfig, otherwise use Config + if docker.utils.compare_version('1.19', api_version) >= 0: + actual_mem = container['HostConfig']['Memory'] + else: + actual_mem = container['Config']['Memory'] + + if expected_mem and actual_mem != expected_mem: + self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) + 
differing.append(container) + continue + + # ENVIRONMENT + # actual_env is likely to include environment variables injected by + # the Dockerfile. + + expected_env = {} + + for image_env in image['ContainerConfig']['Env'] or []: + name, value = image_env.split('=', 1) + expected_env[name] = value + + if self.env: + for name, value in self.env.iteritems(): + expected_env[name] = str(value) + + actual_env = {} + for container_env in container['Config']['Env'] or []: + name, value = container_env.split('=', 1) + actual_env[name] = value + + if actual_env != expected_env: + # Don't include the environment difference in the output. + self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env)) + differing.append(container) + continue + + # HOSTNAME + + expected_hostname = self.module.params.get('hostname') + actual_hostname = container['Config']['Hostname'] + if expected_hostname and actual_hostname != expected_hostname: + self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname)) + differing.append(container) + continue + + # DOMAINNAME + + expected_domainname = self.module.params.get('domainname') + actual_domainname = container['Config']['Domainname'] + if expected_domainname and actual_domainname != expected_domainname: + self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname)) + differing.append(container) + continue + + # DETACH + + # We don't have to check for undetached containers. If it wasn't + # detached, it would have stopped before the playbook continued! + + # NAME + + # We also don't have to check name, because this is one of the + # criteria that's used to determine which container(s) match in + # the first place. 
+ + # STDIN_OPEN + + expected_stdin_open = self.module.params.get('stdin_open') + actual_stdin_open = container['Config']['OpenStdin'] + if actual_stdin_open != expected_stdin_open: + self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open)) + differing.append(container) + continue + + # TTY + + expected_tty = self.module.params.get('tty') + actual_tty = container['Config']['Tty'] + if actual_tty != expected_tty: + self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty)) + differing.append(container) + continue + + # -- "start" call differences -- + + # LXC_CONF + + if self.lxc_conf: + expected_lxc = set(self.lxc_conf) + actual_lxc = set(container['HostConfig']['LxcConf'] or []) + if actual_lxc != expected_lxc: + self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc)) + differing.append(container) + continue + + # BINDS + + expected_binds = set() + if self.binds: + for host_path, config in self.binds.iteritems(): + if isinstance(config, dict): + container_path = config['bind'] + if config['ro']: + mode = 'ro' + else: + mode = 'rw' + else: + container_path = config + mode = 'rw' + expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode)) + + actual_binds = set() + for bind in (container['HostConfig']['Binds'] or []): + if len(bind.split(':')) == 2: + actual_binds.add(bind + ":rw") + else: + actual_binds.add(bind) + + if actual_binds != expected_binds: + self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds)) + differing.append(container) + continue + + # PORT BINDINGS + + expected_bound_ports = {} + if self.port_bindings: + for container_port, config in self.port_bindings.iteritems(): + if isinstance(container_port, int): + container_port = "{0}/tcp".format(container_port) + if len(config) == 1: + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] + elif isinstance(config[0], tuple): + 
expected_bound_ports[container_port] = [] + for hostip, hostport in config: + expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)}) + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] + + actual_bound_ports = container['HostConfig']['PortBindings'] or {} + + if actual_bound_ports != expected_bound_ports: + self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports)) + differing.append(container) + continue + + # PUBLISHING ALL PORTS + + # What we really care about is the set of ports that is actually + # published. That should be caught above. + + # PRIVILEGED + + expected_privileged = self.module.params.get('privileged') + actual_privileged = container['HostConfig']['Privileged'] + if actual_privileged != expected_privileged: + self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged)) + differing.append(container) + continue + + # LINKS + + expected_links = set() + for link, alias in (self.links or {}).iteritems(): + expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias)) + + actual_links = set(container['HostConfig']['Links'] or []) + if actual_links != expected_links: + self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links)) + differing.append(container) + continue + + # NETWORK MODE + + expected_netmode = self.module.params.get('net') or 'bridge' + actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge' + if actual_netmode != expected_netmode: + self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) + differing.append(container) + continue + + # DNS + + expected_dns = set(self.module.params.get('dns') or []) + actual_dns = set(container['HostConfig']['Dns'] or []) + if actual_dns != expected_dns: + self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns)) + 
differing.append(container) + continue + + # VOLUMES_FROM + + expected_volumes_from = set(self.module.params.get('volumes_from') or []) + actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or []) + if actual_volumes_from != expected_volumes_from: + self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) + differing.append(container) + + # LOG_DRIVER + + if self.ensure_capability('log_driver', False): + expected_log_driver = self.module.params.get('log_driver') or 'json-file' + actual_log_driver = container['HostConfig']['LogConfig']['Type'] + if actual_log_driver != expected_log_driver: + self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) + differing.append(container) + continue + + if self.ensure_capability('log_opt', False): + expected_logging_opts = self.module.params.get('log_opt') or {} + actual_log_opts = container['HostConfig']['LogConfig']['Config'] + if len(set(expected_logging_opts.items()) - set(actual_log_opts.items())) != 0: + log_opt_reasons = { + 'added': dict(set(expected_logging_opts.items()) - set(actual_log_opts.items())), + 'removed': dict(set(actual_log_opts.items()) - set(expected_logging_opts.items())) + } + self.reload_reasons.append('log_opt ({0})'.format(log_opt_reasons)) + differing.append(container) + + return differing + + def get_deployed_containers(self): + """ + Return any matching containers that are already present. + """ + + command = self.module.params.get('command') + if command is not None: + command = shlex.split(command) + name = self.module.params.get('name') + if name and not name.startswith('/'): + name = '/' + name + deployed = [] + + # "images" will be a collection of equivalent "name:tag" image names + # that map to the same Docker image. 
+ inspected = self.get_inspect_image() + if inspected: + repo_tags = self.get_image_repo_tags() + else: + repo_tags = [normalize_image(self.module.params.get('image'))] + + for container in self.client.containers(all=True): + details = None + + if name: + name_list = container.get('Names') + if name_list is None: + name_list = [] + matches = name in name_list + else: + details = self.client.inspect_container(container['Id']) + details = _docker_id_quirk(details) + + running_image = normalize_image(details['Config']['Image']) + + image_matches = running_image in repo_tags + + command_matches = command == details['Config']['Cmd'] + + matches = image_matches and command_matches + + if matches: + if not details: + details = self.client.inspect_container(container['Id']) + details = _docker_id_quirk(details) + + deployed.append(details) + + return deployed + + def get_running_containers(self): + return [c for c in self.get_deployed_containers() if is_running(c)] + + def pull_image(self): + extra_params = {} + if self.module.params.get('insecure_registry'): + if self.ensure_capability('insecure_registry', fail=False): + extra_params['insecure_registry'] = self.module.params.get('insecure_registry') + + resource = self.module.params.get('image') + image, tag = get_split_image_tag(resource) + if self.module.params.get('username'): + try: + self.client.login( + self.module.params.get('username'), + password=self.module.params.get('password'), + email=self.module.params.get('email'), + registry=self.module.params.get('registry') + ) + except Exception as e: + self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e)) + try: + changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params)) + try: + last = changes[-1] + # seems Docker 1.8 puts an empty dict at the end of the + # stream; catch that and get the previous instead + # https://github.com/ansible/ansible-modules-core/issues/2043 + 
if last.strip() == '{}': + last = changes[-2] + except IndexError: + last = '{}' + status = json.loads(last).get('status', '') + if status.startswith('Status: Image is up to date for'): + # Image is already up to date. Don't increment the counter. + pass + elif (status.startswith('Status: Downloaded newer image for') or + status.startswith('Download complete')): + # Image was updated. Increment the pull counter. + self.increment_counter('pulled') + else: + # Unrecognized status string. + self.module.fail_json(msg="Unrecognized status from pull.", status=status, changes=changes) + except Exception as e: + self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) + + def create_containers(self, count=1): + try: + mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) + except ValueError as e: + self.module.fail_json(msg=str(e)) + api_version = self.client.version()['ApiVersion'] + + params = {'image': self.module.params.get('image'), + 'command': self.module.params.get('command'), + 'ports': self.exposed_ports, + 'volumes': self.volumes, + 'environment': self.env, + 'hostname': self.module.params.get('hostname'), + 'domainname': self.module.params.get('domainname'), + 'detach': self.module.params.get('detach'), + 'name': self.module.params.get('name'), + 'stdin_open': self.module.params.get('stdin_open'), + 'tty': self.module.params.get('tty'), + 'cpuset': self.module.params.get('cpu_set'), + 'user': self.module.params.get('docker_user'), + } + if self.ensure_capability('host_config', fail=False): + params['host_config'] = self.create_host_config() + + #For v1.19 API and above use HostConfig, otherwise use Config + if docker.utils.compare_version('1.19', api_version) < 0: + params['mem_limit'] = mem_limit + else: + params['host_config']['Memory'] = mem_limit + + + def do_create(count, params): + results = [] + for _ in range(count): + result = self.client.create_container(**params) + self.increment_counter('created') + 
results.append(result) + + return results + + try: + containers = do_create(count, params) + except docker.errors.APIError as e: + if e.response.status_code != 404: + raise + + self.pull_image() + containers = do_create(count, params) + + return containers + + def start_containers(self, containers): + params = {} + + if not self.ensure_capability('host_config', fail=False): + params = self.get_start_params() + + for i in containers: + self.client.start(i) + self.increment_counter('started') + + if not self.module.params.get('detach'): + status = self.client.wait(i['Id']) + if status != 0: + output = self.client.logs(i['Id'], stdout=True, stderr=True, + stream=False, timestamps=False) + self.module.fail_json(status=status, msg=output) + + def stop_containers(self, containers): + for i in containers: + self.client.stop(i['Id'], self.module.params.get('stop_timeout')) + self.increment_counter('stopped') + + return [self.client.wait(i['Id']) for i in containers] + + def remove_containers(self, containers): + for i in containers: + self.client.remove_container(i['Id']) + self.increment_counter('removed') + + def kill_containers(self, containers): + for i in containers: + self.client.kill(i['Id'], self.module.params.get('signal')) + self.increment_counter('killed') + + def restart_containers(self, containers): + for i in containers: + self.client.restart(i['Id']) + self.increment_counter('restarted') + + +class ContainerSet: + + def __init__(self, manager): + self.manager = manager + self.running = [] + self.deployed = [] + self.changed = [] + + def refresh(self): + ''' + Update our view of the matching containers from the Docker daemon. + ''' + + + self.deployed = self.manager.get_deployed_containers() + self.running = [c for c in self.deployed if is_running(c)] + + def notice_changed(self, containers): + ''' + Record a collection of containers as "changed". 
+ ''' + + self.changed.extend(containers) + + +def present(manager, containers, count, name): + '''Ensure that exactly `count` matching containers exist in any state.''' + + containers.refresh() + delta = count - len(containers.deployed) + + if delta > 0: + created = manager.create_containers(delta) + containers.notice_changed(manager.get_inspect_containers(created)) + + if delta < 0: + # If both running and stopped containers exist, remove + # stopped containers first. + containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy))) + + to_stop = [] + to_remove = [] + for c in containers.deployed[0:-delta]: + if is_running(c): + to_stop.append(c) + to_remove.append(c) + + manager.stop_containers(to_stop) + containers.notice_changed(manager.get_inspect_containers(to_remove)) + manager.remove_containers(to_remove) + +def started(manager, containers, count, name): + '''Ensure that exactly `count` matching containers exist and are running.''' + + containers.refresh() + delta = count - len(containers.running) + + if delta > 0: + if name and containers.deployed: + # A stopped container exists with the requested name. + # Clean it up before attempting to start a new one. + manager.remove_containers(containers.deployed) + + created = manager.create_containers(delta) + manager.start_containers(created) + containers.notice_changed(manager.get_inspect_containers(created)) + + if delta < 0: + excess = containers.running[0:-delta] + containers.notice_changed(manager.get_inspect_containers(excess)) + manager.stop_containers(excess) + manager.remove_containers(excess) + +def reloaded(manager, containers, count, name): + ''' + Ensure that exactly `count` matching containers exist and are + running. If any associated settings have been changed (volumes, + ports or so on), restart those containers. 
+ ''' + + containers.refresh() + + for container in manager.get_differing_containers(): + manager.stop_containers([container]) + manager.remove_containers([container]) + + started(manager, containers, count, name) + +def restarted(manager, containers, count, name): + ''' + Ensure that exactly `count` matching containers exist and are + running. Unconditionally restart any that were already running. + ''' + + containers.refresh() + + for container in manager.get_differing_containers(): + manager.stop_containers([container]) + manager.remove_containers([container]) + + manager.restart_containers(containers.running) + started(manager, containers, count, name) + +def stopped(manager, containers, count, name): + '''Stop any matching containers that are running.''' + + containers.refresh() + + manager.stop_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) + +def killed(manager, containers, count, name): + '''Kill any matching containers that are running.''' + + containers.refresh() + + manager.kill_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) + +def absent(manager, containers, count, name): + '''Stop and remove any matching containers.''' + + containers.refresh() + + manager.stop_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.deployed)) + manager.remove_containers(containers.deployed) + +def main(): + module = AnsibleModule( + argument_spec = dict( + count = dict(default=1), + image = dict(required=True), + pull = dict(required=False, default='missing', choices=['missing', 'always']), + command = dict(required=False, default=None), + expose = dict(required=False, default=None, type='list'), + ports = dict(required=False, default=None, type='list'), + publish_all_ports = dict(default=False, type='bool'), + volumes = dict(default=None, type='list'), + volumes_from = dict(default=None), + links = 
dict(default=None, type='list'), + memory_limit = dict(default=0), + memory_swap = dict(default=0), + docker_url = dict(), + use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), + tls_client_cert = dict(required=False, default=None, type='str'), + tls_client_key = dict(required=False, default=None, type='str'), + tls_ca_cert = dict(required=False, default=None, type='str'), + tls_hostname = dict(required=False, type='str', default=None), + docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), + docker_user = dict(default=None), + username = dict(default=None), + password = dict(), + email = dict(), + registry = dict(), + hostname = dict(default=None), + domainname = dict(default=None), + env = dict(type='dict'), + dns = dict(), + detach = dict(default=True, type='bool'), + state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), + signal = dict(default=None), + restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), + restart_policy_retry = dict(default=0, type='int'), + extra_hosts = dict(type='dict'), + debug = dict(default=False, type='bool'), + privileged = dict(default=False, type='bool'), + stdin_open = dict(default=False, type='bool'), + tty = dict(default=False, type='bool'), + lxc_conf = dict(default=None, type='list'), + name = dict(default=None), + net = dict(default=None), + pid = dict(default=None), + insecure_registry = dict(default=False, type='bool'), + log_driver = dict(default=None, choices=['json-file', 'none', 'syslog', 'journald', 'gelf', 'fluentd']), + log_opt = dict(default=None, type='dict'), + cpu_set = dict(default=None), + cap_add = dict(default=None, type='list'), + cap_drop = dict(default=None, type='list'), + read_only = dict(default=None, type='bool'), + stop_timeout = dict(default=10, type='int'), + ), + required_together = ( + ['tls_client_cert', 'tls_client_key'], + ), + ) + + 
check_dependencies(module) + + try: + manager = DockerManager(module) + count = int(module.params.get('count')) + name = module.params.get('name') + pull = module.params.get('pull') + + state = module.params.get('state') + if state == 'running': + # Renamed running to started in 1.9 + state = 'started' + + if count < 0: + module.fail_json(msg="Count must be greater than zero") + + if count > 1 and name: + module.fail_json(msg="Count and name must not be used together") + + # Explicitly pull new container images, if requested. Do this before + # noticing running and deployed containers so that the image names + # will differ if a newer image has been pulled. + # Missing images should be pulled first to avoid downtime when old + # container is stopped, but image for new one is now downloaded yet. + # It also prevents removal of running container before realizing + # that requested image cannot be retrieved. + if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None): + manager.pull_image() + + containers = ContainerSet(manager) + + if state == 'present': + present(manager, containers, count, name) + elif state == 'started': + started(manager, containers, count, name) + elif state == 'reloaded': + reloaded(manager, containers, count, name) + elif state == 'restarted': + restarted(manager, containers, count, name) + elif state == 'stopped': + stopped(manager, containers, count, name) + elif state == 'killed': + killed(manager, containers, count, name) + elif state == 'absent': + absent(manager, containers, count, name) + else: + module.fail_json(msg='Unrecognized state %s. Must be one of: ' + 'present; started; reloaded; restarted; ' + 'stopped; killed; absent.' 
% state) + + module.exit_json(changed=manager.has_changed(), + msg=manager.get_summary_message(), + summary=manager.counters, + reload_reasons=manager.get_reload_reason_message(), + ansible_facts=_ansible_facts(containers.changed)) + + except DockerAPIError as e: + module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) + + except RequestException as e: + module.fail_json(changed=manager.has_changed(), msg=repr(e)) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/ansible/library/minigraph_facts.py b/ansible/library/minigraph_facts.py index 15f4eeaad8d..0a27dc0632c 100644 --- a/ansible/library/minigraph_facts.py +++ b/ansible/library/minigraph_facts.py @@ -209,7 +209,7 @@ def parse_dpg(dpg, hname): pcintfname = pcintf.find(str(QName(ns, "Name"))).text pcintfmbr = pcintf.find(str(QName(ns, "AttachTo"))).text pcmbr_list = pcintfmbr.split(';', 1) - for i, member in enumerate(pcmbr_list): + for i, member in enumerate(pcmbr_list): pcmbr_list[i] = port_alias_to_name_map[member] ports[port_alias_to_name_map[member]] = {'name': port_alias_to_name_map[member], 'alias': member} pcs[pcintfname] = {'name': pcintfname, 'members': pcmbr_list} @@ -528,6 +528,9 @@ def parse_xml(filename, hostname): elif hwsku == "Celestica-DX010-C32": for i in range(1, 33): port_alias_to_name_map["etp%d" % i] = "Ethernet%d" % ((i - 1) * 4) + elif hwsku == "Seastone_2": + for i in range(1, 33): + port_alias_to_name_map["QSFP%d" % i] = "Ethernet%d" % ((i - 1) * 4) elif hwsku == "Seastone-DX010": for i in range(1, 33): port_alias_to_name_map["Eth%d" % i] = "Ethernet%d" % ((i - 1) * 4) diff --git a/ansible/minigraph/lab-a7260-01.t0-116.xml b/ansible/minigraph/lab-a7260-01.t0-116.xml index ba260ef7349..8e4836fb9cb 100644 --- a/ansible/minigraph/lab-a7260-01.t0-116.xml +++ b/ansible/minigraph/lab-a7260-01.t0-116.xml @@ -220,7 +220,7 @@ Vlan1000 - 
Ethernet1/1;Ethernet1/3;Ethernet2/1;Ethernet2/3;Ethernet3/1;Ethernet3/3;Ethernet4/1;Ethernet4/3;Ethernet5/1;Ethernet5/3;Ethernet6/1;Ethernet6/3;Ethernet7/1;Ethernet7/3;Ethernet8/1;Ethernet8/3;Ethernet9/1;Ethernet9/3;Ethernet10/1;Ethernet10/3;Ethernet11/1;Ethernet11/3;Ethernet12/1;Ethernet12/3;Ethernet21/1;Ethernet21/3;Ethernet22/1;Ethernet22/3;Ethernet23/1;Ethernet23/3;Ethernet24/1;Ethernet24/3;Ethernet25/1;Ethernet25/3;Ethernet26/1;Ethernet26/3;Ethernet27/1;Ethernet27/3;Ethernet28/1;Ethernet28/3;Ethernet29/1;Ethernet29/3;Ethernet30/1;Ethernet30/3;Ethernet31/1;Ethernet31/3;Ethernet32/1;Ethernet32/3;Ethernet33/1;Ethernet33/3;Ethernet34/1;Ethernet34/3;Ethernet35/1;Ethernet35/3;Ethernet36/1;Ethernet36/3;Ethernet37/1;Ethernet37/3;Ethernet38/1;Ethernet38/3;Ethernet39/1;Ethernet39/3;Ethernet40/1;Ethernet40/3;Ethernet41/1;Ethernet41/3;Ethernet42/1;Ethernet42/3;Ethernet43/1;Ethernet43/3;Ethernet44/1;Ethernet44/3;Ethernet45/1;Ethernet45/3;Ethernet46/1;Ethernet46/3;Ethernet47/1;Ethernet47/3;Ethernet48/1;Ethernet48/3;Ethernet49/1;Ethernet49/3;Ethernet50/1;Ethernet50/3;Ethernet51/1;Ethernet51/3;Ethernet52/1;Ethernet52/3;Ethernet53/1;Ethernet53/3;Ethernet54/1;Ethernet54/3;Ethernet55/1;Ethernet55/3;Ethernet56/1;Ethernet56/3;Ethernet57/1;Ethernet57/3;Ethernet58/1;Ethernet58/3;Ethernet59/1;Ethernet59/3;Ethernet60/1;Ethernet60/3;Ethernet61/1;Ethernet61/3;Ethernet62/1;Ethernet62/3;Ethernet63/1;Ethernet63/3;Ethernet64/1;Ethernet64/3 + 
Ethernet1/1;Ethernet1/3;Ethernet2/1;Ethernet2/3;Ethernet3/1;Ethernet3/3;Ethernet4/1;Ethernet4/3;Ethernet5/1;Ethernet5/3;Ethernet6/1;Ethernet6/3;Ethernet7/1;Ethernet7/3;Ethernet8/1;Ethernet8/3;Ethernet9/1;Ethernet9/3;Ethernet10/1;Ethernet10/3;Ethernet11/1;Ethernet11/3;Ethernet12/1;Ethernet12/3;Ethernet21/1;Ethernet21/3;Ethernet22/1;Ethernet22/3;Ethernet23/1;Ethernet23/3;Ethernet24/1;Ethernet24/3;Ethernet25/1;Ethernet25/3;Ethernet26/1;Ethernet26/3;Ethernet27/1;Ethernet27/3;Ethernet28/1;Ethernet28/3;Ethernet29/1;Ethernet29/3;Ethernet30/1;Ethernet30/3;Ethernet31/1;Ethernet31/3;Ethernet32/1;Ethernet32/3;Ethernet33/1;Ethernet33/3;Ethernet34/1;Ethernet34/3;Ethernet35/1;Ethernet35/3;Ethernet36/1;Ethernet36/3;Ethernet37/1;Ethernet37/3;Ethernet38/1;Ethernet38/3;Ethernet39/1;Ethernet39/3;Ethernet40/1;Ethernet40/3;Ethernet41/1;Ethernet41/3;Ethernet42/1;Ethernet42/3;Ethernet43/1;Ethernet43/3;Ethernet44/1;Ethernet44/3;Ethernet45/1;Ethernet45/3;Ethernet46/1;Ethernet46/3;Ethernet47/1;Ethernet47/3;Ethernet48/1;Ethernet48/3;Ethernet49/1;Ethernet49/3;Ethernet50/1;Ethernet50/3;Ethernet51/1;Ethernet51/3;Ethernet52/1;Ethernet52/3;Ethernet53/1;Ethernet53/3;Ethernet54/1;Ethernet54/3;Ethernet55/1;Ethernet55/3;Ethernet56/1;Ethernet56/3;Ethernet57/1;Ethernet57/3;Ethernet58/1;Ethernet58/3;Ethernet59/1;Ethernet59/3;Ethernet60/1;Ethernet60/3;Ethernet61/1;Ethernet61/3;Ethernet62/1;Ethernet62/3;Ethernet63/1;Ethernet63/3;Ethernet64/1;Ethernet64/3 False 0.0.0.0/0 @@ -289,12 +289,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -2783,9 +2778,9 @@ true 0 - Arista-7260CX3-D108C8 + Arista-7260CX3-D108C8 - + diff --git a/ansible/minigraph/lab-s6000-01.t0.xml b/ansible/minigraph/lab-s6000-01.t0.xml index 5301c5d8f3e..81dea7bc5eb 100644 --- a/ansible/minigraph/lab-s6000-01.t0.xml +++ b/ansible/minigraph/lab-s6000-01.t0.xml @@ -220,7 +220,7 @@ Vlan1000 - 
fortyGigE0/4;fortyGigE0/8;fortyGigE0/12;fortyGigE0/16;fortyGigE0/20;fortyGigE0/24;fortyGigE0/28;fortyGigE0/32;fortyGigE0/36;fortyGigE0/40;fortyGigE0/44;fortyGigE0/48;fortyGigE0/52;fortyGigE0/56;fortyGigE0/60;fortyGigE0/64;fortyGigE0/68;fortyGigE0/72;fortyGigE0/76;fortyGigE0/80;fortyGigE0/84;fortyGigE0/88;fortyGigE0/92;fortyGigE0/96 + fortyGigE0/4;fortyGigE0/8;fortyGigE0/12;fortyGigE0/16;fortyGigE0/20;fortyGigE0/24;fortyGigE0/28;fortyGigE0/32;fortyGigE0/36;fortyGigE0/40;fortyGigE0/44;fortyGigE0/48;fortyGigE0/52;fortyGigE0/56;fortyGigE0/60;fortyGigE0/64;fortyGigE0/68;fortyGigE0/72;fortyGigE0/76;fortyGigE0/80;fortyGigE0/84;fortyGigE0/88;fortyGigE0/92;fortyGigE0/96 False 0.0.0.0/0 @@ -289,12 +289,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -969,9 +964,9 @@ true 0 - Force10-S6000 + Force10-S6000 - + diff --git a/ansible/minigraph/lab-s6100-01.t0-64.xml b/ansible/minigraph/lab-s6100-01.t0-64.xml index fcd3091c006..fe90fdb0752 100644 --- a/ansible/minigraph/lab-s6100-01.t0-64.xml +++ b/ansible/minigraph/lab-s6100-01.t0-64.xml @@ -220,7 +220,7 @@ Vlan1000 - fortyGigE1/1/7;fortyGigE1/1/8;fortyGigE1/1/9;fortyGigE1/1/10;fortyGigE1/1/11;fortyGigE1/1/12;fortyGigE1/1/13;fortyGigE1/1/14;fortyGigE1/1/15;fortyGigE1/1/16;fortyGigE1/2/7;fortyGigE1/2/8;fortyGigE1/2/9;fortyGigE1/2/10;fortyGigE1/2/11;fortyGigE1/2/12;fortyGigE1/2/13;fortyGigE1/2/14;fortyGigE1/2/15;fortyGigE1/2/16;fortyGigE1/3/1;fortyGigE1/3/5;fortyGigE1/3/6;fortyGigE1/3/7;fortyGigE1/3/8;fortyGigE1/3/9;fortyGigE1/3/10;fortyGigE1/3/11;fortyGigE1/4/1;fortyGigE1/4/5;fortyGigE1/4/6;fortyGigE1/4/7;fortyGigE1/4/8;fortyGigE1/4/9;fortyGigE1/4/10;fortyGigE1/4/11 + 
fortyGigE1/1/7;fortyGigE1/1/8;fortyGigE1/1/9;fortyGigE1/1/10;fortyGigE1/1/11;fortyGigE1/1/12;fortyGigE1/1/13;fortyGigE1/1/14;fortyGigE1/1/15;fortyGigE1/1/16;fortyGigE1/2/7;fortyGigE1/2/8;fortyGigE1/2/9;fortyGigE1/2/10;fortyGigE1/2/11;fortyGigE1/2/12;fortyGigE1/2/13;fortyGigE1/2/14;fortyGigE1/2/15;fortyGigE1/2/16;fortyGigE1/3/1;fortyGigE1/3/5;fortyGigE1/3/6;fortyGigE1/3/7;fortyGigE1/3/8;fortyGigE1/3/9;fortyGigE1/3/10;fortyGigE1/3/11;fortyGigE1/4/1;fortyGigE1/4/5;fortyGigE1/4/6;fortyGigE1/4/7;fortyGigE1/4/8;fortyGigE1/4/9;fortyGigE1/4/10;fortyGigE1/4/11 False 0.0.0.0/0 @@ -289,12 +289,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -1497,9 +1492,9 @@ true 0 - Force10-S6100 + Force10-S6100 - + diff --git a/ansible/minigraph/lab-s6100-01.t1-64-lag.xml b/ansible/minigraph/lab-s6100-01.t1-64-lag.xml index d6eb90ecf31..bcfd6e6b80f 100644 --- a/ansible/minigraph/lab-s6100-01.t1-64-lag.xml +++ b/ansible/minigraph/lab-s6100-01.t1-64-lag.xml @@ -1155,12 +1155,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -1410,6 +1405,13 @@ Arista-VM + + ARISTA05T0 + + 10.250.0.10 + + Arista-VM + ARISTA09T0 @@ -1418,9 +1420,9 @@ Arista-VM - ARISTA20T0 + ARISTA06T0 - 10.250.0.25 + 10.250.0.11 Arista-VM @@ -1467,9 +1469,9 @@ Arista-VM - ARISTA05T0 + ARISTA20T0 - 10.250.0.10 + 10.250.0.25 Arista-VM @@ -1543,13 +1545,6 @@ Arista-VM - - ARISTA06T0 - - 10.250.0.11 - - Arista-VM - @@ -2391,9 +2386,9 @@ true 0 - Force10-S6100 + Force10-S6100 - + diff --git a/ansible/minigraph/lab-s6100-01.t1-64.xml b/ansible/minigraph/lab-s6100-01.t1-64.xml index d0a516eb474..846c4230f70 100644 --- a/ansible/minigraph/lab-s6100-01.t1-64.xml +++ b/ansible/minigraph/lab-s6100-01.t1-64.xml @@ -2635,12 +2635,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -3191,13 +3186,6 @@ Arista-VM - - ARISTA21T0 - - 10.250.0.38 - - Arista-VM - ARISTA05T0 @@ -3283,9 +3271,9 @@ Arista-VM - ARISTA07T0 
+ ARISTA21T0 - 10.250.0.24 + 10.250.0.38 Arista-VM @@ -3303,13 +3291,6 @@ Arista-VM - - ARISTA03T0 - - 10.250.0.20 - - Arista-VM - ARISTA01T0 @@ -3381,9 +3362,9 @@ Arista-VM - ARISTA25T0 + ARISTA03T0 - 10.250.0.42 + 10.250.0.20 Arista-VM @@ -3430,16 +3411,16 @@ Arista-VM - ARISTA18T0 + ARISTA07T0 - 10.250.0.35 + 10.250.0.24 Arista-VM - ARISTA30T0 + ARISTA18T0 - 10.250.0.47 + 10.250.0.35 Arista-VM @@ -3464,6 +3445,13 @@ Arista-VM + + ARISTA30T0 + + 10.250.0.47 + + Arista-VM + ARISTA33T0 @@ -3485,6 +3473,13 @@ Arista-VM + + ARISTA25T0 + + 10.250.0.42 + + Arista-VM + ARISTA04T2 @@ -4403,9 +4398,9 @@ true 0 - Force10-S6100 + Force10-S6100 - + diff --git a/ansible/minigraph/str-msn2700-01.t0.xml b/ansible/minigraph/str-msn2700-01.t0.xml index 010e08335b3..f0c6dde021b 100644 --- a/ansible/minigraph/str-msn2700-01.t0.xml +++ b/ansible/minigraph/str-msn2700-01.t0.xml @@ -220,7 +220,7 @@ Vlan1000 - etp2;etp3;etp4;etp5;etp6;etp7;etp8;etp9;etp10;etp11;etp12;etp13;etp14;etp15;etp16;etp17;etp18;etp19;etp20;etp21;etp22;etp23;etp24;etp25 + etp2;etp3;etp4;etp5;etp6;etp7;etp8;etp9;etp10;etp11;etp12;etp13;etp14;etp15;etp16;etp17;etp18;etp19;etp20;etp21;etp22;etp23;etp24;etp25 False 0.0.0.0/0 @@ -289,12 +289,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -969,9 +964,9 @@ true 0 - ACS-MSN2700 + ACS-MSN2700 - + diff --git a/ansible/minigraph/str-msn2700-01.t1-lag.xml b/ansible/minigraph/str-msn2700-01.t1-lag.xml index 0236078ac7b..d57101dae36 100644 --- a/ansible/minigraph/str-msn2700-01.t1-lag.xml +++ b/ansible/minigraph/str-msn2700-01.t1-lag.xml @@ -1075,12 +1075,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -1923,9 +1918,9 @@ true 0 - ACS-MSN2700 + ACS-MSN2700 - + diff --git a/ansible/minigraph/str-msn2700-01.t1.xml b/ansible/minigraph/str-msn2700-01.t1.xml index b1b449378af..74b0c07fdb4 100644 --- a/ansible/minigraph/str-msn2700-01.t1.xml +++ b/ansible/minigraph/str-msn2700-01.t1.xml @@ 
-1355,12 +1355,7 @@ Everflow Everflow - - ERSPANV6 - EverflowV6 - EverflowV6 - - + VTY_LINE ssh-only SSH @@ -2259,9 +2254,9 @@ true 0 - ACS-MSN2700 + ACS-MSN2700 - + diff --git a/ansible/minigraph/switch-t0.xml b/ansible/minigraph/switch-t0.xml index 4e98e659ec9..413aedf7e23 100644 --- a/ansible/minigraph/switch-t0.xml +++ b/ansible/minigraph/switch-t0.xml @@ -172,13 +172,11 @@ PortChannel01 - true fortyGigE0/112 PortChannel02 - false fortyGigE0/116 diff --git a/ansible/ocp b/ansible/ocp index b4965f598aa..c4c30df5f4b 100644 --- a/ansible/ocp +++ b/ansible/ocp @@ -1,35 +1,21 @@ -all: - children: - ocp: - children: - sonic_stable: - sonic_latest: - sonic: - children: - sonic_stable: - sonic_latest: +[sonic_stable] -sonic_latest: - hosts: - OCPSCH0104001MS: - ansible_host: 192.168.200.10 - sonic_hwsku: AS7512 - OCPSCH0104002MS: - ansible_host: 192.168.200.11 - sonic_hwsku: ACS-MSN2700 - OCPSCH01040AALF: - ansible_host: 192.168.200.12 - OCPSCH01040BBLF: - ansible_host: 192.168.200.13 - OCPSCH01040CCLF: - ansible_host: 192.168.200.14 - OCPSCH01040DDLF: - ansible_host: 192.168.200.15 - OCPSCH01040EELF: - ansible_host: 192.168.200.16 - OCPSCH01040FFLF: - ansible_host: 192.168.200.17 - OCPSCH01040GGLF: - ansible_host: 192.168.200.18 - OCPSCH01040HHLF: - ansible_host: 192.168.200.19 +[sonic_latest] +OCPSCH0104001MS ansible_host=192.168.200.10 sonic_hwsku=AS7512 +OCPSCH0104002MS ansible_host=192.168.200.11 sonic_hwsku=ACS-MSN2700 +OCPSCH01040AALF ansible_host=192.168.200.12 +OCPSCH01040BBLF ansible_host=192.168.200.13 +OCPSCH01040CCLF ansible_host=192.168.200.14 +OCPSCH01040DDLF ansible_host=192.168.200.15 +OCPSCH01040EELF ansible_host=192.168.200.16 +OCPSCH01040FFLF ansible_host=192.168.200.17 +OCPSCH01040GGLF ansible_host=192.168.200.18 +OCPSCH01040HHLF ansible_host=192.168.200.19 + +[sonic:children] +sonic_stable +sonic_latest + +[ocp:children] +sonic_stable +sonic_latest diff --git a/ansible/roles/eos/files/boot-config b/ansible/roles/eos/files/boot-config index 
0565a4786e0..9be9c2cf2cd 100644 --- a/ansible/roles/eos/files/boot-config +++ b/ansible/roles/eos/files/boot-config @@ -1 +1,2 @@ SWI=flash:/vEOS.swi + diff --git a/ansible/roles/eos/tasks/main.yml b/ansible/roles/eos/tasks/main.yml index 9968faef920..9e1b5f3611e 100644 --- a/ansible/roles/eos/tasks/main.yml +++ b/ansible/roles/eos/tasks/main.yml @@ -1,5 +1,5 @@ -- debug: msg="{{ vm_type }}" -- debug: msg="{{ vm_set_name }}" +- name: Set ansible login user name and password + set_fact: ansible_user="root" ansible_password={{ eos_root_password }} - name: Load topo variables include_vars: "vars/topo_{{ topo }}.yml" @@ -11,7 +11,7 @@ set_fact: current_server={{ group_names | extract_by_prefix('server_') }} - name: Extract VM names from the inventory - set_fact: VM_list={{ groups[current_server] | filter_by_prefix('VM') | sort }} + set_fact: VM_list={{ groups[current_server] | filter_by_prefix('VM') }} - name: Get VM host name set_fact: VM_host={{ groups[current_server] | difference(VM_list) }} @@ -24,6 +24,15 @@ msg: "cannot find {{ inventory_hostname }} in the topology" when: hostname == "hostname not found" +- name: Get VM front panel interface number + shell: virsh domiflist {{ inventory_hostname }} | grep -E "^{{ inventory_hostname }}-t" | wc -l + register: fp_num + delegate_to: "{{ VM_host[0] }}" + become: yes + +- name: Set EOS backplane port name + set_fact: bp_ifname="Ethernet{{ fp_num.stdout|int + 1 }}" + - name: Set properties list to default value, when properties are not defined set_fact: properties_list=[] when: configuration is not defined or configuration[hostname] is not defined or configuration[hostname]['properties'] is not defined @@ -32,13 +41,34 @@ set_fact: properties_list="{{ configuration[hostname]['properties'] }}" when: configuration and configuration[hostname] and configuration[hostname]['properties'] is defined +- name: copy boot-config + copy: src=boot-config + dest=/mnt/flash/boot-config + - name: Expand {{ hostname }} properties into 
props set_fact: props="{{ configuration_properties[item] | combine(props | default({})) }}" with_items: "{{ properties_list }}" when: hostname in configuration and configuration_properties[item] is defined -- include_tasks: veos.yml - when: vm_type == "veos" +- name: build a startup config + template: src="{{ topo }}-{{ props.swrole }}.j2" + dest=/mnt/flash/startup-config + when: hostname in configuration + +- name: Restart the box + command: /sbin/shutdown -r now "Ansible updates triggered" + when: hostname in configuration + +- name: Pause for reboot + pause: seconds=30 + when: hostname in configuration -- include_tasks: ceos.yml - when: vm_type == "ceos" +- name: Wait for VM to come up + wait_for: + host: "{{ ansible_ssh_host }}" + port: 22 + state: started + delay: 10 + timeout: 600 + connection: local + when: hostname in configuration diff --git a/ansible/roles/eos/templates/t0-e1031-leaf.j2 b/ansible/roles/eos/templates/t0-e1031-leaf.j2 new file mode 100644 index 00000000000..1614085b662 --- /dev/null +++ b/ansible/roles/eos/templates/t0-e1031-leaf.j2 @@ -0,0 +1,186 @@ +{% set host = configuration[hostname] %} +{% set mgmt_ip = ansible_host %} +no schedule tech-support +! +hostname {{ hostname }} +! +vrf definition MGMT + rd 1:1 +! +spanning-tree mode mstp +! +aaa root secret 0 123456 +! +username admin privilege 15 role network-admin secret 0 123456 +! +clock timezone UTC +! +lldp run +lldp management-address Management1 +lldp management-address vrf MGMT +! +snmp-server community {{ snmp_rocommunity }} ro +snmp-server vrf MGMT +! +ip routing +ip routing vrf MGMT +ipv6 unicast-routing +! +{% if vm_mgmt_gw is defined %} +ip route vrf MGMT 0.0.0.0/0 {{ vm_mgmt_gw }} +{% else %} +ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} +{% endif %} +! +route-map DEFAULT_ROUTES permit +! +{# #} +{# NOTE: Using large enough values (e.g., podset_number = 200, #} +{# us to overflow the 192.168.0.0/16 private address space here. 
#} +{# This should be fine for internal use, but may pose an issue if used otherwise #} +{# #} +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{% for subnet in range(0, props.tor_subnet_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) + + (subnet * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - ((props.tor_subnet_size | log(2))) | int) %} +ip route {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} {{ props.nhipv4 }} +ipv6 route {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/64 {{ props.nhipv6 }} +{% endif %} +{% endfor %} +{% endfor %} +{% endfor %} +! 
+{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +{% set prefixlen_v6 = (64 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +ip prefix-list test_ipv4_{{ podset}}_{{ tor }} seq 10 permit {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} ge {{ prefixlen_v4 }} +ipv6 prefix-list test_ipv6_{{ podset}}_{{ tor }} + seq 10 permit {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/{{ prefixlen_v6 }} ge {{ prefixlen_v6 }} +exit +{% endif %} +{% endfor %} +{% endfor %} +! +interface Management 1 + description TO LAB MGMT SWITCH + vrf forwarding MGMT + ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} + no shutdown +! +{% for name, iface in host['interfaces'].items() %} +interface {{ name }} +{% if name.startswith('Loopback') %} + description LOOPBACK +{% else %} + no switchport +{% endif %} +{% if name.startswith('Port-Channel') %} + port-channel min-links 1 +{% endif %} +{% if iface['lacp'] is defined %} + channel-group {{ iface['lacp'] }} mode active + lacp rate normal +{% endif %} +{% if iface['ipv4'] is defined %} + ip address {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ iface['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% endfor %} +! 
+interface {{ bp_ifname }} + description backplane + no switchport +{% if host['bp_interface']['ipv4'] is defined %} + ip address {{ host['bp_interface']['ipv4'] }} +{% endif %} +{% if host['bp_interface']['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ host['bp_interface']['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% for podset in range(0, props.podset_number) %} +{% if range(0, 1000)|random() >= props.failure_rate %} +{% for tor in range(0, props.tor_number) %} +{% set leafasn = props.leaf_asn_start + podset %} +{% set torasn = props.tor_asn_start + tor %} +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) }} + match ip address prefix-list test_ipv4_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) + 1 }} + match ipv6 address prefix-list test_ipv6_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! +{% endfor %} +{% endif %} +{% endfor %} +! +router bgp {{ host['bgp']['asn'] }} + router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} + ! +{% for asn, remote_ips in host['bgp']['peers'].items() %} +{% for remote_ip in remote_ips %} + neighbor {{ remote_ip }} remote-as {{ asn }} + neighbor {{ remote_ip }} description {{ asn }} + neighbor {{ remote_ip }} default-originate route-map DEFAULT_ROUTES +{% if remote_ip | ipv6 %} + address-family ipv6 + neighbor {{ remote_ip }} activate + exit +{% endif %} +{% endfor %} +{% endfor %} + ! 
+{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} +{% if iface['ipv4'] is defined %} + network {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + network {{ iface['ipv6'] }} +{% endif %} +{% endfor %} + redistribute static route-map PREPENDAS +! +management api http-commands + no protocol https + protocol http + no shutdown +! +end +s diff --git a/ansible/roles/eos/templates/t0-leaf-lag-2.j2 b/ansible/roles/eos/templates/t0-leaf-lag-2.j2 index 6e7d28cdd2d..e69de29bb2d 100644 --- a/ansible/roles/eos/templates/t0-leaf-lag-2.j2 +++ b/ansible/roles/eos/templates/t0-leaf-lag-2.j2 @@ -1,135 +0,0 @@ -{% set host = configuration[hostname] %} -{% set mgmt_ip = ansible_host %} -{% if vm_type is defined and vm_type == "ceos" %} -{% set mgmt_if_index = 0 %} -{% else %} -{% set mgmt_if_index = 1 %} -{% endif %} -no schedule tech-support -! -{% if vm_type is defined and vm_type == "ceos" %} -agent LicenseManager shutdown -agent PowerFuse shutdown -agent PowerManager shutdown -agent Thermostat shutdown -agent LedPolicy shutdown -agent StandbyCpld shutdown -agent Bfd shutdown -{% endif %} -! -hostname {{ hostname }} -! -vrf definition MGMT - rd 1:1 -! -spanning-tree mode mstp -! -aaa root secret 0 123456 -! -username admin privilege 15 role network-admin secret 0 123456 -! -clock timezone UTC -! -lldp run -lldp management-address Management{{ mgmt_if_index }} -lldp management-address vrf MGMT -! -snmp-server community {{ snmp_rocommunity }} ro -snmp-server vrf MGMT -! -ip routing -ip routing vrf MGMT -ipv6 unicast-routing -! -{% if vm_mgmt_gw is defined %} -ip route vrf MGMT 0.0.0.0/0 {{ vm_mgmt_gw }} -{% else %} -ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} -{% endif %} -! -interface Management {{ mgmt_if_index }} - description TO LAB MGMT SWITCH -{% if vm_type is defined and vm_type == "ceos" %} - vrf MGMT -{% else %} - vrf forwarding MGMT -{% endif %} - ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} - no shutdown -! 
-{% for name, iface in host['interfaces'].items() %} -interface {{ name }} -{% if name.startswith('Loopback') %} - description LOOPBACK -{% else %} - no switchport -{% endif %} -{% if name.startswith('Port-Channel') %} - port-channel min-links 2 -{% endif %} -{% if iface['lacp'] is defined %} - channel-group {{ iface['lacp'] }} mode active - lacp rate normal -{% endif %} -{% if iface['ipv4'] is defined %} - ip address {{ iface['ipv4'] }} -{% endif %} -{% if iface['ipv6'] is defined %} - ipv6 enable - ipv6 address {{ iface['ipv6'] }} - ipv6 nd ra suppress -{% endif %} - no shutdown -! -{% endfor %} -! -interface {{ bp_ifname }} - description backplane - no switchport -{% if host['bp_interface']['ipv4'] is defined %} - ip address {{ host['bp_interface']['ipv4'] }} -{% endif %} -{% if host['bp_interface']['ipv6'] is defined %} - ipv6 enable - ipv6 address {{ host['bp_interface']['ipv6'] }} - ipv6 nd ra suppress -{% endif %} - no shutdown -! -router bgp {{ host['bgp']['asn'] }} - router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} - ! -{% for asn, remote_ips in host['bgp']['peers'].items() %} -{% for remote_ip in remote_ips %} - neighbor {{ remote_ip }} remote-as {{ asn }} - neighbor {{ remote_ip }} description {{ asn }} -{% if remote_ip | ipv6 %} - address-family ipv6 - neighbor {{ remote_ip }} activate - exit -{% endif %} -{% endfor %} -{% endfor %} - neighbor {{ props.nhipv4 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv4 }} description exabgp_v4 - neighbor {{ props.nhipv6 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv6 }} description exabgp_v6 - address-family ipv6 - neighbor {{ props.nhipv6 }} activate - exit - ! -{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} -{% if iface['ipv4'] is defined %} - network {{ iface['ipv4'] }} -{% endif %} -{% if iface['ipv6'] is defined %} - network {{ iface['ipv6'] }} -{% endif %} -{% endfor %} -! 
-management api http-commands - no protocol https - protocol http - no shutdown -! -end diff --git a/ansible/roles/eos/templates/t0-leaf.j2 b/ansible/roles/eos/templates/t0-leaf.j2 index 50694063a1e..1614085b662 100644 --- a/ansible/roles/eos/templates/t0-leaf.j2 +++ b/ansible/roles/eos/templates/t0-leaf.j2 @@ -1,22 +1,7 @@ {% set host = configuration[hostname] %} {% set mgmt_ip = ansible_host %} -{% if vm_type is defined and vm_type == "ceos" %} -{% set mgmt_if_index = 0 %} -{% else %} -{% set mgmt_if_index = 1 %} -{% endif %} no schedule tech-support ! -{% if vm_type is defined and vm_type == "ceos" %} -agent LicenseManager shutdown -agent PowerFuse shutdown -agent PowerManager shutdown -agent Thermostat shutdown -agent LedPolicy shutdown -agent StandbyCpld shutdown -agent Bfd shutdown -{% endif %} -! hostname {{ hostname }} ! vrf definition MGMT @@ -31,7 +16,7 @@ username admin privilege 15 role network-admin secret 0 123456 clock timezone UTC ! lldp run -lldp management-address Management{{ mgmt_if_index }} +lldp management-address Management1 lldp management-address vrf MGMT ! snmp-server community {{ snmp_rocommunity }} ro @@ -47,13 +32,58 @@ ip route vrf MGMT 0.0.0.0/0 {{ vm_mgmt_gw }} ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} {% endif %} ! -interface Management {{ mgmt_if_index }} +route-map DEFAULT_ROUTES permit +! +{# #} +{# NOTE: Using large enough values (e.g., podset_number = 200, #} +{# us to overflow the 192.168.0.0/16 private address space here. 
#} +{# This should be fine for internal use, but may pose an issue if used otherwise #} +{# #} +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{% for subnet in range(0, props.tor_subnet_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) + + (subnet * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - ((props.tor_subnet_size | log(2))) | int) %} +ip route {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} {{ props.nhipv4 }} +ipv6 route {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/64 {{ props.nhipv6 }} +{% endif %} +{% endfor %} +{% endfor %} +{% endfor %} +! 
+{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +{% set prefixlen_v6 = (64 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +ip prefix-list test_ipv4_{{ podset}}_{{ tor }} seq 10 permit {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} ge {{ prefixlen_v4 }} +ipv6 prefix-list test_ipv6_{{ podset}}_{{ tor }} + seq 10 permit {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/{{ prefixlen_v6 }} ge {{ prefixlen_v6 }} +exit +{% endif %} +{% endfor %} +{% endfor %} +! +interface Management 1 description TO LAB MGMT SWITCH -{% if vm_type is defined and vm_type == "ceos" %} - vrf MGMT -{% else %} vrf forwarding MGMT -{% endif %} ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} no shutdown ! @@ -96,6 +126,31 @@ interface {{ bp_ifname }} {% endif %} no shutdown ! +{% for podset in range(0, props.podset_number) %} +{% if range(0, 1000)|random() >= props.failure_rate %} +{% for tor in range(0, props.tor_number) %} +{% set leafasn = props.leaf_asn_start + podset %} +{% set torasn = props.tor_asn_start + tor %} +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) }} + match ip address prefix-list test_ipv4_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! 
+route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) + 1 }} + match ipv6 address prefix-list test_ipv6_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! +{% endfor %} +{% endif %} +{% endfor %} +! router bgp {{ host['bgp']['asn'] }} router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} ! @@ -103,6 +158,7 @@ router bgp {{ host['bgp']['asn'] }} {% for remote_ip in remote_ips %} neighbor {{ remote_ip }} remote-as {{ asn }} neighbor {{ remote_ip }} description {{ asn }} + neighbor {{ remote_ip }} default-originate route-map DEFAULT_ROUTES {% if remote_ip | ipv6 %} address-family ipv6 neighbor {{ remote_ip }} activate @@ -110,13 +166,6 @@ router bgp {{ host['bgp']['asn'] }} {% endif %} {% endfor %} {% endfor %} - neighbor {{ props.nhipv4 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv4 }} description exabgp_v4 - neighbor {{ props.nhipv6 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv6 }} description exabgp_v6 - address-family ipv6 - neighbor {{ props.nhipv6 }} activate - exit ! {% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} {% if iface['ipv4'] is defined %} @@ -126,6 +175,7 @@ router bgp {{ host['bgp']['asn'] }} network {{ iface['ipv6'] }} {% endif %} {% endfor %} + redistribute static route-map PREPENDAS ! management api http-commands no protocol https @@ -133,3 +183,4 @@ management api http-commands no shutdown ! end +s diff --git a/ansible/roles/eos/templates/t0-slx-leaf.j2 b/ansible/roles/eos/templates/t0-slx-leaf.j2 new file mode 100644 index 00000000000..277163ccc62 --- /dev/null +++ b/ansible/roles/eos/templates/t0-slx-leaf.j2 @@ -0,0 +1,189 @@ +{% set host = configuration[hostname] %} +{% set mgmt_ip = ansible_host %} +no schedule tech-support +! +hostname {{ hostname }} +! +vrf definition MGMT + rd 1:1 +! 
+spanning-tree mode mstp +! +aaa root secret 0 123456 +! +username admin privilege 15 role network-admin secret 0 123456 +! +clock timezone UTC +! +lldp run +lldp management-address Management1 +lldp management-address vrf MGMT +! +snmp-server community {{ snmp_rocommunity }} ro +snmp-server vrf MGMT +! +ip routing +ip routing vrf MGMT +ipv6 unicast-routing +! +{% if vm_mgmt_gw is defined %} +ip route vrf MGMT 0.0.0.0/0 {{ vm_mgmt_gw }} +{% else %} +ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} +{% endif %} +! +route-map DEFAULT_ROUTES permit +! +{# #} +{# NOTE: Using large enough values (e.g., podset_number = 200, #} +{# us to overflow the 192.168.0.0/16 private address space here. #} +{# This should be fine for internal use, but may pose an issue if used otherwise #} +{# #} +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{% for subnet in range(0, props.tor_subnet_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) + + (subnet * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - ((props.tor_subnet_size | log(2))) | int) %} +ip route {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} {{ props.nhipv4 }} +ipv6 route {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/64 {{ props.nhipv6 }} +{% endif %} +{% endfor %} +{% endfor %} +{% endfor %} +! 
+{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +{% set prefixlen_v6 = (64 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +ip prefix-list test_ipv4_{{ podset}}_{{ tor }} seq 10 permit {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} ge {{ prefixlen_v4 }} +ipv6 prefix-list test_ipv6_{{ podset}}_{{ tor }} + seq 10 permit {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/{{ prefixlen_v6 }} ge {{ prefixlen_v6 }} +exit +{% endif %} +{% endfor %} +{% endfor %} +! +interface Management 1 + description TO LAB MGMT SWITCH + vrf forwarding MGMT + ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} + no shutdown +! +{% for name, iface in host['interfaces'].items() %} +interface {{ name }} +{% if name.startswith('Loopback') %} + description LOOPBACK +{% else %} + no switchport +{% endif %} +{% if name.startswith('Port-Channel') %} + port-channel min-links 1 +{% endif %} +{% if iface['lacp'] is defined %} + channel-group {{ iface['lacp'] }} mode active + lacp rate normal +{% endif %} +{% if iface['ipv4'] is defined %} + ip address {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ iface['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% endfor %} +! 
+interface {{ bp_ifname }} + description backplane + no switchport +{% if host['bp_interface']['ipv4'] is defined %} + ip address {{ host['bp_interface']['ipv4'] }} +{% endif %} +{% if host['bp_interface']['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ host['bp_interface']['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% for podset in range(0, props.podset_number) %} +{% if range(0, 1000)|random() >= props.failure_rate %} +{% for tor in range(0, props.tor_number) %} +{% set leafasn = props.leaf_asn_start + podset %} +{% set torasn = props.tor_asn_start + tor %} +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) }} + match ip address prefix-list test_ipv4_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) + 1 }} + match ipv6 address prefix-list test_ipv6_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! +{% endfor %} +{% endif %} +{% endfor %} +! +router bgp {{ host['bgp']['asn'] }} + router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} + ! +{% for asn, remote_ips in host['bgp']['peers'].items() %} +{% for remote_ip in remote_ips %} + neighbor {{ remote_ip }} remote-as {{ asn }} + neighbor {{ remote_ip }} description {{ asn }} + neighbor {{ remote_ip }} default-originate route-map DEFAULT_ROUTES +{% if remote_ip | ipv6 %} + address-family ipv6 + neighbor {{ remote_ip }} activate + exit +{% endif %} +{% endfor %} +{% endfor %} + ! 
+{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} +{% if iface['ipv4'] is defined %} + network {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + network {{ iface['ipv6'] }} +{% endif %} +{% endfor %} + redistribute static route-map PREPENDAS +! +management api http-commands + no protocol https + protocol http + no shutdown +! +end +s + 136,1 66% + 72,1 23% + diff --git a/ansible/roles/eos/templates/t1-64-lag-tor.j2 b/ansible/roles/eos/templates/t1-64-lag-tor.j2 index 92d52642baf..eb720966201 100644 --- a/ansible/roles/eos/templates/t1-64-lag-tor.j2 +++ b/ansible/roles/eos/templates/t1-64-lag-tor.j2 @@ -1,23 +1,8 @@ {% set host = configuration[hostname] %} {% set mgmt_ip = ansible_host %} {% set tornum = host['tornum'] %} -{% if vm_type is defined and vm_type == "ceos" %} -{% set mgmt_if_index = 0 %} -{% else %} -{% set mgmt_if_index = 1 %} -{% endif %} no schedule tech-support ! -{% if vm_type is defined and vm_type == "ceos" %} -agent LicenseManager shutdown -agent PowerFuse shutdown -agent PowerManager shutdown -agent Thermostat shutdown -agent LedPolicy shutdown -agent StandbyCpld shutdown -agent Bfd shutdown -{% endif %} -! hostname {{ hostname }} ! vrf definition MGMT @@ -32,7 +17,7 @@ username admin privilege 15 role network-admin secret 0 123456 clock timezone UTC ! lldp run -lldp management-address Management{{ mgmt_if_index }} +lldp management-address Management1 lldp management-address vrf MGMT ! snmp-server community {{ snmp_rocommunity }} ro @@ -44,13 +29,32 @@ ipv6 unicast-routing ! ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} ! -interface Management {{ mgmt_if_index }} +{% for subnet in range(0, props.tor_subnet_number) %} +ip route 172.16.{{ tornum }}.{{ subnet }}/32 {{ props.nhipv4 }} +ipv6 route 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 {{ props.nhipv6 }} +{% endfor %} +! 
+{% if 'vips' in host %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip route {{ subnet }} {{ props.nhipv4 }} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip prefix-list test_vip_{{ index }} seq 1{{ index }} permit {{ subnet }} +{% set index = index + 1 %} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +route-map PREPENDAS permit 2{{ index }} + match ip address prefix-list test_vip_{{ index }} + set as-path prepend {{ host['vips']['ipv4']['asn'] }} +{% set index = index + 1 %} +{% endfor %} +{% endif %} +! +interface Management 1 description TO LAB MGMT SWITCH - {% if vm_type is defined and vm_type == "ceos" %} - vrf MGMT -{% else %} vrf forwarding MGMT - {% endif %} ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} no shutdown ! @@ -110,14 +114,10 @@ router bgp {{ host['bgp']['asn'] }} {% endif %} {% endfor %} {% endfor %} - neighbor {{ props.nhipv4 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv4 }} description exabgp_v4 - neighbor {{ props.nhipv6 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv6 }} description exabgp_v6 - address-family ipv6 - neighbor {{ props.nhipv6 }} activate - exit - ! +! +{% if 'vips' in host %} +redistribute static route-map PREPENDAS +{% endif %} {% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} {% if iface['ipv4'] is defined %} network {{ iface['ipv4'] }} @@ -126,6 +126,10 @@ router bgp {{ host['bgp']['asn'] }} network {{ iface['ipv6'] }} {% endif %} {% endfor %} +{% for subnet in range(0, props.tor_subnet_number) %} + network 172.16.{{ tornum }}.{{ subnet }}/32 + network 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 +{% endfor %} ! management api http-commands no protocol https @@ -133,3 +137,4 @@ management api http-commands no shutdown ! 
end + diff --git a/ansible/roles/eos/templates/t1-lag-spine.j2 b/ansible/roles/eos/templates/t1-lag-spine.j2 index a673d12618c..57eca1bd1d5 100644 --- a/ansible/roles/eos/templates/t1-lag-spine.j2 +++ b/ansible/roles/eos/templates/t1-lag-spine.j2 @@ -1,22 +1,7 @@ {% set host = configuration[hostname] %} {% set mgmt_ip = ansible_host %} -{% if vm_type is defined and vm_type == "ceos" %} -{% set mgmt_if_index = 0 %} -{% else %} -{% set mgmt_if_index = 1 %} -{% endif %} no schedule tech-support ! -{% if vm_type is defined and vm_type == "ceos" %} -agent LicenseManager shutdown -agent PowerFuse shutdown -agent PowerManager shutdown -agent Thermostat shutdown -agent LedPolicy shutdown -agent StandbyCpld shutdown -agent Bfd shutdown -{% endif %} -! hostname {{ hostname }} ! vrf definition MGMT @@ -31,7 +16,7 @@ username admin privilege 15 role network-admin secret 0 123456 clock timezone UTC ! lldp run -lldp management-address Management{{ mgmt_if_index }} +lldp management-address Management1 lldp management-address vrf MGMT ! snmp-server community {{ snmp_rocommunity }} ro @@ -43,13 +28,29 @@ ipv6 unicast-routing ! ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} ! -interface Management {{ mgmt_if_index }} +route-map DEFAULT_ROUTES permit +! +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{% for subnet in range(0, props.tor_subnet_number) %} +ip route 192.168.{{ podset }}.{{ tor * 16 + subnet }}/32 {{ props.nhipv4 }} +ipv6 route 20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16 + subnet)}}::/64 {{ props.nhipv6 }} +{% endfor %} +{% endfor %} +{% endfor %} +! +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +ip prefix-list test_ipv4_{{ podset}}_{{ tor }} seq 10 permit 192.168.{{ podset }}.{{ tor * 16 }}/28 ge 28 +ipv6 prefix-list test_ipv6_{{ podset}}_{{ tor }} + seq 10 permit 20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16) }}::/60 ge 60 +exit +{% endfor %} +{% endfor %} +! 
+interface Management 1 description TO LAB MGMT SWITCH -{% if vm_type is defined and vm_type == "ceos" %} - vrf MGMT -{% else %} vrf forwarding MGMT -{% endif %} ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} no shutdown ! @@ -60,13 +61,6 @@ interface {{ name }} {% else %} no switchport {% endif %} -{% if name.startswith('Port-Channel') %} - port-channel min-links 2 -{% endif %} -{% if iface['lacp'] is defined %} - channel-group {{ iface['lacp'] }} mode active - lacp rate normal -{% endif %} {% if iface['ipv4'] is defined %} ip address {{ iface['ipv4'] }} {% endif %} @@ -92,6 +86,23 @@ interface {{ bp_ifname }} {% endif %} no shutdown ! +{% for podset in range(0, props.podset_number) %} +{% if range(0, 1000)|random() >= props.failure_rate %} +{% for tor in range(0, props.tor_number) %} +{% set leafasn = props.leaf_asn_start + podset %} +{% set torasn = props.tor_asn_start + tor %} +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) }} + match ip address prefix-list test_ipv4_{{ podset }}_{{ tor }} + set as-path prepend {{ leafasn }} {{ torasn }} +! +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) + 1 }} + match ipv6 address prefix-list test_ipv6_{{ podset }}_{{ tor }} + set as-path prepend {{ leafasn }} {{ torasn }} +! +{% endfor %} +{% endif %} +{% endfor %} +! router bgp {{ host['bgp']['asn'] }} router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} ! 
@@ -99,6 +110,7 @@ router bgp {{ host['bgp']['asn'] }} {% for remote_ip in remote_ips %} neighbor {{ remote_ip }} remote-as {{ asn }} neighbor {{ remote_ip }} description {{ asn }} + neighbor {{ remote_ip }} default-originate route-map DEFAULT_ROUTES {% if remote_ip | ipv6 %} address-family ipv6 neighbor {{ remote_ip }} activate @@ -106,13 +118,6 @@ router bgp {{ host['bgp']['asn'] }} {% endif %} {% endfor %} {% endfor %} - neighbor {{ props.nhipv4 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv4 }} description exabgp_v4 - neighbor {{ props.nhipv6 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv6 }} description exabgp_v6 - address-family ipv6 - neighbor {{ props.nhipv6 }} activate - exit ! {% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} {% if iface['ipv4'] is defined %} @@ -122,6 +127,7 @@ router bgp {{ host['bgp']['asn'] }} network {{ iface['ipv6'] }} {% endif %} {% endfor %} + redistribute static route-map PREPENDAS ! management api http-commands no protocol https @@ -129,3 +135,4 @@ management api http-commands no shutdown ! end + diff --git a/ansible/roles/eos/templates/t1-lag-tor.j2 b/ansible/roles/eos/templates/t1-lag-tor.j2 index d92746b4563..2a515847655 100644 --- a/ansible/roles/eos/templates/t1-lag-tor.j2 +++ b/ansible/roles/eos/templates/t1-lag-tor.j2 @@ -1,22 +1,8 @@ {% set host = configuration[hostname] %} {% set mgmt_ip = ansible_host %} -{% if vm_type is defined and vm_type == "ceos" %} -{% set mgmt_if_index = 0 %} -{% else %} -{% set mgmt_if_index = 1 %} -{% endif %} +{% set tornum = host['tornum'] %} no schedule tech-support ! -{% if vm_type is defined and vm_type == "ceos" %} -agent LicenseManager shutdown -agent PowerFuse shutdown -agent PowerManager shutdown -agent Thermostat shutdown -agent LedPolicy shutdown -agent StandbyCpld shutdown -agent Bfd shutdown -{% endif %} -! hostname {{ hostname }} ! 
vrf definition MGMT @@ -31,7 +17,7 @@ username admin privilege 15 role network-admin secret 0 123456 clock timezone UTC ! lldp run -lldp management-address Management{{ mgmt_if_index }} +lldp management-address Management1 lldp management-address vrf MGMT ! snmp-server community {{ snmp_rocommunity }} ro @@ -43,13 +29,31 @@ ipv6 unicast-routing ! ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} ! -interface Management {{ mgmt_if_index }} +{% for subnet in range(0, props.tor_subnet_number) %} +ip route 172.16.{{ tornum }}.{{ subnet }}/32 {{ props.nhipv4 }} +ipv6 route 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 {{ props.nhipv6 }} +{% endfor %} +{% if 'vips' in host %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip route {{ subnet }} {{ props.nhipv4 }} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip prefix-list test_vip_{{ index }} seq 1{{ index }} permit {{ subnet }} +{% set index = index + 1 %} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +route-map PREPENDAS permit 2{{ index }} + match ip address prefix-list test_vip_{{ index }} + set as-path prepend {{ host['vips']['ipv4']['asn'] }} +{% set index = index + 1 %} +{% endfor %} +{% endif %} +! +interface Management 1 description TO LAB MGMT SWITCH -{% if vm_type is defined and vm_type == "ceos" %} - vrf MGMT -{% else %} vrf forwarding MGMT -{% endif %} ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} no shutdown ! @@ -102,14 +106,10 @@ router bgp {{ host['bgp']['asn'] }} {% endif %} {% endfor %} {% endfor %} - neighbor {{ props.nhipv4 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv4 }} description exabgp_v4 - neighbor {{ props.nhipv6 }} remote-as {{ host['bgp']['asn'] }} - neighbor {{ props.nhipv6 }} description exabgp_v6 - address-family ipv6 - neighbor {{ props.nhipv6 }} activate - exit ! 
+{% if 'vips' in host %} +redistribute static route-map PREPENDAS +{% endif %} {% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} {% if iface['ipv4'] is defined %} network {{ iface['ipv4'] }} @@ -118,6 +118,10 @@ router bgp {{ host['bgp']['asn'] }} network {{ iface['ipv6'] }} {% endif %} {% endfor %} +{% for subnet in range(0, props.tor_subnet_number) %} + network 172.16.{{ tornum }}.{{ subnet }}/32 + network 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 +{% endfor %} ! management api http-commands no protocol https @@ -125,3 +129,4 @@ management api http-commands no shutdown ! end + diff --git a/ansible/roles/eos/templates/t1-slx-spine.j2 b/ansible/roles/eos/templates/t1-slx-spine.j2 new file mode 100644 index 00000000000..57eca1bd1d5 --- /dev/null +++ b/ansible/roles/eos/templates/t1-slx-spine.j2 @@ -0,0 +1,138 @@ +{% set host = configuration[hostname] %} +{% set mgmt_ip = ansible_host %} +no schedule tech-support +! +hostname {{ hostname }} +! +vrf definition MGMT + rd 1:1 +! +spanning-tree mode mstp +! +aaa root secret 0 123456 +! +username admin privilege 15 role network-admin secret 0 123456 +! +clock timezone UTC +! +lldp run +lldp management-address Management1 +lldp management-address vrf MGMT +! +snmp-server community {{ snmp_rocommunity }} ro +snmp-server vrf MGMT +! +ip routing +ip routing vrf MGMT +ipv6 unicast-routing +! +ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} +! +route-map DEFAULT_ROUTES permit +! +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{% for subnet in range(0, props.tor_subnet_number) %} +ip route 192.168.{{ podset }}.{{ tor * 16 + subnet }}/32 {{ props.nhipv4 }} +ipv6 route 20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16 + subnet)}}::/64 {{ props.nhipv6 }} +{% endfor %} +{% endfor %} +{% endfor %} +! 
+{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +ip prefix-list test_ipv4_{{ podset}}_{{ tor }} seq 10 permit 192.168.{{ podset }}.{{ tor * 16 }}/28 ge 28 +ipv6 prefix-list test_ipv6_{{ podset}}_{{ tor }} + seq 10 permit 20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16) }}::/60 ge 60 +exit +{% endfor %} +{% endfor %} +! +interface Management 1 + description TO LAB MGMT SWITCH + vrf forwarding MGMT + ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} + no shutdown +! +{% for name, iface in host['interfaces'].items() %} +interface {{ name }} +{% if name.startswith('Loopback') %} + description LOOPBACK +{% else %} + no switchport +{% endif %} +{% if iface['ipv4'] is defined %} + ip address {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ iface['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% endfor %} +! +interface {{ bp_ifname }} + description backplane + no switchport +{% if host['bp_interface']['ipv4'] is defined %} + ip address {{ host['bp_interface']['ipv4'] }} +{% endif %} +{% if host['bp_interface']['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ host['bp_interface']['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% for podset in range(0, props.podset_number) %} +{% if range(0, 1000)|random() >= props.failure_rate %} +{% for tor in range(0, props.tor_number) %} +{% set leafasn = props.leaf_asn_start + podset %} +{% set torasn = props.tor_asn_start + tor %} +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) }} + match ip address prefix-list test_ipv4_{{ podset }}_{{ tor }} + set as-path prepend {{ leafasn }} {{ torasn }} +! +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) + 1 }} + match ipv6 address prefix-list test_ipv6_{{ podset }}_{{ tor }} + set as-path prepend {{ leafasn }} {{ torasn }} +! +{% endfor %} +{% endif %} +{% endfor %} +! 
+router bgp {{ host['bgp']['asn'] }} + router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} + ! +{% for asn, remote_ips in host['bgp']['peers'].items() %} +{% for remote_ip in remote_ips %} + neighbor {{ remote_ip }} remote-as {{ asn }} + neighbor {{ remote_ip }} description {{ asn }} + neighbor {{ remote_ip }} default-originate route-map DEFAULT_ROUTES +{% if remote_ip | ipv6 %} + address-family ipv6 + neighbor {{ remote_ip }} activate + exit +{% endif %} +{% endfor %} +{% endfor %} + ! +{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} +{% if iface['ipv4'] is defined %} + network {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + network {{ iface['ipv6'] }} +{% endif %} +{% endfor %} + redistribute static route-map PREPENDAS +! +management api http-commands + no protocol https + protocol http + no shutdown +! +end + diff --git a/ansible/roles/eos/templates/t1-slx-tor.j2 b/ansible/roles/eos/templates/t1-slx-tor.j2 new file mode 100644 index 00000000000..2a515847655 --- /dev/null +++ b/ansible/roles/eos/templates/t1-slx-tor.j2 @@ -0,0 +1,132 @@ +{% set host = configuration[hostname] %} +{% set mgmt_ip = ansible_host %} +{% set tornum = host['tornum'] %} +no schedule tech-support +! +hostname {{ hostname }} +! +vrf definition MGMT + rd 1:1 +! +spanning-tree mode mstp +! +aaa root secret 0 123456 +! +username admin privilege 15 role network-admin secret 0 123456 +! +clock timezone UTC +! +lldp run +lldp management-address Management1 +lldp management-address vrf MGMT +! +snmp-server community {{ snmp_rocommunity }} ro +snmp-server vrf MGMT +! +ip routing +ip routing vrf MGMT +ipv6 unicast-routing +! +ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} +! 
+{% for subnet in range(0, props.tor_subnet_number) %} +ip route 172.16.{{ tornum }}.{{ subnet }}/32 {{ props.nhipv4 }} +ipv6 route 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 {{ props.nhipv6 }} +{% endfor %} +{% if 'vips' in host %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip route {{ subnet }} {{ props.nhipv4 }} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip prefix-list test_vip_{{ index }} seq 1{{ index }} permit {{ subnet }} +{% set index = index + 1 %} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +route-map PREPENDAS permit 2{{ index }} + match ip address prefix-list test_vip_{{ index }} + set as-path prepend {{ host['vips']['ipv4']['asn'] }} +{% set index = index + 1 %} +{% endfor %} +{% endif %} +! +interface Management 1 + description TO LAB MGMT SWITCH + vrf forwarding MGMT + ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} + no shutdown +! +{% for name, iface in host['interfaces'].items() %} +interface {{ name }} +{% if name.startswith('Loopback') %} + description LOOPBACK +{% else %} + no switchport +{% endif %} +{% if iface['ipv4'] is defined %} + ip address {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ iface['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% endfor %} +! +interface {{ bp_ifname }} + description backplane + no switchport +{% if host['bp_interface']['ipv4'] is defined %} + ip address {{ host['bp_interface']['ipv4'] }} +{% endif %} +{% if host['bp_interface']['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ host['bp_interface']['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +router bgp {{ host['bgp']['asn'] }} + router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} + ! + graceful-restart restart-time {{ bgp_gr_timer }} + graceful-restart + ! 
+{% for asn, remote_ips in host['bgp']['peers'].items() %} +{% for remote_ip in remote_ips %} + neighbor {{ remote_ip }} remote-as {{ asn }} + neighbor {{ remote_ip }} description {{ asn }} +{% if remote_ip | ipv6 %} + address-family ipv6 + neighbor {{ remote_ip }} activate + exit +{% endif %} +{% endfor %} +{% endfor %} + ! +{% if 'vips' in host %} +redistribute static route-map PREPENDAS +{% endif %} +{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} +{% if iface['ipv4'] is defined %} + network {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + network {{ iface['ipv6'] }} +{% endif %} +{% endfor %} +{% for subnet in range(0, props.tor_subnet_number) %} + network 172.16.{{ tornum }}.{{ subnet }}/32 + network 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 +{% endfor %} +! +management api http-commands + no protocol https + protocol http + no shutdown +! +end + diff --git a/ansible/roles/fanout/tasks/fanout_mlnx.yml b/ansible/roles/fanout/tasks/fanout_mlnx.yml index 8fb1179d2ec..6966614a1d0 100644 --- a/ansible/roles/fanout/tasks/fanout_mlnx.yml +++ b/ansible/roles/fanout/tasks/fanout_mlnx.yml @@ -6,23 +6,16 @@ ### specified in this playbook, you would need to come up with your own fanout switch deployment ### playbook ################################################################################################ + # Gather minigraph facts +- name: Gathering lab graph facts about the device + conn_graph_facts: host={{ inventory_hostname }} + connection: local + tags: always + - name: prepare fanout switch admin login info set_fact: ansible_ssh_user={{ fanout_mlnx_user }} ansible_ssh_pass={{ fanout_mlnx_password }} peer_hwsku={{device_info['HwSku']}} tags: always -########################################## -# upgrade ONYX OS image on fanout switch # -########################################## -- block: - - name: upgrade ONYX OS - include_tasks: - file: mlnx/upgrade_onyx.yml - apply: - tags: - - upgrade - 
when: peer_hwsku == "MLNX-OS" and image_url is defined - tags: upgrade - ########################################################## # deploy tasks to deploy default configuration on fanout # ########################################################## @@ -35,29 +28,18 @@ vars: action_variable: "deploy" when: peer_hwsku == "MLNX-OS" - tags: deploy + tags: deploy ################################################################### # build, deploy and start docker images for the PFC WD test # ################################################################### -- name: build, deploy and start docker images for the PFC WD test - include_tasks: - file: mlnx/deploy_pfcwd_fanout.yml - apply: - tags: - - deploy - - pfcwd_config +- include: mlnx/deploy_pfcwd_fanout.yml when: peer_hwsku == "MLNX-OS" tags: deploy,pfcwd_config ################################################################### # check and recover docker images for the PFC WD test # ################################################################### -- name: check and recover docker images for the PFC WD test - include_tasks: - file: mlnx/check_pfcwd_fanout.yml - apply: - tags: - - check_pfcwd_config +- include: mlnx/check_pfcwd_fanout.yml when: peer_hwsku == "MLNX-OS" tags: check_pfcwd_config diff --git a/ansible/roles/fanout/tasks/fanout_sonic.yml b/ansible/roles/fanout/tasks/fanout_sonic.yml index 884a688426f..860494488f9 100644 --- a/ansible/roles/fanout/tasks/fanout_sonic.yml +++ b/ansible/roles/fanout/tasks/fanout_sonic.yml @@ -6,23 +6,28 @@ - name: find interface name mapping port_alias: hwsku="{{ device_info["HwSku"] }}" -- name: build fanout vlan config - template: src=sonic_deploy.j2 - dest=/etc/sonic/vlan.json - become: yes +#- name: build fanout vlan config +# template: src=sonic_deploy.j2 +# +# dest=/etc/sonic/vlan.json +# become: yes - name: disable all copp rules - shell: - cmd: docker exec swss bash -c 'echo [] > /etc/swss/config.d/00-copp.config.json' + copy: content='[]' + 
dest=/etc/swss/config.d/00-copp.config.json become: yes + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i swss python -- name: generate config_db.json - shell: sonic-cfggen -H -j /etc/sonic/vlan.json -j /etc/sonic/init_cfg.json --print-data > /etc/sonic/config_db.json - become: yes +#- name: generate config_db.json +# shell: sonic-cfggen -H -j /etc/sonic/vlan.json -j /etc/sonic/init_cfg.json --print-data > /etc/sonic/config_db.json +# +# become: yes -- name: reload config_db.json - shell: config reload -y - become: yes +#- name: reload config_db.json +# shell: config reload -y +# become: yes - name: stop and disable lldp service service: name=lldp state=stopped enabled=no diff --git a/ansible/roles/fanout/tasks/main.yml b/ansible/roles/fanout/tasks/main.yml index 80cfa6fbd75..6110d331001 100644 --- a/ansible/roles/fanout/tasks/main.yml +++ b/ansible/roles/fanout/tasks/main.yml @@ -1,38 +1,36 @@ -############################################################################################## -### playbook to deploy the fanout switch -### Use this playbook to deploy the VLAN configurations of fanout switch in SONiC testbed -### This playbook will run based on hardware platform. Each fanout switch hardware type has its -### own unique feature and configuration command or format. 
Unless you use the hardware switch -### specified in this playbook, you would need to come up with your own fanout switch deployment -### playbook -################################################################################################ -- name: Gathering lab graph facts about the device - conn_graph_facts: host={{ inventory_hostname }} - delegate_to: localhost - tags: always - -- set_fact: sw_type="{{ device_info['Type'] }}" - -- set_fact: os='eos' - when: os is not defined - tags: always - -- import_tasks: fanout_eos.yml - when: os == 'eos' - -- import_tasks: fanout_sonic.yml - when: os == 'sonic' - -- import_tasks: fanout_mlnx.yml - when: os == 'mellanox' - -- block: - - set_fact: - leaf_name: "{{ inventory_hostname }}" - leaf: "{{ ansible_host }}" - - - import_tasks: rootfanout_connect.yml - vars: - deploy_leaf: true - when: sw_type == 'FanoutLeaf' - tags: always +############################################################################################## +### playbook to deploy the fanout switch +### Use this playbook to deploy the VLAN configurations of fanout switch in SONiC testbed +### This playbook will run based on hardware platform. Each fanout switch hardware type has its +### own unique feature and configuration command or format. 
Unless you use the hardware switch +### specified in this playbook, you would need to come up with your own fanout switch deployment +### playbook +################################################################################################ +# Deploy fanout switch +- name: Gathering lab graph facts about the device + conn_graph_facts: host={{ inventory_hostname }} + connection: local + +- set_fact: sw_type="{{ device_info['Type'] }}" + +- set_fact: os='eos' + when: os is not defined + +- include: fanout_eos.yml + when: os == 'eos' + +- include: fanout_sonic.yml + when: os == 'sonic' + +- include: fanout_mlnx.yml + when: os == 'mellanox' + +- block: + - set_fact: + leaf_name: "{{ inventory_hostname }}" + leaf: "{{ ansible_host }}" + +# - include: rootfanout_connect.yml +# deploy_leaf=true +# when: sw_type == 'FanoutLeaf' + diff --git a/ansible/roles/fanout/tasks/mlnx/check_pfcwd_fanout.yml b/ansible/roles/fanout/tasks/mlnx/check_pfcwd_fanout.yml index 84277685414..c168a5dd12c 100644 --- a/ansible/roles/fanout/tasks/mlnx/check_pfcwd_fanout.yml +++ b/ansible/roles/fanout/tasks/mlnx/check_pfcwd_fanout.yml @@ -15,8 +15,8 @@ dockers_installed: "{{output.stdout|search(\"pfc_storm\")}}" dockers_running: "{{output.stdout|search(\"storm\")|bool}}" -- fail: msg="PFCWD dockers not installed" - when: not dockers_installed +- debug: + msg: "Dockers installed{{':'}} {{dockers_installed}}" -- fail: msg="PFCWD dockers are not running" - when: not dockers_running +- debug: + msg: "Dockers running {{':'}} {{dockers_running}}" diff --git a/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml b/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml index 6c75721165b..461d15fd93f 100644 --- a/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml +++ b/ansible/roles/fanout/tasks/mlnx/deploy_pfcwd_fanout.yml @@ -17,24 +17,22 @@ command: make args: chdir: "{{item | dirname}}" - with_items: "{{ pfcwd_dockers }}" + with_items: pfcwd_dockers delegate_to: localhost when: 
pfcwd_dockers_url is not defined - name: Copy pfcwd docker images to switch - include_tasks: scp_copy.yml + include: scp_copy.yml vars: src: "{{ item }}" dest: "{{ fanout_img_path }}" - with_items: "{{ pfcwd_dockers }}" + with_items: pfcwd_dockers when: pfcwd_dockers_url is not defined - name: Download pre-built pfcwd dockers if path specified - include_tasks: download_copy_image_fanout.yml - vars: - image_url: "{{ pfcwd_dockers_url }}/{{ item | basename }}" - fanout_dest: "{{ fanout_img_path }}/{{ item | basename }}" - with_items: "{{ pfcwd_dockers }}" + include: download_copy_pfcwd_fanout.yml + with_items: pfcwd_dockers + delegate_to: localhost when: pfcwd_dockers_url is defined - name: Load and start dockers diff --git a/ansible/roles/fanout/tasks/mlnx/download_copy_pfcwd_fanout.yml b/ansible/roles/fanout/tasks/mlnx/download_copy_pfcwd_fanout.yml new file mode 100644 index 00000000000..f5695a651a4 --- /dev/null +++ b/ansible/roles/fanout/tasks/mlnx/download_copy_pfcwd_fanout.yml @@ -0,0 +1,21 @@ +- block: + + - name: Get timestamp + set_fact: timestamp="{{lookup('pipe','date +%Y%m%d%H%M%S')}}" + + - name: Get temporary filename + set_fact: filename="/tmp/pfcwd_docker_{{ timestamp }}" + + - name: Download pre-built pfcwd docker image + get_url: url={{ pfcwd_dockers_url }}/{{ item | basename }} dest={{ filename }} + + - name: Copy the downloaded pfcwd docker image to switch + include: scp_copy.yml + vars: + src: "{{ filename }}" + dest: "{{ fanout_img_path }}/{{ item | basename }}" + + always: + + - name: Remove the downloaded pfcwd docker image + file: path={{ filename }} state=absent diff --git a/ansible/roles/fanout/tasks/rootfanout_connect.yml b/ansible/roles/fanout/tasks/rootfanout_connect.yml index 0231461b412..ebb1bc017d2 100644 --- a/ansible/roles/fanout/tasks/rootfanout_connect.yml +++ b/ansible/roles/fanout/tasks/rootfanout_connect.yml @@ -7,35 +7,30 @@ - set_fact: dut="{{ leaf_name }}" when: deploy_leaf -- debug: msg="Configuring fanout switch for {{ 
dut }}" - -- name: Gathering connection facts about the DUTs or leaffanout device - conn_graph_facts: - host: "{{ dut if ',' not in dut else omit }}" - hosts: "{{ dut.split(',') if ',' in dut else omit }}" - delegate_to: localhost +- name: Gathering connection facts about the DUT or leaffanout device + conn_graph_facts: host={{ dut }} + connection: local tags: always register: devinfo -- name: Gathering connection facts about the lab - conn_graph_facts: - anchor: "{{ dut.split(',') | list }}" - delegate_to: localhost +- name: Gathering connection facts about the lab + conn_graph_facts: + connection: local tags: always register: lab - set_fact: - dev_vlans: "{{ devinfo.ansible_facts.device_vlan_range|flatten(levels=1) }}" + dev_vlans: "{{ devinfo.ansible_facts.device_vlan_range }}" lab_devices: "{{ lab.ansible_facts.device_info }}" - name: Find the root fanout switch set_fact: ansible_host: "{{ lab_devices[item]['mgmtip'] }}" - root_dev: "{{ item }}" + root_dev: "{{ item }}" with_items: "{{ lab_devices }}" when: lab_devices[item]['Type'] == 'FanoutRoot' -- set_fact: +- set_fact: root_conn: "{{ lab.ansible_facts['device_conn'][root_dev] }}" - name: Change root fanout port vlan diff --git a/ansible/roles/fanout/templates/mlnx_deploy_pfcwd_fanout.j2 b/ansible/roles/fanout/templates/mlnx_deploy_pfcwd_fanout.j2 index 9e7d70c74f8..e15d3cbe441 100644 --- a/ansible/roles/fanout/templates/mlnx_deploy_pfcwd_fanout.j2 +++ b/ansible/roles/fanout/templates/mlnx_deploy_pfcwd_fanout.j2 @@ -3,8 +3,6 @@ config t docker no shutdown ping -c 5 8.8.8.8 docker label storm -docker no start storm -docker remove image pfc_storm latest docker load pfc_storm.tgz docker start pfc_storm latest storm init label storm privileged network sdk docker start pfc_storm latest storm now label storm privileged network sdk diff --git a/ansible/roles/fanout/templates/mlnx_fanout.j2 b/ansible/roles/fanout/templates/mlnx_fanout.j2 index 37ae64f3030..7116041f979 100644 --- 
a/ansible/roles/fanout/templates/mlnx_fanout.j2 +++ b/ansible/roles/fanout/templates/mlnx_fanout.j2 @@ -54,10 +54,6 @@ {% macro fanout_deploy() %} conf t - -{# Disable CLI paging #} -no cli default paging enable - no lldp no spanning-tree ip routing @@ -75,7 +71,7 @@ interface ethernet {{ qsfp_split_4[i] }} module-type qsfp-split-4 force {% endfor %} {% for i in range(1, port_speed|length) %} -interface ethernet {{ eth_ports[i] }} speed {{ port_speed[i] }} no-autoneg force +interface ethernet {{ eth_ports[i] }} speed {{ port_speed[i] }} force {% endfor %} {% for i in range(1, eth_ports|length) %} diff --git a/ansible/roles/fanout/templates/sonic_deploy.j2 b/ansible/roles/fanout/templates/sonic_deploy.j2 index 2fc2a11f470..008e5366fea 100644 --- a/ansible/roles/fanout/templates/sonic_deploy.j2 +++ b/ansible/roles/fanout/templates/sonic_deploy.j2 @@ -9,7 +9,7 @@ "PORT": { {% for alias in device_conn %} - "{{ alias }}": { + "{{ port_alias_map[alias] }}": { {% if device_conn[alias]['speed'] == "100000" %} "fec" : "rs", {% endif %} @@ -31,14 +31,14 @@ {% for alias in device_port_vlans %} {% if device_port_vlans[alias]['mode'] == 'Access' %} {% if ns.firstPrinted %},{% endif %} - "Vlan{{ device_port_vlans[alias]['vlanids'] }}|{{ alias }}": { + "Vlan{{ device_port_vlans[alias]['vlanids'] }}|{{ port_alias_map[alias] }}": { "tagging_mode" : "untagged" } {% if ns.update({'firstPrinted': True}) %} {% endif %} {% elif device_port_vlans[alias]['mode'] == 'Trunk' %} {% for vlanid in device_port_vlans[alias]['vlanlist'] %} {% if ns.firstPrinted %},{% endif %} - "Vlan{{ vlanid }}|{{ alias }}": { + "Vlan{{ vlanid }}|{{ port_alias_map[alias] }}": { "tagging_mode" : "tagged" } {% if ns.update({'firstPrinted': True}) %} {% endif %} diff --git a/ansible/roles/sonic-common/files/bin/vtysh b/ansible/roles/sonic-common/files/bin/vtysh index 359101c06c6..e69de29bb2d 100755 --- a/ansible/roles/sonic-common/files/bin/vtysh +++ b/ansible/roles/sonic-common/files/bin/vtysh @@ -1,2 +0,0 @@ 
-#!/bin/bash -docker exec -i bgp vtysh "$@" diff --git a/ansible/roles/sonic-common/files/docker_clean.sh b/ansible/roles/sonic-common/files/docker_clean.sh index d384b06f5a6..e69de29bb2d 100644 --- a/ansible/roles/sonic-common/files/docker_clean.sh +++ b/ansible/roles/sonic-common/files/docker_clean.sh @@ -1,16 +0,0 @@ -#!/bin/bash - -# Clean up untagged docker images, ie. ':' -docker images -q --filter "dangling=true" | xargs --no-run-if-empty docker rmi - -# Clean up unused docker images, but ignore untagged docker images -# Note: -# if there is no tag or repository for one image, it will shows as 'repository:' -# or ':TAG' or ':' -# docker ps ...: -# list all used image -# docker images ...: -# list all images by tag and by digest -# grep -xvf A B: -# exclude all lines from file B matching any whole line in file A -grep -xvf <(docker ps -a --format {{.Image}}) <(docker images --format '{{.Repository}}:{{.Tag}}\n{{.Repository}}@{{.Digest}}' | grep -v '') | xargs --no-run-if-empty docker rmi diff --git a/ansible/roles/sonic-common/tasks/database.yml b/ansible/roles/sonic-common/tasks/database.yml index 2d3c4c2854a..32e314c3153 100644 --- a/ansible/roles/sonic-common/tasks/database.yml +++ b/ansible/roles/sonic-common/tasks/database.yml @@ -1,5 +1,5 @@ - name: clean up old container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: docker-database docker_image: "{{ image_id_database }}" @@ -7,7 +7,7 @@ - name: Start the database docker container (redis-server) - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: database docker_image: "{{ image_id_database }}" diff --git a/ansible/roles/sonic-common/tasks/dhcp_relay.yml b/ansible/roles/sonic-common/tasks/dhcp_relay.yml index 7af40e1b960..05917db58ff 100644 --- a/ansible/roles/sonic-common/tasks/dhcp_relay.yml +++ b/ansible/roles/sonic-common/tasks/dhcp_relay.yml @@ -1,5 +1,5 @@ - name: Ensure DHCP Relay container started - include_tasks: 
sonicdocker.yml + include: sonicdocker.yml vars: docker_container: dhcp_relay docker_image: "{{ image_id_dhcp_relay }}" diff --git a/ansible/roles/sonic-common/tasks/lldp.yml b/ansible/roles/sonic-common/tasks/lldp.yml index 4384bea123f..d1df01ba973 100644 --- a/ansible/roles/sonic-common/tasks/lldp.yml +++ b/ansible/roles/sonic-common/tasks/lldp.yml @@ -1,5 +1,5 @@ - name: Clean up old container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: docker-lldp docker_image: "{{ image_id_lldp }}" @@ -12,7 +12,7 @@ when: sonic_version == "v2" - name: Ensure LLDP container started - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: lldp docker_image: "{{ image_id_lldp }}" @@ -23,10 +23,6 @@ docker_volumes: - /etc/sonic/:/etc/sonic/:ro -- include_tasks: add_container_to_inventory.yml - vars: - container_name: lldp - - block: - name: Setup LLDPD Daemon Config File become: true @@ -47,7 +43,9 @@ # Force handler flush to trigger daemon restarts - meta: flush_handlers - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python - name: Copy lldpctl helper script become: true diff --git a/ansible/roles/sonic-common/tasks/main.yml b/ansible/roles/sonic-common/tasks/main.yml index 3ded2299561..2b2cfb639d0 100644 --- a/ansible/roles/sonic-common/tasks/main.yml +++ b/ansible/roles/sonic-common/tasks/main.yml @@ -26,18 +26,12 @@ when: sonic_hwsku in barefoot_hwskus tags: always -- name: Set sonic_asic_type fact - set_fact: - sonic_asic_type: marvell - when: sonic_hwsku in marvell_hwskus - tags: always - - name: update sonicadmin password - include_tasks: passwd.yml + include: passwd.yml tags: system # Setup apt repo -- include_tasks: aptrepo.yml +- include: aptrepo.yml tags: repo - name: Ensure /etc/sonic path exists @@ -178,11 +172,11 @@ tags: network,unsafe # Docker engine -- include_tasks: docker.yml +- include: docker.yml tags: docker ## 
Redis database -- include_tasks: database.yml +- include: database.yml tags: - swss - database @@ -207,7 +201,7 @@ when: sonic_hwsku != "AS7512" # Setup Platform -- include_tasks: platform.yml +- include: platform.yml tags: platform,unsafe # Install Persistent Iptables Package @@ -218,21 +212,21 @@ tags: unsafe # setup sudoers -- include_tasks: sudoers.yml +- include: sudoers.yml # Install Logrotate -- include_tasks: logrotate.yml +- include: logrotate.yml # SNMP -- include_tasks: snmp.yml +- include: snmp.yml tags: snmp # LLDP -- include_tasks: lldp.yml +- include: lldp.yml tags: lldp # DHCP Relay -- include_tasks: dhcp_relay.yml +- include: dhcp_relay.yml tags: dhcp_relay when: minigraph_devices[inventory_hostname]['type'] == "ToRRouter" diff --git a/ansible/roles/sonic-common/tasks/platform.yml b/ansible/roles/sonic-common/tasks/platform.yml index 5801f5aeb71..cab0a366470 100644 --- a/ansible/roles/sonic-common/tasks/platform.yml +++ b/ansible/roles/sonic-common/tasks/platform.yml @@ -1,18 +1,18 @@ # Install Platform Modules for Dell - name: Confirm Platform Modules for Dell are installed become: true - include_tasks: platform-dell.yml + include: platform-dell.yml when: sonic_hwsku == "Force10-S6000" # Install Platform Modules for Mellanox - name: Confirm Platform Modules for Mellanox are installed become: true - include_tasks: platform-mlnx.yml + include: platform-mlnx.yml when: sonic_hwsku == "ACS-MSN2700" # Install Platform Modules for Cavium - name: Confirm Platform Modules for Cavium are installed become: true - include_tasks: platform-cavm.yml + include: platform-cavm.yml when: sonic_hwsku == "AS7512" diff --git a/ansible/roles/sonic-common/tasks/sensors_check.yml b/ansible/roles/sonic-common/tasks/sensors_check.yml index 0178df17b96..e69de29bb2d 100644 --- a/ansible/roles/sonic-common/tasks/sensors_check.yml +++ b/ansible/roles/sonic-common/tasks/sensors_check.yml @@ -1,4 +0,0 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - 
vars: - test_node: platform/test_sensors.py \ No newline at end of file diff --git a/ansible/roles/sonic-common/tasks/snmp.yml b/ansible/roles/sonic-common/tasks/snmp.yml index ad4f1954905..02a15c51aa5 100644 --- a/ansible/roles/sonic-common/tasks/snmp.yml +++ b/ansible/roles/sonic-common/tasks/snmp.yml @@ -1,5 +1,5 @@ - name: Clean up old container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: docker-snmp docker_image: "{{ image_id_snmp }}" @@ -12,7 +12,7 @@ when: sonic_version == "v2" - name: Ensure SNMP container started - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: snmp docker_image: "{{ image_id_snmp }}" @@ -42,13 +42,13 @@ template: src=snmp.yml.j2 dest=/etc/sonic/snmp.yml mode=0644 - + - name: Setup sonic_version file (for oneimage snmp dockers) become: true template: src=sonic_version.yml.j2 dest=/etc/sonic/sonic_version.yml mode=0644 - + - name: Setup sysDescription file (for legacy snmp dockers) become: true template: src=sysDescription.j2 @@ -60,13 +60,9 @@ - name: Determine if alias mappings are required stat: path="roles/sonicv2/files/ssw/{{ sonic_hwsku }}/alias_map.json" become: false - delegate_to: localhost + connection: local register: snmp_remap -- include_tasks: add_container_to_inventory.yml - vars: - container_name: snmp - - block: - name: Check version of snmpd shell: dpkg-query -W --showformat='${Version}' snmpd @@ -112,4 +108,6 @@ # Force handler flush to trigger daemon restarts - meta: flush_handlers - delegate_to: "{{ ansible_host }}_snmp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i snmp python diff --git a/ansible/roles/sonic-common/tasks/sonicdocker.yml b/ansible/roles/sonic-common/tasks/sonicdocker.yml index 966327ef5be..e69de29bb2d 100644 --- a/ansible/roles/sonic-common/tasks/sonicdocker.yml +++ b/ansible/roles/sonic-common/tasks/sonicdocker.yml @@ -1,194 +0,0 @@ -## -## Encapsulate docker module with private docker 
registry, manage the container service -## by systemd on host, so it has full featured depdency control and restart policy -## -## The encapsulated module - sonicdocker -## docker_state: emulate the behavior of docker module -## ref: http://docs.ansible.com/ansible/docker_module.html -## -## +-------------+------+--------------+----------+---------------------+----------------+-------------+ -## | sonicdocker | pull | stop service | docker | post service | enable service | clean image | -## +-------------+------+--------------+----------+---------------------+----------------+-------------+ -## | present | | | present | | y | | -## | started | | | present | started | y | | -## | reloaded | y | pulled? | reloaded | restarted if pulled | y | if pulled | -## | | | | | started if not | | | -## | restarted | | | present | restarted | y | | -## | stopped | | y | stopped | stopped | n | | -## | killed | | y | killed | | n | | -## | absent | | y | absent | | n | y | -## +-------------+------+--------------+----------+---------------------+----------------+-------------+ -## - -## Set default values for the module variables, emulating local variable definition -## Note: must be consistent with tail part -- name: "{{docker_container}} - Set docker variable - docker_net" - set_fact: - docker_net: host - when: docker_net is undefined -- name: "{{docker_container}} - Set docker variable - docker_state" - set_fact: - docker_state: reloaded - when: docker_state is undefined -- name: "{{docker_container}} - Set docker variable - docker_volumes" - set_fact: - docker_volumes: [] - when: docker_volumes is undefined -- name: "{{docker_container}} - Set docker variable - docker_volumes_from" - set_fact: - docker_volumes_from: [] - when: docker_volumes_from is undefined -- name: "{{docker_container}} - Set docker variable - docker_privileged" - set_fact: - docker_privileged: no - when: docker_privileged is undefined -- name: "{{docker_container}} - Set docker variable - 
docker_log_driver" - set_fact: - docker_log_driver: json-file - when: docker_log_driver is undefined -- name: "{{docker_container}} - Set docker variable - docker_env" - set_fact: - docker_env: {} - when: docker_env is undefined -- name: "{{docker_container}} - Set docker variable - docker_tty" - set_fact: - docker_tty: yes - when: docker_tty is undefined -- name: "{{docker_container}} - Set docker variable - docker_log_opt" - set_fact: - docker_log_opt: {} - when: docker_log_driver != "syslog" -- name: "{{docker_container}} - Set docker variable - docker_log_opt" - set_fact: - docker_log_opt: - ## TRICK! TRICK! TRICK! - ## in ansible 2.0.0.2, reference set_fact varialbe will introduce recursive templating - ## so double escape by {{'...'}} and {%raw%}...{%endraw%} - tag: "{{'{%raw%}{{.ID}}({{.Name}}{%endraw%}'}})" - when: docker_log_driver == "syslog" - -## Local variables -- name: "{{docker_container}} - Set docker variable - sonicdocker_container_state" - set_fact: - sonicdocker_container_state: "{{docker_state}}" -- name: "{{docker_container}} - Set docker variable - sonicdocker_container_state" - set_fact: - sonicdocker_container_state: present - when: docker_state in ['present', 'started', 'restarted'] - -## Copy systemd config files for docker container -- name: "{{docker_container}} - Copy systemd config files for docker container" - become: true - template: - src="etc/systemd/system/{{docker_container}}.j2" - dest="/etc/systemd/system/{{docker_container}}.service" - owner=root - group=root - mode=0644 - register: configfile_result - when: "docker_state not in ['absent']" - -- name: "{{docker_container}} - Reload systemd" - command: systemctl daemon-reload - become: yes - when: configfile_result.changed - -- block: - ## Clean up images before pulling - - name: "{{docker_container}} - Clean up images before pulling" - include_tasks: sonicdocker_clean.yml - - ## Pull docker image from registry - - name: "{{docker_container}} - Pull docker image from registry" 
- shell: docker pull {{docker_registry_host}}/{{docker_image}} - register: pull_result - changed_when: "'Status: Downloaded newer image' in pull_result.stdout" - when: docker_state == 'reloaded' - -## Stop container service after pulled -- name: "{{docker_container}} - Stop container service after pulled" - become: true - service: name="{{docker_container}}" - state=stopped - when: "(docker_state == 'reloaded' and 'Status: Downloaded newer image' in pull_result.stdout) \ - or docker_state in ['stopped', 'killed']" - -## Clean up systemd config files for docker container -- name: "{{docker_container}} - Delete systemd config file for docker container" - become: true - file: - path="/etc/systemd/system/{{docker_container}}.service" - state=absent - when: "docker_state in ['absent']" - register: configfile_remove - -- name: "{{docker_container}} - Reload systemd" - command: systemctl daemon-reload - become: yes - when: configfile_remove.changed - -- name: "{{docker_container}} - Control docker container" - docker: - name: "{{docker_container}}" - image: "{{docker_registry_host}}/{{docker_image}}" - state: "{{sonicdocker_container_state}}" - ## Already pulled by upper task - pull: missing - detach: yes - net: "{{docker_net}}" - tty: "{{docker_tty}}" - stdin_open: yes - registry: "https://{{docker_registry_host}}" - email: "@" - volumes: "{{docker_volumes}}" - volumes_from: "{{docker_volumes_from}}" - privileged: "{{docker_privileged}}" - env: "{{docker_env}}" - log_driver: "{{docker_log_driver}}" - log_opt: "{{docker_log_opt}}" - -## Container service operation -- name: "{{docker_container}} - Post docker - restart container service" - become: true - service: name="{{docker_container}}" - state=restarted - when: "docker_state == 'restarted' or \ - docker_state == 'reloaded' and 'Status: Downloaded newer image' in pull_result.stdout" - -- name: "{{docker_container}} - Post docker - start container service" - become: true - service: name="{{docker_container}}" - 
state=started - when: "docker_state == 'started' or docker_state == 'reloaded'" - -- name: "{{docker_container}} - Post docker - stop and disable container service" - become: true - service: name="{{docker_container}}" - state=stopped - enabled=no - when: docker_state == 'stopped' - -- name: "{{docker_container}} - Post docker - enable container service" - become: true - service: name="{{docker_container}}" - enabled={{docker_state in ['present', 'started', 'reloaded', 'restarted']}} - -## Clean up images after pulled and running -- name: "{{docker_container}} - Clean up images after pulled and running" - include_tasks: sonicdocker_clean.yml - when: "(docker_state == 'reloaded' and 'Status: Downloaded newer image' in pull_result.stdout) or \ - docker_state == 'absent'" - -## Reset the module variables to default values to prevent global side-effect -## Note: must be consistent with header part -- name: "{{docker_container}} - Clean up sonicdocker variables" - set_fact: - docker_image: '' - docker_net: host - docker_state: reloaded - docker_volumes: [] - docker_volumes_from: [] - docker_privileged: no - docker_log_driver: json-file - docker_env: {} - docker_tty: yes diff --git a/ansible/roles/sonic-common/tasks/sonicdocker_clean.yml b/ansible/roles/sonic-common/tasks/sonicdocker_clean.yml index 05632a38b24..e69de29bb2d 100644 --- a/ansible/roles/sonic-common/tasks/sonicdocker_clean.yml +++ b/ansible/roles/sonic-common/tasks/sonicdocker_clean.yml @@ -1,5 +0,0 @@ -- name: Clean up unused docker images - script: "files/docker_clean.sh" - register: rmi_result - changed_when: rmi_result.stdout != "" - failed_when: rmi_result.stderr != "" or rmi_result.rc != 0 diff --git a/ansible/roles/sonicv2/tasks/main.yml b/ansible/roles/sonicv2/tasks/main.yml index cd7c503c215..bc63cbe943e 100644 --- a/ansible/roles/sonicv2/tasks/main.yml +++ b/ansible/roles/sonicv2/tasks/main.yml @@ -42,20 +42,20 @@ mode=0755 # SONiC -- include_tasks: sonic-brcm.yml +- include: sonic-brcm.yml when: 
sonic_asic_type == 'broadcom' tags: swss,unsafe -- include_tasks: sonic-mlnx.yml +- include: sonic-mlnx.yml when: sonic_asic_type == 'mellanox' tags: swss,unsafe -- include_tasks: sonic-cavm.yml +- include: sonic-cavm.yml when: sonic_asic_type == 'cavium' tags: swss,unsafe -- include_tasks: quagga.yml +- include: quagga.yml tags: quagga,unsafe -- include_tasks: teamd.yml +- include: teamd.yml tags: teamd,unsafe diff --git a/ansible/roles/sonicv2/tasks/quagga.yml b/ansible/roles/sonicv2/tasks/quagga.yml index e63f82e2344..d51437517c1 100644 --- a/ansible/roles/sonicv2/tasks/quagga.yml +++ b/ansible/roles/sonicv2/tasks/quagga.yml @@ -1,5 +1,5 @@ - name: Clean up old docker-bgp container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: docker-bgp docker_image: "docker-bgp" @@ -7,7 +7,7 @@ docker_state: absent - name: Clean up old bgp container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: bgp docker_image: "docker-bgp" @@ -15,7 +15,7 @@ docker_state: absent - name: Start the BGP docker container (Quagga) - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: bgp docker_image: "{{ image_id_fpm }}" @@ -26,10 +26,6 @@ docker_volumes_from: - database -- include_tasks: add_container_to_inventory.yml - vars: - container_name: bgp - - block: - name: Copy Device Specific Quagga Zebra Configuration File. 
become: true @@ -82,7 +78,9 @@ - isolate - unisolate - delegate_to: "{{ ansible_host }}_bgp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i bgp python - name: Copy vtysh helper script become: true diff --git a/ansible/roles/sonicv2/tasks/sonic-brcm.yml b/ansible/roles/sonicv2/tasks/sonic-brcm.yml index 334ef690d88..9377d114f70 100644 --- a/ansible/roles/sonicv2/tasks/sonic-brcm.yml +++ b/ansible/roles/sonicv2/tasks/sonic-brcm.yml @@ -2,7 +2,7 @@ # Remove v1 docker containers - name: Remove docker-sswsyncd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: docker-sswsyncd docker_image: "docker-sswsyncd" @@ -10,7 +10,7 @@ docker_state: absent - name: Remove sswsyncd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: sswsyncd docker_image: "docker-sswsyncd" @@ -33,7 +33,7 @@ # Install docker containers - name: Start syncd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: syncd docker_image: "{{ image_id_syncd_rpc if host_saithrift is defined else image_id_syncd }}" @@ -59,7 +59,7 @@ # Remove deprecated orchagent container - name: Remove orchagent docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: orchagent docker_image: "{{ image_id_orchagent }}" @@ -68,7 +68,7 @@ tags: orchagent - name: Start swss docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: swss docker_image: "{{ image_id_orchagent }}" diff --git a/ansible/roles/sonicv2/tasks/sonic-cavm.yml b/ansible/roles/sonicv2/tasks/sonic-cavm.yml index 4b83285c16c..4f7f8e599b4 100644 --- a/ansible/roles/sonicv2/tasks/sonic-cavm.yml +++ b/ansible/roles/sonicv2/tasks/sonic-cavm.yml @@ -1,7 +1,7 @@ # SONiC # Setup Platform -#- include_tasks: platform-cavm.yml +#- include: platform-cavm.yml # Install Cavium Host Interface Driver - name: Install 
driver package @@ -18,7 +18,7 @@ # Install docker containers - name: Start syncd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: syncd docker_image: "{{ image_id_syncd_cavm }}" @@ -32,7 +32,7 @@ # Remove deprecated orchagent container - name: Remove orchagent docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: orchagent docker_image: "{{ image_id_orchagent_cavm }}" @@ -41,7 +41,7 @@ tags: orchagent - name: Start swss docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: swss docker_image: "{{ image_id_orchagent_cavm }}" diff --git a/ansible/roles/sonicv2/tasks/sonic-mlnx.yml b/ansible/roles/sonicv2/tasks/sonic-mlnx.yml index 0abae984e80..18ed6bfb395 100644 --- a/ansible/roles/sonicv2/tasks/sonic-mlnx.yml +++ b/ansible/roles/sonicv2/tasks/sonic-mlnx.yml @@ -2,7 +2,7 @@ # Remove v1 docker containers - name: Remove docker-sswsyncd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: docker-sswsyncd docker_image: "docker-sswsyncd" @@ -10,7 +10,7 @@ docker_state: absent - name: Remove sswsyncd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: sswsyncd docker_image: "docker-sswsyncd" @@ -26,7 +26,7 @@ # Install docker containers - name: Start syncd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: syncd docker_image: "{{ image_id_syncd_mlnx_rpc if host_saithrift is defined else image_id_syncd_mlnx }}" @@ -39,7 +39,7 @@ # Remove deprecated orchagent container - name: Remove orchagent docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: orchagent docker_image: "{{ image_id_orchagent_mlnx }}" @@ -48,7 +48,7 @@ tags: orchagent - name: Start swss docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: 
docker_container: swss docker_image: "{{ image_id_orchagent_mlnx }}" diff --git a/ansible/roles/sonicv2/tasks/teamd.yml b/ansible/roles/sonicv2/tasks/teamd.yml index 207b6604892..e71835ca88e 100644 --- a/ansible/roles/sonicv2/tasks/teamd.yml +++ b/ansible/roles/sonicv2/tasks/teamd.yml @@ -1,5 +1,5 @@ - name: Start the teamd docker container - include_tasks: sonicdocker.yml + include: sonicdocker.yml vars: docker_container: teamd docker_image: "{{ image_id_teamd }}" @@ -10,5 +10,5 @@ docker_volumes_from: - database -- include_tasks: teamd_interface.yml +- include: teamd_interface.yml with_items: "{{ minigraph_portchannel_interfaces }}" diff --git a/ansible/roles/sonicv2/tasks/teamd_interface.yml b/ansible/roles/sonicv2/tasks/teamd_interface.yml index 4e7f941e4d0..feeda01322a 100644 --- a/ansible/roles/sonicv2/tasks/teamd_interface.yml +++ b/ansible/roles/sonicv2/tasks/teamd_interface.yml @@ -1,21 +1,19 @@ -- include_tasks: add_container_to_inventory.yml - vars: - container_name: teamd - - block: - - name: Ensure /etc/teamd folder exists - become: true - file: path=/etc/teamd - state=directory + - name: Ensure /etc/teamd folder exists + become: true + file: path=/etc/teamd + state=directory - - debug: msg={{ item }} + - debug: msg={{ item }} - - name: Copy teamd configuration file - become: true - template: src=teamd.j2 - dest=/etc/teamd/{{ item['name'] }}.conf - owner=root - group=root - mode=644 + - name: Copy teamd configuration file + become: true + template: src=teamd.j2 + dest=/etc/teamd/{{ item['name'] }}.conf + owner=root + group=root + mode=644 - delegate_to: "{{ ansible_host }}_teamd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i teamd python diff --git a/ansible/roles/sonicv2/templates/etc/systemd/system/orchagent.j2 b/ansible/roles/sonicv2/templates/etc/systemd/system/orchagent.j2 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2 
b/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2 index dee037e90d2..e69de29bb2d 100644 --- a/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2 +++ b/ansible/roles/sonicv2/templates/etc/systemd/system/syncd.j2 @@ -1,29 +0,0 @@ -[Unit] -Description=syncd container -Requires=database.service -After=database.service swss.service - -[Service] -User=root -{% if sonic_hwsku == 'ACS-MSN2700' %} -ExecStartPre=/etc/init.d/sxdkernel start -ExecStartPre=/usr/bin/mst start -ExecStartPre=/etc/mlnx/msn2700 start -{% elif sonic_hwsku == 'AS7512' %} -ExecStartPre=-/etc/init.d/xpnet.sh stop -ExecStartPre=/etc/init.d/xpnet.sh start -{% endif %} -ExecStart=/usr/bin/docker start -a syncd -ExecStop=/usr/bin/docker stop syncd -{% if sonic_hwsku == 'ACS-MSN2700' %} -ExecStopPost=/etc/mlnx/msn2700 stop -ExecStopPost=/etc/init.d/sxdkernel stop -ExecStopPost=/usr/bin/mst stop -{% elif sonic_hwsku == 'AS7512' %} -ExecStopPost=/etc/init.d/xpnet.sh stop -ExecStopPost=/etc/init.d/xpnet.sh start -{% endif %} -Restart=always - -[Install] -WantedBy=multi-user.target diff --git a/ansible/roles/sonicv2/templates/teamd.j2 b/ansible/roles/sonicv2/templates/teamd.j2 index 2de7f83e17c..8f549ddf7b6 100644 --- a/ansible/roles/sonicv2/templates/teamd.j2 +++ b/ansible/roles/sonicv2/templates/teamd.j2 @@ -3,12 +3,8 @@ "runner": { "name": "lacp", "active": true, -{% if PORTCHANNEL[pc]['fallback'] and ((PORTCHANNEL[pc]['members'] | length) == 1) %} - "fallback": {{ PORTCHANNEL[pc]['fallback'] }}, -{% else %} {# Use 75% links upperbound as min-links #} "min_ports": {{item['members'] | length * 0.75 | round(0, 'ceil') | int}}, -{% endif %} "tx_hash": ["eth", "ipv4", "ipv6"] }, "link_watch": { diff --git a/ansible/roles/test/files/helpers/mem_check.sh b/ansible/roles/test/files/helpers/mem_check.sh new file mode 100644 index 00000000000..eeb2250bdeb --- /dev/null +++ b/ansible/roles/test/files/helpers/mem_check.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# +# mem_check.sh +# +# Check for 
memory leaks in Redis client output buffers +# Returns 0 if under threshold, 1 if over threshold +# + +REDIS_CLIENT_LIST_OUTPUT_FILE=/tmp/redis_client_list + +OMEM_THRESHOLD_BYTES=1048576 # 1MB + +TOTAL_OMEM_BYTES=0 + +# Save 'redis-cli client list' output to temp file +/usr/bin/redis-cli client list > $REDIS_CLIENT_LIST_OUTPUT_FILE + +# Extract 'omem' value from each line (client) +while read LINE; do + OMEM_BYTES=$(echo $LINE | sed 's/.*omem=\([0-9][0-9]*\) .*/\1/') + TOTAL_OMEM_BYTES=$((TOTAL_OMEM_BYTES += OMEM_BYTES)) +done < $REDIS_CLIENT_LIST_OUTPUT_FILE + +# Clean up +rm $REDIS_CLIENT_LIST_OUTPUT_FILE + +if [ $TOTAL_OMEM_BYTES -gt $OMEM_THRESHOLD_BYTES ]; then + exit 1 +fi + +exit 0 + diff --git a/ansible/roles/test/files/ptftests/dip_sip.py b/ansible/roles/test/files/ptftests/dip_sip.py new file mode 100644 index 00000000000..eb6ab52c10e --- /dev/null +++ b/ansible/roles/test/files/ptftests/dip_sip.py @@ -0,0 +1,199 @@ +''' +Description: + This file contains the DIP=SIP test for SONiC + + This test uses UDP packets to validate that HW supports routing of L3 packets with DIP=SIP + +Topologies: + Supports t0, t0-16, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag t1-64-lag and t1-64-lag-clet topology + +Parameters: + testbed_type - testbed type + dst_host_mac - destination host MAC address + src_host_mac - source host MAC address + dst_router_mac - destination router MAC address + src_router_mac - source router MAC address + dst_router_ipv4 - destination router IPv4 address + src_router_ipv4 - source router IPv4 address + dst_router_ipv6 - destination router IPv6 address + src_router_ipv6 - source router IPv6 address + dst_port_ids - destination port array of indices (when router has a members) + src_port_ids - source port array of indices (when router has a members) + +Usage: + Example of how to start this script: + ptf --test-dir ptftests dip_sip.DipSipTest --platform-dir ptftests --platform remote \ + -t "testbed_type=''; \ + dst_host_mac=''; \ + 
src_host_mac=''; \ + dst_router_mac=''; \ + src_router_mac=''; \ + dst_router_ipv4=''; \ + src_router_ipv4=''; \ + dst_router_ipv6=''; \ + src_router_ipv6=''; \ + dst_port_ids=''; \ + src_port_ids=''" \ + --relax --debug info --log-file /tmp/dip_sip.DipSipTest.log \ + --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre + +Notes: + Please check the dip_sip.yml file to see the details of how this test works +''' + +#------------------------------------------------------------------------------- +# Global imports +#------------------------------------------------------------------------------- + +import logging +import ptf + +from ipaddress import ip_address +from ptf.base_tests import BaseTest + +from ptf.testutils import test_params_get +from ptf.testutils import simple_udp_packet +from ptf.testutils import simple_udpv6_packet +from ptf.testutils import send +from ptf.testutils import verify_packet_any_port + +#------------------------------------------------------------------------------- +# Testcase +#------------------------------------------------------------------------------- + +class PortLagRouterBasedTest: + def __init__(self, dipSipTest): + self.test = dipSipTest + self.testParams = dipSipTest.test_params + #-------------------------------------------------------------------------- + + def logParams(self): + self.test.log("Destination router mac is: " + self.dstRouterMac) + self.test.log("Destination router ipv4 is: " + self.dstRouterIpv4) + self.test.log("Destination router ipv6 is: " + self.dstRouterIpv6) + + self.test.log("Destination host mac is: " + self.dstHostMac) + self.test.log("Destination host ipv4 is: " + self.dstHostIpv4) + self.test.log("Destination host ipv6 is: " + self.dstHostIpv6) + + self.test.log("Source router mac is: " + self.srcRouterMac) + self.test.log("Source router ipv4 is: " + self.srcRouterIpv4) + self.test.log("Source router ipv6 is: " + self.srcRouterIpv6) + + self.test.log("Source host mac is: " + 
self.srcHostMac) + self.test.log("Source host ipv4 is: " + self.srcHostIpv4) + self.test.log("Source host ipv6 is: " + self.srcHostIpv6) + + self.test.log("Destination port ids is: " + str([int(portId) for portId in self.dstPortIds])) + self.test.log("Source port ids is: " + str([int(portId) for portId in self.srcPortIds])) + + self.test.log("Packet TTL/HL is: " + str(self.pktTtlHlim)) + #-------------------------------------------------------------------------- + + def setUpParams(self): + self.dstRouterMac = self.testParams['dst_router_mac'] + self.dstRouterIpv4 = self.testParams['dst_router_ipv4'] + self.dstRouterIpv6 = self.testParams['dst_router_ipv6'] + + self.dstHostMac = self.testParams['dst_host_mac'] + self.dstHostIpv4 = str(ip_address(unicode(self.testParams['dst_router_ipv4'])) + 1) + self.dstHostIpv6 = str(ip_address(unicode(self.testParams['dst_router_ipv6'])) + 1) + + self.srcRouterMac = self.testParams['src_router_mac'] + self.srcRouterIpv4 = self.testParams['src_router_ipv4'] + self.srcRouterIpv6 = self.testParams['src_router_ipv6'] + + self.srcHostMac = self.testParams['src_host_mac'] + self.srcHostIpv4 = str(ip_address(unicode(self.testParams['src_router_ipv4'])) + 1) + self.srcHostIpv6 = str(ip_address(unicode(self.testParams['src_router_ipv6'])) + 1) + + self.dstPortIds = self.testParams['dst_port_ids'] + self.srcPortIds = self.testParams['src_port_ids'] + + self.pktTtlHlim = 64 # Default packet TTL/HL value + #-------------------------------------------------------------------------- + + def runTestIpv6(self): + self.test.log("Run IPv6 based test") + + pkt = simple_udpv6_packet(eth_dst=self.srcRouterMac, + eth_src=self.srcHostMac, + ipv6_src=self.dstHostIpv6, + ipv6_dst=self.dstHostIpv6, + ipv6_hlim=self.pktTtlHlim) + send(self.test, int(self.srcPortIds[0]), pkt) + + pkt = simple_udpv6_packet(eth_dst=self.dstHostMac, + eth_src=self.dstRouterMac, + ipv6_src=self.dstHostIpv6, + ipv6_dst=self.dstHostIpv6, + ipv6_hlim=self.pktTtlHlim-1) + + 
verify_packet_any_port(self.test, pkt, [int(port) for port in self.dstPortIds]) + + self.test.log("IPv6 based test: done") + #-------------------------------------------------------------------------- + + def runTestIpv4(self): + self.test.log("Run IPv4 based test") + + pkt = simple_udp_packet(eth_dst=self.srcRouterMac, + eth_src=self.srcHostMac, + ip_src=self.dstHostIpv4, + ip_dst=self.dstHostIpv4, + ip_ttl=self.pktTtlHlim) + send(self.test, int(self.srcPortIds[0]), pkt) + + pkt = simple_udp_packet(eth_dst=self.dstHostMac, + eth_src=self.dstRouterMac, + ip_src=self.dstHostIpv4, + ip_dst=self.dstHostIpv4, + ip_ttl=self.pktTtlHlim-1) + + verify_packet_any_port(self.test, pkt, [int(port) for port in self.dstPortIds]) + + self.test.log("IPv4 based test: done") + #-------------------------------------------------------------------------- + + def runTest(self): + self.setUpParams() + self.logParams() + + self.runTestIpv4() + self.runTestIpv6() + #-------------------------------------------------------------------------- + +class DipSipTest(BaseTest): + def __init__(self): + BaseTest.__init__(self) + #-------------------------------------------------------------------------- + + def log(self, message): + logging.info(message) + #-------------------------------------------------------------------------- + + def setUp(self): + self.log("SetUp testbed") + + self.dataplane = ptf.dataplane_instance + self.test_params = test_params_get() + self.testbed_type = self.test_params['testbed_type'] + #-------------------------------------------------------------------------- + + def tearDown(self): + self.log("TearDown testbed") + #-------------------------------------------------------------------------- + + def runTest(self): + if self.testbed_type in ['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-64-lag-clet']: + self.log("Run PORT/LAG-router based test") + + test = PortLagRouterBasedTest(self) + test.runTest() + + 
self.log("PORT/LAG-router based test: done") + + return + + self.fail("Unexpected testbed type %s!" % (self.testbed_type)) + #-------------------------------------------------------------------------- diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml index 76d7de11601..924ce88ec92 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml @@ -4,50 +4,17 @@ - debug: msg="starting loganalyzer analysis phase" -- set_fact: - loganalyzer_location: "{{ 'roles/test/files/tools/loganalyzer' }}" - -- set_fact: - match_file: loganalyzer_common_match.txt - when: match_file is not defined - -- set_fact: - ignore_file: loganalyzer_common_ignore.txt - when: ignore_file is not defined - -- set_fact: - expect_file: "loganalyzer_common_expect.txt" - when: expect_file is not defined - -- set_fact: - testname_unique: "{{ testname }}.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}" - when: testname_unique is not defined - -- set_fact: - test_out_dir: "{{ out_dir }}/{{ testname_unique }}" - when: test_out_dir is not defined - - name: Init variables set_fact: - match_file_option: "-m " + match_file_option: "-m {{ match_file }}" ignore_file_option: "-i {{ ignore_file }}" expect_file_option: "-e {{ expect_file }}" - match_file_list: [] -- name: Collect list of match files - common_match +- name: Add test specific match file set_fact: - match_file_list: "{{ match_file_list + [match_file] }}" - when: skip_common_match is not defined - -- name: Collect list of match files - test_match - set_fact: - match_file_list: "{{ match_file_list + [test_match_file] }}" + match_file_option: "{{ match_file_option }},{{ test_match_file }} " when: test_match_file is defined -- name: Add match file option - set_fact: - match_file_option: "{{ match_file_option }}{{ match_file_list | join(',') }}" - - name: Add test 
specific ignore file set_fact: ignore_file_option: "{{ ignore_file_option }},{{ test_ignore_file }}" @@ -62,40 +29,6 @@ set_fact: tmp_log_file: '/tmp/syslog' -#---------------------------------------------------------------------------------- -# Copy all loganalyzer related file to DUT, and invoke loganalyzer with init phase -#---------------------------------------------------------------------------------- - -- name: Copy loganalyzer common match and ignore files to switch - copy: src="{{ loganalyzer_location }}/{{ item }}" dest="{{ run_dir }}/{{ item }}" - with_items: - - "{{ match_file }}" - - "{{ ignore_file }}" - - "{{ expect_file }}" - -- name: Copy test specific file match-files to switch - copy: src="{{ tests_location }}/{{ testname }}/{{ test_match_file }}" dest="{{ run_dir }}/{{ test_match_file }}" - when: test_match_file is defined - -- name: Copy test specific ignore-files to switch - copy: src="{{ tests_location }}/{{ testname }}/{{ test_ignore_file }}" dest="{{ run_dir }}/{{ test_ignore_file }}" - when: test_ignore_file is defined - -- name: Copy test specific expect-files to switch - copy: src="{{ tests_location }}/{{ testname }}/{{ test_expect_file }}" dest="{{ run_dir }}/{{ test_expect_file }}" - when: test_expect_file is defined - -- name: Copy loganalyzer.py to run directory - copy: src="{{ loganalyzer_location }}/loganalyzer.py" dest="{{ run_dir }}" - -# Create directory to hold results for different runs of loganalyzer -- name: create output directory - file: path="{{ out_dir }}" state=directory - -# Create directory where loganalyzer will write output files for current run. 
-- name: create output directory for current test run - file: path="{{ test_out_dir }}" state=directory - - block: - name: Disable logrotate cron task shell: sed -i 's/^/#/g' /etc/cron.d/logrotate diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt index 433755e2a06..03bada03d8b 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt @@ -1,5 +1 @@ r, ".* ERR ntpd.*routing socket reports: No buffer space available.*" -r, ".* ERR liblogging-stdlog: omfwd: error 11 sending via udp: Resource temporarily unavailable.*" -r, ".* ERR syncd#syncd: brcm_sai_get_port_stats:.* port stats get failed with error.*" -r, ".* NOTICE kernel:.*profile=""/usr/sbin/ntpd"" name=""sbin"" pid=.* comm=""ntpd"" requested_mask=.*" -r, ".* ERR snmp#snmp-subagent.*" diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt index 23c2870e4a9..d17c702965f 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_match.txt @@ -1,6 +1,6 @@ r, "\.ERR", "\.WARN", "crash" r, "kernel:.*Oops", "kernel:.*hung", "kernel.*oom\s" r, "kernel:.*scheduling", "kernel:.*atomic", "kernel:.*panic" -r, "kernel:.*\serr", "kernel:.*allocation", "kernel:.*kill" -r, "kernel:.*kmemleak.*", "kernel:.* Err:" +r, "kernel:.*\serr", "kernel:.*allocation", "kernel:.*kill", +r, "kernel:.*kmemleak.*","kernel:.* Err:" s, "ERR" diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_init.yml b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_init.yml index e46fbe8faa8..c43c21ea2d4 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_init.yml +++ 
b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_init.yml @@ -1,16 +1,60 @@ #---------------------------------------------------------------------------------- -# Invoke loganalyzer with init phase +# Copy all loganalyzer related file to DUT, and invoke loganalyzer with init phase #---------------------------------------------------------------------------------- + - set_fact: loganalyzer_location: "{{ 'roles/test/files/tools/loganalyzer' }}" -- name: Copy loganalyzer.py to run directory - copy: src="{{ loganalyzer_location }}/loganalyzer.py" dest="{{ run_dir }}" +- set_fact: + match_file: loganalyzer_common_match.txt + when: match_file is not defined + +- set_fact: + ignore_file: loganalyzer_common_ignore.txt + when: ignore_file is not defined + +- set_fact: + expect_file: "loganalyzer_common_expect.txt" + when: expect_file is not defined - set_fact: testname_unique: "{{ testname }}.{{ lookup('pipe', 'date +%Y-%m-%d.%H:%M:%S') }}" when: testname_unique is not defined or (testname_unique_gen is defined and testname_unique_gen == true) +- set_fact: + test_out_dir: "{{ out_dir }}/{{ testname_unique }}" + when: test_out_dir is not defined + +- name: Copy loganalyzer common match and ignore files to switch + copy: src="{{ loganalyzer_location }}/{{ item }}" dest="{{ run_dir }}/{{ item }}" + with_items: + - "{{ match_file }}" + - "{{ ignore_file }}" + - "{{ expect_file }}" + +- name: Copy test specific file match-files to switch + copy: src="{{ tests_location }}/{{ testname }}/{{ test_match_file }}" dest="{{ run_dir }}/{{ test_match_file }}" + when: test_match_file is defined + +- name: Copy test specific ignore-files to switch + copy: src="{{ tests_location }}/{{ testname }}/{{ test_ignore_file }}" dest="{{ run_dir }}/{{ test_ignore_file }}" + when: test_ignore_file is defined + +- name: Copy test specific expect-files to switch + copy: src="{{ tests_location }}/{{ testname }}/{{ test_expect_file }}" dest="{{ run_dir }}/{{ test_expect_file }}" + when: 
test_expect_file is defined + +- name: Copy loganalyzer.py to run directory + copy: src="{{ loganalyzer_location }}/loganalyzer.py" dest="{{ run_dir }}" + +# Create directory to hold results for different runs of loganalyzer +- name: create output directory + file: path="{{ out_dir }}" state=directory + +# Create directory where loganalyzer will write output files for current run. +- name: create output directory for current test run + file: path="{{ test_out_dir }}" state=directory + - debug: msg="starting loganalyzer_init.py" - debug: msg="python {{ run_dir }}/loganalyzer.py --action init --run_id {{ testname_unique }}" - name: Initialize loganalyzer diff --git a/ansible/roles/test/handlers/main.yml b/ansible/roles/test/handlers/main.yml index 77e3be3265c..b3e6318e082 100644 --- a/ansible/roles/test/handlers/main.yml +++ b/ansible/roles/test/handlers/main.yml @@ -66,4 +66,4 @@ become: true - name: reboot sonic - include_tasks: roles/test/tasks/common_tasks/reboot_sonic.yml + include: roles/test/tasks/common_tasks/reboot_sonic.yml diff --git a/ansible/roles/test/tasks/acl.yml b/ansible/roles/test/tasks/acl.yml index 13a11df0b96..84b16d9886f 100644 --- a/ansible/roles/test/tasks/acl.yml +++ b/ansible/roles/test/tasks/acl.yml @@ -16,27 +16,27 @@ - block: - name: Test file input. - include_tasks: "{{ tests_location }}/acl_input_test/acl_input_test.yml" + include: "{{ tests_location }}/acl_input_test/acl_input_test.yml" when: tests_location is defined - name: Test port bind configuration. - include_tasks: "{{ tests_location }}/acl_port_bind_test/acl_port_bind_test.yml" + include: "{{ tests_location }}/acl_port_bind_test/acl_port_bind_test.yml" when: tests_location is defined - name: Test orchagent logic. - include_tasks: "{{ tests_location }}/acl_orchagent_logic_test/acl_orchagent_logic_test.yml" + include: "{{ tests_location }}/acl_orchagent_logic_test/acl_orchagent_logic_test.yml" when: tests_location is defined - name: Test traffic. 
- include_tasks: "{{ tests_location }}/acl_traffic_test/acl_traffic_test.yml" + include: "{{ tests_location }}/acl_traffic_test/acl_traffic_test.yml" when: tests_location is defined - name: Test counters traffic. - include_tasks: "{{ tests_location }}/acl_counter_traffic_test/acl_counter_traffic_test.yml" + include: "{{ tests_location }}/acl_counter_traffic_test/acl_counter_traffic_test.yml" when: tests_location is defined - name: Test L4 port range. - include_tasks: "{{ tests_location }}/acl_port_range_test/acl_port_range_test.yml" + include: "{{ tests_location }}/acl_port_range_test/acl_port_range_test.yml" when: tests_location is defined always: diff --git a/ansible/roles/test/tasks/acl/acl_counter_traffic_test/acl_counter_traffic_test.yml b/ansible/roles/test/tasks/acl/acl_counter_traffic_test/acl_counter_traffic_test.yml index 6ec77218e26..8f5cda3521a 100644 --- a/ansible/roles/test/tasks/acl/acl_counter_traffic_test/acl_counter_traffic_test.yml +++ b/ansible/roles/test/tasks/acl/acl_counter_traffic_test/acl_counter_traffic_test.yml @@ -38,7 +38,7 @@ rule_name_check: "{{ rule_name }}" expect_data: true expected_counter_value: 0 - include_tasks: "{{ acl_check_db }}" + include: "{{ acl_check_db }}" - name: Read initial counters value (before ping). shell: docker exec -i database redis-cli -n 2 HGET "COUNTERS:{{ table_name }}:{{ rule_name }}" Packets @@ -91,7 +91,7 @@ rule_name_check: "{{ rule_name }}" expect_data: false expected_counter_value: 0 - include_tasks: "{{ acl_check_db }}" + include: "{{ acl_check_db }}" #-------------------- # MIRROR TRAFFIC TESTING. @@ -118,7 +118,7 @@ rule_name_check: "{{ mirror_rule_name }}" expect_data: true expected_counter_value: 0 - include_tasks: "{{ acl_check_db }}" + include: "{{ acl_check_db }}" - name: Read initial counters value (before ping). 
shell: docker exec -i database redis-cli -n 2 HGET "COUNTERS:{{ mirror_table_name }}:{{ mirror_rule_name }}" Packets @@ -150,13 +150,13 @@ rule_name_check: "{{ mirror_rule_name }}" expect_data: false expected_counter_value: 0 - include_tasks: "{{ acl_check_db }}" + include: "{{ acl_check_db }}" # CLEANUP. - name: Run config cleanup after {{ testname}} - include_tasks: "{{ run_config_cleanup }}" + include: "{{ run_config_cleanup }}" -- block: +- always: - name: Delete neighbor. shell: ip neigh del {{ neighbor2_ip }} dev Ethernet0 ignore_errors: yes @@ -164,10 +164,8 @@ - name: Delete route. shell: ip route del {{ dst_mirror_subnet }} ignore_errors: yes - + - name: Remove all the temporary files created by the test. file: path="{{ run_dir }}/{{ item }}" state=absent with_items: - "{{ config_files }}" - tags: - - always diff --git a/ansible/roles/test/tasks/acl/acl_input_test/acl_input_test.yml b/ansible/roles/test/tasks/acl/acl_input_test/acl_input_test.yml index 3d1bc0cb7c2..78c83bd76b7 100644 --- a/ansible/roles/test/tasks/acl/acl_input_test/acl_input_test.yml +++ b/ansible/roles/test/tasks/acl/acl_input_test/acl_input_test.yml @@ -36,7 +36,7 @@ when: invalid_config_upload.rc == 0 - name: Do configuration cleanup. - include_tasks: "{{ run_config_cleanup }}" + include: "{{ run_config_cleanup }}" - name: Remove all the files created by test. 
file: path="{{ run_dir }}/{{ item }}" state=absent diff --git a/ansible/roles/test/tasks/acl/acl_orchagent_logic_test/acl_orchagent_logic_test.yml b/ansible/roles/test/tasks/acl/acl_orchagent_logic_test/acl_orchagent_logic_test.yml index dca4754554c..c951ab65e6f 100644 --- a/ansible/roles/test/tasks/acl/acl_orchagent_logic_test/acl_orchagent_logic_test.yml +++ b/ansible/roles/test/tasks/acl/acl_orchagent_logic_test/acl_orchagent_logic_test.yml @@ -31,7 +31,7 @@ test_expect_file: "{{ config_table_type_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Table test - invalid table type. vars: @@ -39,7 +39,7 @@ test_expect_file: "{{ config_table_type_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Table test - invalid physical port. vars: @@ -47,7 +47,7 @@ test_expect_file: "{{ config_port_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Table test - invalid physical port. vars: @@ -55,7 +55,7 @@ test_expect_file: "{{ config_port_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Table test - invalid extra field. vars: @@ -63,7 +63,7 @@ test_expect_file: "{{ config_extra_field_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Check return code from swssconfig - name: Table test - invalid operation applied for table ("OP" field). @@ -77,7 +77,7 @@ failed_when: invalid_oper_2.rc == 0 - name: Run config cleanup after. 
- include_tasks: "{{ run_config_cleanup }}" + include: "{{ run_config_cleanup }}" #------------------------------------------ # RULES TESTING @@ -89,7 +89,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid priority value. vars: @@ -97,7 +97,7 @@ test_expect_file: "{{ config_priority_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid priority value. vars: @@ -105,7 +105,7 @@ test_expect_file: "{{ config_priority_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - max priority value. vars: @@ -113,7 +113,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------- # ETHER_TYPE @@ -124,7 +124,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid ether type. vars: @@ -132,7 +132,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid ether type. vars: @@ -140,7 +140,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid ether type. vars: @@ -148,7 +148,7 @@ test_expect_file: "{{ config_ether_type_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid ether type. 
vars: @@ -156,7 +156,7 @@ test_expect_file: "{{ config_ether_type_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------- # IP TYPE @@ -167,7 +167,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid ip type. vars: @@ -175,7 +175,7 @@ test_expect_file: "{{ config_ip_type_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid ip type. vars: @@ -183,7 +183,7 @@ test_expect_file: "{{ config_ip_type_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid ip type. vars: @@ -191,7 +191,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid ip type. vars: @@ -199,7 +199,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------- # IP PROTO @@ -211,7 +211,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid IP protocol. vars: @@ -219,7 +219,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid IP protocol. 
vars: @@ -227,7 +227,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP protocol. vars: @@ -235,7 +235,7 @@ test_expect_file: "{{ config_ip_proto_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP protocol. vars: @@ -243,7 +243,7 @@ test_expect_file: "{{ config_ip_proto_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------- # IP ADDRESS @@ -254,7 +254,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid IP address. vars: @@ -262,7 +262,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid IP address. vars: @@ -270,7 +270,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP address. vars: @@ -278,7 +278,7 @@ test_expect_file: "{{ config_ip_addr_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP address. vars: @@ -286,7 +286,7 @@ test_expect_file: "{{ config_ip_addr_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP address. 
vars: @@ -294,7 +294,7 @@ test_expect_file: "{{ config_ip_addr_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP address. vars: @@ -302,7 +302,7 @@ test_expect_file: "{{ config_ip_addr_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP address. vars: @@ -310,7 +310,7 @@ test_expect_file: "{{ config_ip_addr_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP address. vars: @@ -318,7 +318,7 @@ test_expect_file: "{{ config_ip_addr_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid IP address. vars: @@ -326,7 +326,7 @@ test_expect_file: "{{ config_ip_addr_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------- # TCP FLAGS @@ -338,7 +338,7 @@ test_expect_file: "{{ config_empty_expect }}" run_cleanup: false errors_expected: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid TCP flags. vars: @@ -346,7 +346,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid TCP flags. vars: @@ -354,7 +354,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid TCP flags. 
vars: @@ -362,7 +362,7 @@ test_expect_file: "{{ config_tcp_flags_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid TCP flags. vars: @@ -370,7 +370,7 @@ test_expect_file: "{{ config_tcp_flags_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------- # L4 port @@ -382,7 +382,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid L4 port. vars: @@ -390,7 +390,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid L4 port. vars: @@ -398,7 +398,7 @@ test_expect_file: "{{ config_l4_port_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid L4 port. vars: @@ -406,7 +406,7 @@ test_expect_file: "{{ config_l4_port_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid L4 port. vars: @@ -414,7 +414,7 @@ test_expect_file: "{{ config_l4_port_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------- # Packet action @@ -426,7 +426,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - valid packet action. 
vars: @@ -434,7 +434,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid packet action. vars: @@ -442,7 +442,7 @@ test_expect_file: "{{ config_packet_action_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid packet action. vars: @@ -450,7 +450,7 @@ test_expect_file: "{{ config_packet_action_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - dscp in L3 table. vars: @@ -458,7 +458,7 @@ test_expect_file: "{{ config_dscp_in_l3_table_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - create rule in non-existing table. @@ -467,7 +467,7 @@ test_expect_file: "{{ config_rule_in_non_existing_table_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #---------------------------------------------------------------------------------- #---------------------------------------------------------------------------------- @@ -488,7 +488,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Prepare valid config: create ACL rule. - name: Create a rule inside of ACL table (which is created previously). @@ -497,7 +497,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Invalid table deletion: try to delete existing table that contains rules. - name: Try to delete existing table with rules (errors expected). 
@@ -506,7 +506,7 @@ test_expect_file: "{{ config_del_table_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Valid rule deletion. - name: Delete an existing rule test. @@ -515,7 +515,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Valid table deletion. - name: Delete an existing table test. @@ -524,7 +524,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Prepare valid config: create ACL table. - name: Create L3 table. @@ -533,7 +533,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Delete non-existing rule from existing table. - name: Delete non-existing rule test. @@ -542,7 +542,7 @@ test_expect_file: "{{ config_del_rule_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Delete non-existing table test. vars: @@ -550,7 +550,7 @@ test_expect_file: "{{ config_del_table_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # Prepare valid config: create ACL table. @@ -560,7 +560,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" # delete table with different fields provided - name: Delete table with different fields provided. 
@@ -569,7 +569,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" always: - name: Remove all the temporary files created by the test. diff --git a/ansible/roles/test/tasks/acl/acl_port_bind_test/acl_port_bind_test.yml b/ansible/roles/test/tasks/acl/acl_port_bind_test/acl_port_bind_test.yml index 7710de6cc58..b06e2e6a81c 100644 --- a/ansible/roles/test/tasks/acl/acl_port_bind_test/acl_port_bind_test.yml +++ b/ansible/roles/test/tasks/acl/acl_port_bind_test/acl_port_bind_test.yml @@ -21,7 +21,7 @@ test_expect_file: "{{ config_bind_unknown_port_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Run test with duplicate port. vars: @@ -29,7 +29,7 @@ test_expect_file: "{{ config_bind_duplicate_port_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Run test with valid port. vars: @@ -37,7 +37,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Run test to unbind port. vars: @@ -45,7 +45,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" always: - name: Remove all the temporary created by the test. 
diff --git a/ansible/roles/test/tasks/acl/acl_port_range_test/acl_port_range_test.yml b/ansible/roles/test/tasks/acl/acl_port_range_test/acl_port_range_test.yml index 9b20b5a91cb..6c3abcab5c9 100644 --- a/ansible/roles/test/tasks/acl/acl_port_range_test/acl_port_range_test.yml +++ b/ansible/roles/test/tasks/acl/acl_port_range_test/acl_port_range_test.yml @@ -50,7 +50,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - block: - name: See that Redis DB has got an appropriate field. @@ -68,10 +68,10 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Run config cleanup after. - include_tasks: "{{ run_config_cleanup }}" + include: "{{ run_config_cleanup }}" #-------------------------------- # Invalid configuration testing (valid L3 table + invalid rules). @@ -85,7 +85,7 @@ test_expect_file: "{{ config_l4_port_range_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - boundaries reversed. vars: @@ -93,7 +93,7 @@ test_expect_file: "{{ config_l4_port_range_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - port out of range. vars: @@ -101,7 +101,7 @@ test_expect_file: "{{ config_l4_port_range_expect }}" errors_expected: true run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - name: Rule test - invalid syntax used in range. vars: @@ -109,7 +109,7 @@ test_expect_file: "{{ config_l4_port_range_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" #-------------------------------- # Test the configuration deleting. 
@@ -126,7 +126,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - block: - name: See that Redis-DB has NOT got an appropriate field. @@ -149,7 +149,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: false - include_tasks: "{{ run_config_test }}" + include: "{{ run_config_test }}" - block: - name: See that Redis-DB has NOT got an appropriate field. @@ -171,7 +171,7 @@ test_expect_file: "{{ config_empty_expect }}" loganalyzer_run_init: true loganalyzer_run_analyze: false - include_tasks: "{{ run_loganalyzer }}" + include: "{{ run_loganalyzer }}" - name: Create target directory file: path={{ host_testdir }} state=directory @@ -250,7 +250,7 @@ test_expect_file: "{{ config_empty_expect }}" loganalyzer_run_analyze: true loganalyzer_run_init: false - include_tasks: "{{ run_loganalyzer }}" + include: "{{ run_loganalyzer }}" always: - name: Remove all the temporary files from switch. 
@@ -266,4 +266,4 @@ delegate_to: "{{ ptf_host }}" - name: Run config cleanup after {{ testname}} - include_tasks: "{{ run_config_cleanup }}" + include: "{{ run_config_cleanup }}" diff --git a/ansible/roles/test/tasks/acl/acl_traffic_test/acl_traffic_test.yml b/ansible/roles/test/tasks/acl/acl_traffic_test/acl_traffic_test.yml index c670f1afec3..2be2dfd3050 100644 --- a/ansible/roles/test/tasks/acl/acl_traffic_test/acl_traffic_test.yml +++ b/ansible/roles/test/tasks/acl/acl_traffic_test/acl_traffic_test.yml @@ -35,7 +35,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_ping_test }}" + include: "{{ run_ping_test }}" - name: ACL drop traffic by source ip address test vars: @@ -43,7 +43,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_ping_test }}" + include: "{{ run_ping_test }}" - name: ACL drop traffic by destination ip address test vars: @@ -51,7 +51,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_ping_test }}" + include: "{{ run_ping_test }}" - name: ACL drop traffic by ip prototype test vars: @@ -59,7 +59,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_ping_test }}" + include: "{{ run_ping_test }}" - name: ACL drop traffic by ip type test vars: @@ -67,7 +67,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: true run_cleanup: true - include_tasks: "{{ run_ping_test }}" + include: "{{ run_ping_test }}" - name: ACL drop traffic by tcp flags test vars: @@ -75,7 +75,7 @@ test_expect_file: "{{ config_empty_expect }}" errors_expected: false run_cleanup: true - include_tasks: "{{ run_ping_test }}" + include: "{{ run_ping_test }}" always: - name: Remove all the temporary created by the test. 
diff --git a/ansible/roles/test/tasks/acl/acl_traffic_test/run_ping_test.yml b/ansible/roles/test/tasks/acl/acl_traffic_test/run_ping_test.yml index 0d76e5598e1..01ea8dba5ee 100644 --- a/ansible/roles/test/tasks/acl/acl_traffic_test/run_ping_test.yml +++ b/ansible/roles/test/tasks/acl/acl_traffic_test/run_ping_test.yml @@ -11,7 +11,7 @@ result_file: "result.loganalysis.{{ testname_unique }}.log" - name: Start log analyser - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - block: - name: Execute ping from host to switch to validate link @@ -33,7 +33,7 @@ always: - name: Stop log analyser - include_tasks: "{{ loganalyzer_analyze }}" + include: "{{ loganalyzer_analyze }}" - name: Get the total number of error messages. shell: grep "TOTAL MATCHES" "{{ test_out_dir }}/{{ summary_file }}" | sed -n "s/TOTAL MATCHES:[[:space:]]*//p" @@ -44,5 +44,5 @@ when: errors_found.rc != 0 - name: Do configuration cleanup. - include_tasks: "{{ run_config_cleanup }}" + include: "{{ run_config_cleanup }}" when: run_cleanup == true diff --git a/ansible/roles/test/tasks/acl/acl_traffic_test/run_ptf_test.yml b/ansible/roles/test/tasks/acl/acl_traffic_test/run_ptf_test.yml index bab029070f3..dc8436a49dd 100644 --- a/ansible/roles/test/tasks/acl/acl_traffic_test/run_ptf_test.yml +++ b/ansible/roles/test/tasks/acl/acl_traffic_test/run_ptf_test.yml @@ -34,7 +34,7 @@ delegate_to: "{{ ptf_host2 }}" - name: Start log analyser - include_tasks: "{{ loganalyzer_init }}" + include: "{{ loganalyzer_init }}" - name: Set ACL rules. command: docker exec -i {{ orchagent }} swssconfig {{ docker_testdir }}/{{ config_file }} @@ -53,7 +53,7 @@ always: - name: Stop log analyser - include_tasks: "{{ loganalyzer_analyze }}" + include: "{{ loganalyzer_analyze }}" - name: Get the total number of error messages. 
shell: grep "TOTAL MATCHES" "{{ test_out_dir }}/{{ summary_file }}" | sed -n "s/TOTAL MATCHES:[[:space:]]*//p" @@ -64,5 +64,5 @@ when: errors_found.stdout != "0" - name: Do configuration cleanup. - include_tasks: "{{ run_config_cleanup }}" + include: "{{ run_config_cleanup }}" when: run_cleanup == true diff --git a/ansible/roles/test/tasks/acl/acltb_test_rules_allow_all.json b/ansible/roles/test/tasks/acl/acltb_test_rules_allow_all.json new file mode 100644 index 00000000000..072653ba557 --- /dev/null +++ b/ansible/roles/test/tasks/acl/acltb_test_rules_allow_all.json @@ -0,0 +1,29 @@ +{ + "acl": { + "acl-sets": { + "acl-set": { + "dataacl": { + "acl-entries": { + "acl-entry": { + "1": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 1 + }, + "l2": { + "config": { + "ethertype": "2048" + } + } + } + } + } + } + } + } + } +} diff --git a/ansible/roles/test/tasks/acltb.yml b/ansible/roles/test/tasks/acltb.yml index 9362abf28ea..33515340b49 100644 --- a/ansible/roles/test/tasks/acltb.yml +++ b/ansible/roles/test/tasks/acltb.yml @@ -1,100 +1,137 @@ -# This script is for testing the ACL feature on SONiC switch. It covers ingress ACL and egress ACL testing. -# For each type of ACL testing, it supports 3 scenarios: -# * Basic ACL testing -# * Toggle all ports and then test ACL -# * Reboot DUT and then test ACL to verify that ACL configuration persists through reboot -# -# This script supports selecting which type of ACL and which scenarios to run by specifying different ansible variables. -# * run_ingress: Specify whether to run the ingress ACL testing. Values: yes, no. Default: yes -# * run_egress: Specify whether to run the egress ACL testing. Values: yes, no. Default: no -# * test_basic: Specify whether to run the basic ACL testing scenario. Values: yes, no. Default: yes -# * test_port_toggle: Specify whether to run the port toggle scenario. Values: yes, no. 
Default: no -# * test_reboot: Specify whether to run the reboot scenario. Values: yes, no. Default: no -# -# +---------------------+---------------------+---------------------+ -# | Basic Acl Testing | Port Toggle | Reboot | -# |-- -------+----------+----------+----------+----------+----------+ -# | Yes | No | Yes | No | Yes | No | -# +---------+-----+----------+----------+----------+----------+----------+----------+ -# | | Yes | Y | N | Y | N | Y | N | -# | Ingress +-----+----------+----------+----------+----------+----------+----------+ -# | | No | N | N | N | N | N | N | -# +---------+-----+----------+----------+----------+----------+----------+----------+ -# | | Yes | Y | N | Y | N | Y | N | -# | Egress +-----+----------+----------+----------+----------+----------+----------+ -# | | No | N | N | N | N | N | N | -# +---------+-----+----------+----------+----------+----------+----------+----------+ -# -# Example: -# $ ansible-playbook test_sonic -i inventory -l dut-switch-t1 -e testbed_name=dut-switch-t1 -e testbed_type=t1 -# -e testcase_name=acl -vvvv -# This command will only run basic ACL testing for ingress ACL. -# -# $ ansible-playbook test_sonic -i inventory -l dut-switch-t1 -e testbed_name=dut-switch-t1 -e testbed_type=t1 -# -e testcase_name=acl -e run_ingress=yes -e run_egress=yes -e test_basic=yes -e test_port_toggle=yes -# -e test_reboot=no -vvvv -# This command will run ingress and egress ACL testing. The basic and port toggle scenarios will be tested. -# -# $ ansible-playbook test_sonic -i inventory -l dut-switch-t1 -e testbed_name=dut-switch-t1 -e testbed_type=t1 -# -e testcase_name=acl -e run_egress=yes -e test_port_toggle=yes -e test_reboot=yes -vvvv -# This command will run everything, including ingress&egress, and all the 3 scenarios. 
- -- name: set ingress test flag - set_fact: - run_ingress: yes - when: run_ingress is not defined +# Set facts for the loganalizer +- set_fact: + out_dir: /tmp/ + testname: acl + run_dir: /tmp/ -- name: set egress test flag - set_fact: - run_egress: no - when: run_egress is not defined +# Gather minigraph facts +- name: Gathering minigraph facts about the device + minigraph_facts: host={{ inventory_hostname }} -- name: set basic test flag +- name: Read port reverse alias mapping set_fact: - test_basic: yes - when: test_basic is not defined + alias_reverse_map: "{{ minigraph_port_alias_to_name_map }}" + podset_number: 200 -- name: set reboot test flag - set_fact: - test_reboot: no - when: test_reboot is not defined +# Copy ACL config to the switch +- name: Copy ACL config file to the DUT + copy: src="roles/test/tasks/acl/{{ item }}" dest="/tmp/" + with_items: + - "acltb_test_rules.json" + - "acltb_test_rules_allow_all.json" + - "acltb_test_rules-del.json" + - "acltb_test_rules_part_1.json" + - "acltb_test_rules_part_2.json" -- name: set port toggle test flag - set_fact: - test_port_toggle: no - when: test_port_toggle is not defined +# Generate file with switch information +- template: src=acltb.j2 dest=/tmp/acltb_switch_info.txt + connection: local -- name: set test filter expression - set_fact: - filter_expr: 'acl' +- name: Copy switch info file to the PTF host + copy: src=/tmp/acltb_switch_info.txt dest=/tmp/acltb_switch_info.txt + delegate_to: "{{ ptf_host }}" -- name: append filter on ingress - set_fact: - filter_expr: '{{ filter_expr }} and not ingress' - when: not run_ingress +- block: + - name: Apply allow all rule + vars: + command_to_run: "acl-loader update full /tmp/acltb_test_rules_allow_all.json" + errors_expected: false + include: roles/test/tasks/run_command_with_log_analyzer.yml -- name: append filter on egress - set_fact: - filter_expr: '{{ filter_expr }} and not egress' - when: not run_egress + - name: Apply test rules + vars: + command_to_run: 
"acl-loader update full /tmp/acltb_test_rules.json" + errors_expected: false + include: roles/test/tasks/run_command_with_log_analyzer.yml -- name: append filter on basic - set_fact: - filter_expr: '{{ filter_expr }} and not TestBasicAcl and not TestIncrementalAcl' - when: not test_basic + - name: copy acsbase files + copy: src=roles/test/files/acstests + dest=/root + delegate_to: "{{ ptf_host }}" -- name: append filter on reboot - set_fact: - filter_expr: '{{ filter_expr }} and not reboot' - when: not test_reboot + - name: copy ptftests + copy: src=roles/test/files/ptftests + dest=/root + delegate_to: "{{ ptf_host }}" -- name: append filter on port toggle - set_fact: - filter_expr: '{{ filter_expr }} and not port_toggle' - when: not test_port_toggle - -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: acl - test_filter: '{{ filter_expr }}' + - name: Run the test + include: ptf_runner.yml + vars: + ptf_test_name: ACL Test + ptf_test_dir: acstests + ptf_test_path: acltb_test.AclTest + ptf_platform_dir: ptftests + ptf_platform: remote + ptf_test_params: + - verbose=True + - router_mac=\"{{ ansible_Ethernet0['macaddress'] }}\" + - switch_info=\"/tmp/acltb_switch_info.txt\" + - testbed_type=\"{{ testbed_type }}\" + + - name: Clean up ACL rules. + vars: + command_to_run: "acl-loader update full /tmp/acltb_test_rules-del.json" + errors_expected: false + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Apply part 1 of ACL rules. + vars: + command_to_run: "acl-loader update incremental /tmp/acltb_test_rules_part_1.json" + errors_expected: false + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Apply part 2 of ACL rules. 
+ vars: + command_to_run: "acl-loader update incremental /tmp/acltb_test_rules_part_2.json" + errors_expected: false + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Run the test + include: ptf_runner.yml + vars: + ptf_test_name: ACL Test + ptf_test_dir: acstests + ptf_test_path: acltb_test.AclTest + ptf_platform_dir: ptftests + ptf_platform: remote + ptf_test_params: + - verbose=True + - router_mac=\"{{ ansible_Ethernet0['macaddress'] }}\" + - switch_info=\"/tmp/acltb_switch_info.txt\" + - testbed_type=\"{{ testbed_type }}\" + + - name: Save the applied ACL in ConfigDB + command: config save -y + become: true + + - name: Reboot the switch + include: common_tasks/reboot_sonic.yml + + - name: Run the test + include: ptf_runner.yml + vars: + ptf_test_name: ACL Test + ptf_test_dir: acstests + ptf_test_path: acltb_test.AclTest + ptf_platform_dir: ptftests + ptf_platform: remote + ptf_test_params: + - verbose=True + - router_mac=\"{{ ansible_Ethernet0['macaddress'] }}\" + - switch_info=\"/tmp/acltb_switch_info.txt\" + - testbed_type=\"{{ testbed_type }}\" + + always: + # Copy ACL config to the switch + - name: Copy ACL config file to the DUT + copy: src="roles/test/tasks/acl/acltb_test_rules-del.json" dest="/tmp/" + + - name: Clean up ACL rules. 
+ vars: + command_to_run: "acl-loader update full /tmp/acltb_test_rules-del.json" + errors_expected: false + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Ensure ConfigDB is cleaned up + command: config save -y + become: true diff --git a/ansible/roles/test/tasks/acltb_ranges_test.yml b/ansible/roles/test/tasks/acltb_ranges_test.yml index b303bc51010..9c7d37afece 100644 --- a/ansible/roles/test/tasks/acltb_ranges_test.yml +++ b/ansible/roles/test/tasks/acltb_ranges_test.yml @@ -45,18 +45,13 @@ set_fact: alias_reverse_map: "{{ lookup('file', 'roles/sonicv2/files/ssw/{{ sonic_hwsku }}/alias_reverse_map.json') | from_json }}" -- include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml +- include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml vars: tests_location: "{{ 'roles/test/tasks' }}" # Outer block to execute laganalizer in always block - block: # Perform the test: generate and apply acl jsons - - - include_tasks: add_container_to_inventory.yml - vars: - container_name: swss - - block: - name: Copy JSON configs into docker filesystem template: src={{ item }}.j2 dest=/tmp/{{ item }}.json @@ -82,10 +77,12 @@ with_items: - "{{ acltb_configs[::-1] }}" - delegate_to: "{{ ansible_host }}_swss" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i swss python always: - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml vars: tests_location: "{{ 'roles/test/tasks' }}" @@ -102,6 +99,6 @@ fail: msg="{{ errors_found.stdout }} errors found while running {{ testname }} test. 
Please see {{ test_out_dir }}/{{ result_file }}" when: errors_found.stdout != "0" - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml vars: tests_location: "{{ 'roles/test/tasks' }}" diff --git a/ansible/roles/test/tasks/advanced_reboot/reboot-image-handle.yml b/ansible/roles/test/tasks/advanced_reboot/reboot-image-handle.yml index dc2ff29fd46..62fd8334c3d 100755 --- a/ansible/roles/test/tasks/advanced_reboot/reboot-image-handle.yml +++ b/ansible/roles/test/tasks/advanced_reboot/reboot-image-handle.yml @@ -38,7 +38,7 @@ - name: 'Setup restoring initial image {{ current_sonic_image }}' shell: /bin/true - delegate_to: localhost + connection: local notify: - restore current image - reboot sonic diff --git a/ansible/roles/test/tasks/arpall.yml b/ansible/roles/test/tasks/arpall.yml index 59e30d7e12a..b1b7c283a89 100644 --- a/ansible/roles/test/tasks/arpall.yml +++ b/ansible/roles/test/tasks/arpall.yml @@ -7,7 +7,7 @@ - name: get all available interface names set_fact: ports: "{{ ports|default([]) + [ item | regex_replace('Ethernet', '') | int ] }}" - with_items: "{{ minigraph_ports }}" + with_items: minigraph_ports - name: get all interface numbers set_fact: @@ -27,7 +27,7 @@ set_fact: po1: "{{ item.key }}" when: intf1 in item.value['members'] - with_dict: "{{ minigraph_portchannels }}" + with_dict: minigraph_portchannels - name: move interface {{ intf1 }} out of {{ po1 }} shell: config portchannel member del {{ po1 }} {{ intf1 }} @@ -43,7 +43,7 @@ set_fact: po2: "{{ item.key }}" when: intf2 in item.value['members'] - with_dict: "{{ minigraph_portchannels }}" + with_dict: minigraph_portchannels - name: move {{ intf2 }} out of {{ po2 }} shell: config portchannel member del {{ po2 }} {{ intf2 }} @@ -78,7 +78,7 @@ become: yes - name: Start PTF runner and Send correct unicast arp packets (10.10.1.3 to 10.10.1.2 with src_mac=00:06:07:08:09:00) - include_tasks: ptf_runner.yml + include: 
ptf_runner.yml vars: ptf_test_name: ARP test ptf_test_dir: ptftests @@ -106,7 +106,7 @@ # Send correct ARP request from correct interface, expecting normal behavior - name: PTF funner Send correct arp packets (10.10.1.3 to 10.10.1.2 with src_mac=00:06:07:08:09:0a) - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: ARP test ptf_test_dir: ptftests @@ -134,7 +134,7 @@ become: yes - name: Send correct arp packets from other interface expect no reply(10.10.1.4 to 10.10.1.2 with src_mac=00:02:07:08:09:0a) - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: ARP test ptf_test_dir: ptftests @@ -162,7 +162,7 @@ become: yes - name: Send Src IP out of interface subnet range arp packets, expect no reply and no arp table entry (10.10.1.22 to 10.10.1.2 with src_mac=00:03:07:08:09:0a) - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: ARP test ptf_test_dir: ptftests @@ -190,7 +190,7 @@ become: yes - name: Send garp packets (10.10.1.7 to 10.10.1.7) - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: ARP test ptf_test_dir: ptftests @@ -213,7 +213,7 @@ # Test Gratuitous ARP update case, when received garp, no arp reply, update arp table if it was solved before - name: Send correct arp packets (10.10.1.3 to 10.10.1.2 with src_mac=00:06:07:08:09:0a) - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: ARP test ptf_test_dir: ptftests @@ -237,7 +237,7 @@ - pause: seconds=2 - name: Send garp packets to update arp table(10.10.1.3 to 10.10.1.3 with src_mac=00:00:07:08:09:0a) - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: ARP test ptf_test_dir: ptftests @@ -261,9 +261,11 @@ always: # Recover DUT interface IP Address before entering this test case - name: restore dut original state - include_tasks: "roles/test/tasks/common_tasks/reload_config.yml" - vars: - config_source: "config_db" + command: config reload -y + become: yes + 
+ - name: wait 60 seconds for ports to be up + pause: seconds=60 - name: check port status interface_facts: up_ports={{ minigraph_ports }} diff --git a/ansible/roles/test/tasks/base_sanity.yml b/ansible/roles/test/tasks/base_sanity.yml index 1594613e11e..6d9f186191c 100644 --- a/ansible/roles/test/tasks/base_sanity.yml +++ b/ansible/roles/test/tasks/base_sanity.yml @@ -14,7 +14,7 @@ - block: - name: reboot - include_tasks: common_tasks/reboot_sonic.yml + include: common_tasks/reboot_sonic.yml - name: Get process information in syncd docker shell: docker exec -i syncd ps aux | grep /usr/bin/syncd diff --git a/ansible/roles/test/tasks/bgp_entry_flap.yml b/ansible/roles/test/tasks/bgp_entry_flap.yml index 1ce23bc056c..4753cbe4e19 100644 --- a/ansible/roles/test/tasks/bgp_entry_flap.yml +++ b/ansible/roles/test/tasks/bgp_entry_flap.yml @@ -10,24 +10,22 @@ hwsku: "{{ acs_devices[name]['hwsku'] }}" cred: "{{ switch_login[acs_devices[name]['hwsku']] }}" -- include_tasks: add_container_to_inventory.yml - vars: - container_name: sswsyncd - - name: Get ASIC tables switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes become: yes - delegate_to: "{{ ansible_host }}_sswsyncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i sswsyncd python - block: - name: Assert the particular entry is in nexthopgroup table - assert: + assert: that: nexthop[addr] in nexthopgroup[item] with_items: "{{ nexthopgroup }}" - name: Gathering minigraph facts about neighbor minigraph_facts: host={{ name }} filename="{{ vmhost_num }}-{{ name }}.xml" - delegate_to: localhost + connection: local become: no - name: Shut down BGP session from neighbor @@ -46,8 +44,10 @@ - name: Update list of current nexthop group(s) switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes become: yes - delegate_to: "{{ ansible_host }}_sswsyncd" - + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i sswsyncd python + - name: Poll for updated tables until 
peer is not in nexthop groups switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes become: yes @@ -56,7 +56,9 @@ retries: 6 delay: 10 with_items: "{{ nexthopgroup }}" - delegate_to: "{{ ansible_host }}_sswsyncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i sswsyncd python - name: Restart BGP session from neighbor action: cisco template=bgp_neighbor_noshut.j2 @@ -71,7 +73,9 @@ - name: Update list of current nexthop group(s) switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes become: yes - delegate_to: "{{ ansible_host }}_sswsyncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i sswsyncd python - name: Poll for updated tables until peer is in nexthop groups switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes @@ -81,6 +85,8 @@ retries: 6 delay: 10 with_items: "{{ nexthopgroup }}" - delegate_to: "{{ ansible_host }}_sswsyncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i sswsyncd python when: "'T2' in name" diff --git a/ansible/roles/test/tasks/bgp_fact.yml b/ansible/roles/test/tasks/bgp_fact.yml index 3b1148c76e6..b4f3973d21f 100644 --- a/ansible/roles/test/tasks/bgp_fact.yml +++ b/ansible/roles/test/tasks/bgp_fact.yml @@ -14,14 +14,6 @@ - name: Print bgp facts debug: msg="{{ bgp_neighbors }}" -- name: Prepare neighbor info - shell: ip neigh show - register: ip_neighbors - ignore_errors: yes - -- name: Print neighbor info - debug: msg="{{ip_neighbors.stdout_lines}}" - - name: Verify bgp sessions are established assert: { that: "'{{ bgp_neighbors[item]['state'] }}' == 'established'" } with_items: "{{ bgp_neighbors.keys() }}" diff --git a/ansible/roles/test/tasks/bgp_flap.yml b/ansible/roles/test/tasks/bgp_flap.yml index d7a2ebc1342..2cf6b5fad17 100644 --- a/ansible/roles/test/tasks/bgp_flap.yml +++ b/ansible/roles/test/tasks/bgp_flap.yml @@ -11,11 +11,11 @@ - set_fact: asic="{{ sonic_asic_type }}" -- include_tasks: bgp_nei_up.yml +- include: 
bgp_nei_up.yml with_items: "{{ minigraph_bgp }}" when: "'T2' in item['name']" -- include_tasks: bgp_entry_flap.yml +- include: bgp_entry_flap.yml with_items: "{{ minigraph_bgp }}" - name: recover minigraph facts about the device(above steps loaded neighbors configuration) diff --git a/ansible/roles/test/tasks/bgp_gr_helper.yml b/ansible/roles/test/tasks/bgp_gr_helper.yml index 7e17315b51f..efeb3182404 100644 --- a/ansible/roles/test/tasks/bgp_gr_helper.yml +++ b/ansible/roles/test/tasks/bgp_gr_helper.yml @@ -9,7 +9,7 @@ when: testbed_type not in ['t1', 't1-lag', 't1-64-lag', 't1-64-lag-clet'] - name: Get VM info. - include_tasks: "roles/test/tasks/bgp_gr_helper/get_vm_info.yml" + include: "roles/test/tasks/bgp_gr_helper/get_vm_info.yml" - name: Gather facts from bgp container. bgp_facts: @@ -36,7 +36,7 @@ - set_fact: test_out_dir: "{{ out_dir }}/{{ testname_unique }}" - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml vars: test_match_file: routes_update_match.txt @@ -78,11 +78,11 @@ assert: { that: "'{{ bgp_neighbors[peer_ipv6]['capabilities']['peer af ipv6 unicast'] }}' == 'preserved'" } # Analyze syslog, no log message related with routes update should be observed - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml vars: test_match_file: routes_update_match.txt - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml always: - name: Set log level back to NOTICE @@ -96,7 +96,7 @@ - set_fact: test_out_dir: "{{ out_dir }}/{{ testname_unique }}" - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml vars: test_expect_file: routes_update_expect.txt @@ -135,11 +135,11 @@ assert: { that: "'{{ 
bgp_neighbors[peer_ipv6]['capabilities']['peer af ipv6 unicast'] }}' == 'not preserved'" } # Analyze syslog, log messages related with routes update are expected - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml vars: test_expect_file: routes_update_expect.txt - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml always: - name: Set log level back to NOTICE diff --git a/ansible/roles/test/tasks/bgp_gr_helper/get_vm_info.yml b/ansible/roles/test/tasks/bgp_gr_helper/get_vm_info.yml index f2b40cdb51b..b0ba4af0927 100644 --- a/ansible/roles/test/tasks/bgp_gr_helper/get_vm_info.yml +++ b/ansible/roles/test/tasks/bgp_gr_helper/get_vm_info.yml @@ -1,6 +1,6 @@ - name: Gathering lab graph facts about the device conn_graph_facts: host={{ ansible_host }} - delegate_to: localhost + connection: local tags: always - name: Init variables. @@ -30,13 +30,11 @@ with_items: "{{ minigraph_bgp }}" when: "item.name == vm_name and item.addr|ipv6" -- include_tasks: add_container_to_inventory.yml - vars: - container_name: lldp - - name: Gather information from LLDP lldp: - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python - name: Get VM IP address. 
set_fact: diff --git a/ansible/roles/test/tasks/bgp_multipath_relax.yml b/ansible/roles/test/tasks/bgp_multipath_relax.yml index adcdf021e1f..0f924a0f78a 100644 --- a/ansible/roles/test/tasks/bgp_multipath_relax.yml +++ b/ansible/roles/test/tasks/bgp_multipath_relax.yml @@ -18,7 +18,7 @@ - name: Find all V4 bgp neighbors from minigraph set_fact: bgp_v4nei: "{{ bgp_v4nei | default({}) | combine({ item['name']: item['addr'] }) }}" - with_items: "{{ minigraph_bgp }}" + with_items: minigraph_bgp when: "'::' not in item['addr']" - include_vars: "vars/topo_{{ testbed_type }}.yml" @@ -74,7 +74,7 @@ assert: that: - vips_asn in item - with_items: "{{ bgp_route[vips_prefix]['aspath'] }}" + with_items: bgp_route[vips_prefix]['aspath'] #### Verify each t2 adv routes: this option takes time and resources; verified working but print out too much, comment this #### out unless we have a reason to do so @@ -90,7 +90,7 @@ # - item.ansible_facts['bgp_route_neiadv'][vips_prefix]['aspath'] | length == 2 # - vips_asn in item.ansible_facts['bgp_route_neiadv'][vips_prefix]['aspath'] # - vips_asn == item.ansible_facts['bgp_route_neiadv'][vips_prefix]['aspath'][-1] -# with_items: "{{ adv_t2_results.results }}" +# with_items: adv_t2_results.results ######### Verify each t2 option @@ -109,4 +109,4 @@ assert: that: - item in "{{ vips_t0 + [vips_asn] }}" - with_items: "{{ bgp_route_neiadv[vips_prefix]['aspath'] }}" + with_items: bgp_route_neiadv[vips_prefix]['aspath'] diff --git a/ansible/roles/test/tasks/bgp_speaker.yml b/ansible/roles/test/tasks/bgp_speaker.yml index 7dca4b45e08..28b5ea7180b 100644 --- a/ansible/roles/test/tasks/bgp_speaker.yml +++ b/ansible/roles/test/tasks/bgp_speaker.yml @@ -1,4 +1,217 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: bgp/test_bgp_speaker.py +#======================================== +# Run BGP Speaker test +#======================================== + +- block: + - fail: msg="Information about tested missing" + 
when: (testbed_type is not defined or ptf_host is not defined) + + - fail: msg="Invalid testbed_type value '{{testbed_type}}'" + when: testbed_type not in testcases['bgp_speaker']['topologies'] + + - name: Gather minigraph facts about the device + minigraph_facts: host={{inventory_hostname}} + + - name: print deployment id + debug: msg="{{deployment_id}}" + + - name: fetch bgp speaker asn number from DUT + shell: sonic-cfggen -m -d -y /etc/sonic/deployment_id_asn_map.yml -v "deployment_id_asn_map[DEVICE_METADATA['localhost']['deployment_id']]" + register: cfggen_out + + - name: set bgp speaker asn number + set_fact: + bgp_speaker_asn={{cfggen_out.stdout}} + + - set_fact: addr_family='ipv4' + when: addr_family is not defined + + - set_fact: portchannel_name="{{minigraph_portchannel_interfaces[0].attachto}}" + when: addr_family == 'ipv6' + + - name: print bgp speaker asn number + debug: msg="{{bgp_speaker_asn}}" + + - name: Generate three ips in VLAN range + get_ip_in_range: num=3 prefix="{{minigraph_vlan_interfaces[0]['addr']}}/{{minigraph_vlan_interfaces[0]['prefixlen']}}" exclude_ips="{{minigraph_vlan_interfaces[0]['addr']}}" + become: no + connection: local + failed_when: False + + - name: Store the value of VLAN IPs + set_fact: + vlan_ips={{generated_ips}} + + - debug: msg="{{generated_ips}}" + + - name: Generate two ips in bgp speaker peer range + get_ip_in_range: num=2 prefix={{minigraph_bgp_peers_with_range[0]['ip_range'][0]}} + become: no + connection: local + + - name: Set the value of ips in bgp speaker peer range + set_fact: speaker_ips={{generated_ips}} + + - name: Flush vlan ips route + command: ip route flush {{item.split('/')[0]}}/32 + when: addr_family == 'ipv4' + become: yes + with_items: "{{vlan_ips}}" + + - name: Add vlan ips route + command: ip route add {{item.split('/')[0]}}/32 dev {{minigraph_vlan_interfaces[0]['attachto']}} + when: addr_family == 'ipv4' + become: yes + with_items: "{{vlan_ips}}" + + - debug: msg="{{generated_ips}}" + + # 
vlan_ips[0], speaker_ips[0], speaker_ips[1] are IPs for three bgp speakers + # vlan_ips[1], vlan_ips[2] are IPs for mux under DUT. + + - name: set exabgp folder name + set_fact: + exabgp_dir="/root/exabgp" + + - name: Create directory /exabgp to store exabgp related files. + file: path={{exabgp_dir}} state=directory + delegate_to: "{{ptf_host}}" + + - name: Copy helper files to ptf container + copy: src=roles/test/files/helpers dest=/root + delegate_to: "{{ptf_host}}" + + - name: set exabgp folder name + set_fact: + helper_dir="/root/helpers" + + - name: Generate configurations for exabgp instances + template: src=roles/test/templates/exabgp/config.j2 dest={{exabgp_dir}}/{{item.file_name}} + with_items: + - {file_name: "config_1.ini", local_ip: '{{speaker_ips[0]}}', port_num: '5000'} + - {file_name: "config_2.ini", local_ip: '{{speaker_ips[1]}}', port_num: '6000'} + - {file_name: "config_3.ini", local_ip: '{{vlan_ips[0]}}', port_num: '7000'} + delegate_to: "{{ptf_host}}" + + - set_fact: portchannel_peer="{%for p in minigraph_portchannel_interfaces%}{%if p['attachto']==portchannel_name and p['peer_addr']|ipv6%}{{p['peer_addr']}}{%endif %}{%endfor%}" + when: addr_family == 'ipv6' + + - set_fact: mux_ip_1="{% if addr_family=='ipv4' %}{{vlan_ips[1]}}{% else %}{{portchannel_peer}}{% endif %}" + - set_fact: mux_ip_2="{% if addr_family=='ipv4' %}{{vlan_ips[2]}}{% else %}{{portchannel_peer}}{% endif %}" + + - name: Set the prefix to be announced + set_fact: + announce_prefix: "{% if addr_family=='ipv4' %}10.10.10.0/26{% else %}2010::/126{% endif %}" + + - name: Generate routes to be announced + template: src=roles/test/templates/exabgp/routes.j2 dest={{exabgp_dir}}/routes + with_items: + - {speaker_ip: '{{vlan_ips[0]}}', + mux_ip_1: "{{mux_ip_1}}", + mux_ip_2: "{{mux_ip_2}}", + port_num_1: '5000', + port_num_2: '6000', + port_num_3: '7000'} + delegate_to: "{{ptf_host}}" + + - name: Generate start file for exabgp instances + template: 
src=roles/test/templates/exabgp/start.j2 dest={{exabgp_dir}}/{{item.file_name}} mode=u+rwx + with_items: + - {file_name: 'start.sh', + config_file_1: 'config_1.ini', + config_file_2: 'config_2.ini', + config_file_3: 'config_3.ini', + phy_ip: '{{vlan_ips[0]}}', + logical_ip_1: '{{speaker_ips[0]}}', + logical_ip_2: '{{speaker_ips[1]}}', + mux_ip_1: "{{mux_ip_1}}", + mux_ip_2: "{{mux_ip_2}}"} + delegate_to: "{{ptf_host}}" + + - name: Kill exabgp instances if existing + shell: pkill -f exabgp + delegate_to: "{{ptf_host}}" + ignore_errors: yes + + - name: Start exabgp instances + shell: sh {{exabgp_dir}}/start.sh + delegate_to: "{{ptf_host}}" + + - pause: + seconds: 5 + prompt: "make sure dynamic bgp neighbors appear" + + - name: Announce the routes + shell: python {{helper_dir}}/announce_routes.py {{exabgp_dir}}/routes >/dev/null 2>&1 & + delegate_to: "{{ptf_host}}" + + - pause: + seconds: 30 + prompt: "make sure routes announced to dynamic bgp neighbors" + + - name: Gather bgp facts from bgp container + bgp_facts: + + - debug: msg="{{bgp_neighbors}}" + + - name: Verify bgp sessions are established + assert: {that: "'{{ bgp_neighbors[item]['state'] }}' == 'established'"} + with_items: "{{ bgp_neighbors.keys() }}" + + - name: Verify accepted prefixes of the dynamic neighbors are correct + assert: {that: "'{{ bgp_neighbors[item]['accepted prefixes'] }}' == '1'"} + with_items: "['{{speaker_ips[0].split('/')[0]}}', '{{speaker_ips[1].split('/')[0]}}', '{{vlan_ips[0].split('/')[0]}}'] " + + # Send packets to verify that accepted prefixes are correctly applied in HW. + # PTF FIB test is used to send the packets. 
+ + - name: Generate route-port map information + template: src=roles/test/templates/bgp_speaker_route.j2 + dest=/tmp/bgp_speaker_route.txt + connection: local + + - name: Copy the bgp_speaker_route to ptf container + copy: src=/tmp/bgp_speaker_route.txt dest=/root + delegate_to: "{{ptf_host}}" + + - name: Copy the test to ptf container + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + + - set_fact: ipv4=true + when: addr_family == 'ipv4' + + - set_fact: ipv6=true + when: addr_family == 'ipv6' + + - name: "Start PTF runner" + include: ptf_runner.yml + vars: + ptf_test_name: FIB test + ptf_test_dir: ptftests + ptf_test_path: fib_test.FibTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - testbed_type='{{testbed_type}}' + - router_mac='{{ansible_Ethernet0['macaddress']}}' + - fib_info='/root/bgp_speaker_route.txt' + - ipv4={{ ipv4|default(false) }} + - ipv6={{ ipv6|default(false) }} + - testbed_mtu={{ mtu|default(9114) }} + ptf_extra_options: "--relax --debug info --log-file /tmp/bgp_speaker_test.FibTest.log --socket-recv-size 16384" + always: + - name: Send SIGTERM to exabgp instances + shell: pkill -f exabgp + delegate_to: "{{ptf_host}}" + ignore_errors: yes + + - name: Flush vlan ips route + command: ip route flush {{item.split('/')[0]}}/32 + when: addr_family == 'ipv4' + become: yes + with_items: "{{vlan_ips}}" + + - name: Remove Assigned IPs + shell: ip addr flush dev eth{{ '%d' % (minigraph_port_indices[minigraph_vlans[minigraph_vlan_interfaces[0]['attachto']]['members'][0]])}} + delegate_to: "{{ptf_host}}" diff --git a/ansible/roles/test/tasks/buff_wm.yml b/ansible/roles/test/tasks/buff_wm.yml index ac973953d71..7f3545580ea 100644 --- a/ansible/roles/test/tasks/buff_wm.yml +++ b/ansible/roles/test/tasks/buff_wm.yml @@ -4,7 +4,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # buffer pool watermark test -- include_tasks: qos_sai_ptf.yml +- include: 
qos_sai_ptf.yml vars: test_name: Ingress buffer pool watermark test, lossless traffic test_path: sai_qos_tests.BufferPoolWatermarkTest @@ -35,7 +35,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # buffer pool watermark test -- include_tasks: qos_sai_ptf.yml +- include: qos_sai_ptf.yml vars: test_name: Egress buffer pool watermark test, lossless traffic test_path: sai_qos_tests.BufferPoolWatermarkTest @@ -62,7 +62,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # buffer pool watermark test -- include_tasks: qos_sai_ptf.yml +- include: qos_sai_ptf.yml vars: test_name: Ingress buffer pool watermark test, lossy traffic test_path: sai_qos_tests.BufferPoolWatermarkTest @@ -93,7 +93,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # buffer pool watermark test -- include_tasks: qos_sai_ptf.yml +- include: qos_sai_ptf.yml vars: test_name: Egress buffer pool watermark test, lossy traffic test_path: sai_qos_tests.BufferPoolWatermarkTest diff --git a/ansible/roles/test/tasks/check_fanout_interfaces.yml b/ansible/roles/test/tasks/check_fanout_interfaces.yml index 32cb8fcc25f..4a4d4e2f43f 100644 --- a/ansible/roles/test/tasks/check_fanout_interfaces.yml +++ b/ansible/roles/test/tasks/check_fanout_interfaces.yml @@ -1,7 +1,7 @@ - block: - name: Gathering lab graph facts about the device conn_graph_facts: host={{ inventory_hostname }} - delegate_to: localhost + connection: local - name: Fanout hostname set_fact: fanout_switch={{ device_conn['Ethernet0']['peerdevice'] }} diff --git a/ansible/roles/test/tasks/check_sw_vm_interfaces.yml b/ansible/roles/test/tasks/check_sw_vm_interfaces.yml index 952a01ae759..9a09d4968e4 100644 --- a/ansible/roles/test/tasks/check_sw_vm_interfaces.yml +++ b/ansible/roles/test/tasks/check_sw_vm_interfaces.yml @@ -23,7 +23,7 @@ - name: Gathering testbed information test_facts: testbed_name="{{ testbed_name }}" - 
delegate_to: localhost + connection: local ignore_errors: yes - name: Gather vm list from Testbed server @@ -47,7 +47,7 @@ connection: switch ignore_errors: yes when: vms["{{ item }}"]['hwsku'] == 'Arista-VM' - with_items: "{{ vms }}" + with_items: vms register: vm_portchannel_status - name: Debug Port-Channel on VMs diff --git a/ansible/roles/test/tasks/config.yml b/ansible/roles/test/tasks/config.yml index 1dae600ef26..c0aa4c3ab31 100644 --- a/ansible/roles/test/tasks/config.yml +++ b/ansible/roles/test/tasks/config.yml @@ -1,4 +1,140 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml +- debug: msg="Configuration Test" + +- name: Gather minigraph facts + minigraph_facts: host={{inventory_hostname}} + +- name: Gather interface facts + interface_facts: + +- name: Initialize portchannel + set_fact: + portchannel: "{{minigraph_portchannels | first}}" + tmp_portchannel: "PortChannel999" + +- name: Initialize portchannel_ip and portchannel_member + set_fact: + portchannel_ip: "{{ansible_interface_facts[portchannel]['ipv4']['address']}}" + portchannel_members: "{{minigraph_portchannels[portchannel]['members']}}" + +- name: Print variables vars: - test_node: pc/test_po_update.py \ No newline at end of file + msg: | + portchannel: {{ portchannel }} + portchannel_ip: {{ portchannel_ip }} + portchannel_members: {{ portchannel_members }} + debug: + msg: "{{ msg.split('\n') }}" + +- name: Initialize flags + set_fact: + remove_portchannel_members: false + remove_portchannel_ip: false + create_tmp_portchannel: false + add_tmp_portchannel_members: false + add_tmp_portchannel_ip: false + +- block: + - name: Step 1 Remove {{ portchannel_members }} from {{ portchannel }} + shell: config portchannel member del {{ portchannel }} {{ item }} + become: yes + with_items: "{{portchannel_members}}" + - set_fact: + remove_portchannel_members: true + + - name: Step 2 Remove {{ portchannel_ip }} from {{ portchannel }} + shell: config interface ip remove {{ portchannel }} 
{{ portchannel_ip }}/31 + become: yes + - set_fact: + remove_portchannel_ip: true + + - pause: seconds=30 + + - interface_facts: + - assert: + that: + - "{{ansible_interface_facts[portchannel]['link']}} == False" + + - bgp_facts: + - assert: + that: + - "{{bgp_statistics['ipv4_idle']}} == 1" + + - name: Step 3 Create {{ tmp_portchannel }} + shell: config portchannel add {{ tmp_portchannel }} + become: yes + - set_fact: + create_tmp_portchannel: true + + - name: Step 4 Add {{ portchannel_members }} to {{ tmp_portchannel }} + shell: config portchannel member add {{ tmp_portchannel }} {{ item }} + become: yes + with_items: "{{portchannel_members}}" + - set_fact: + add_tmp_portchannel_members: true + + - name: Step 5 Add {{ portchannel_ip }} to {{ tmp_portchannel }} + shell: config interface ip add {{ tmp_portchannel }} {{ portchannel_ip }}/31 + become: yes + - set_fact: + add_tmp_portchannel_ip: true + + - interface_facts: + - assert: + that: + - "'{{ansible_interface_facts[tmp_portchannel].ipv4.address}}' == '{{portchannel_ip}}'" + + - pause: seconds=30 + + - interface_facts: + - assert: + that: + - "{{ansible_interface_facts[tmp_portchannel]['link']}} == True" + + - bgp_facts: + - assert: + that: + - "{{bgp_statistics['ipv4_idle']}} == 0" + + always: + - name: Remove {{ portchannel_ip }} from {{ tmp_portchannel }} + shell: config interface ip remove {{ tmp_portchannel }} {{ portchannel_ip }}/31 + become: yes + when: add_tmp_portchannel_ip + + - pause: seconds=5 + + - name: Remove {{ portchannel_members }} from {{ tmp_portchannel }} + shell: config portchannel member del {{ tmp_portchannel }} {{ item }} + become: yes + when: add_tmp_portchannel_members + with_items: "{{portchannel_members}}" + + - pause: seconds=5 + + - name: Remove {{ tmp_portchannel }} + shell: config portchannel del {{ tmp_portchannel }} + become: yes + when: create_tmp_portchannel + + - name: Add {{ portchannel_ip }} to {{ portchannel }} + shell: config interface ip add {{ portchannel }} {{ 
portchannel_ip }}/31 + become: yes + when: remove_portchannel_ip + + - name: Add {{ portchannel_members }} to {{ portchannel }} + shell: config portchannel member add {{ portchannel }} {{ item }} + become: yes + when: remove_portchannel_members + with_items: "{{portchannel_members}}" + + - pause: seconds=30 + + - interface_facts: + - assert: + that: + - "{{ansible_interface_facts[portchannel]['link']}} == True" + + - bgp_facts: + - assert: + that: + - "{{bgp_statistics['ipv4_idle']}} == 0" diff --git a/ansible/roles/test/tasks/continuous_reboot.yml b/ansible/roles/test/tasks/continuous_reboot.yml index c0371302281..3511187f8b0 100644 --- a/ansible/roles/test/tasks/continuous_reboot.yml +++ b/ansible/roles/test/tasks/continuous_reboot.yml @@ -10,5 +10,5 @@ - debug: msg: "Execute reboot.yml {{ repeat_count }} time(s)" -- include_tasks: reboot.yml +- include: reboot.yml with_sequence: end={{repeat_count}} diff --git a/ansible/roles/test/tasks/copp.yml b/ansible/roles/test/tasks/copp.yml index 802229a1df6..12fb97d2f13 100644 --- a/ansible/roles/test/tasks/copp.yml +++ b/ansible/roles/test/tasks/copp.yml @@ -1,11 +1,3 @@ -- include_tasks: add_container_to_inventory.yml - vars: - container_name: "{{ item }}" - with_items: - - "lldp" - - "syncd" - - "swss" - - block: - fail: msg="Please set ptf_host variable" when: ptf_host is not defined @@ -13,11 +5,17 @@ - name: Ensure LLDP Daemon stopped become: yes supervisorctl: state=stopped name={{ item }} - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python with_items: - lldp-syncd - lldpd + - name: Disable Mellanox copp rate limiting + script: roles/test/files/mlnx/disable_copp_rate_limiting.sh + when: minigraph_hwsku is defined and minigraph_hwsku == 'ACS-MSN2700' + - name: Remove existing ip from ptf host script: roles/test/files/helpers/remove_ip.sh delegate_to: "{{ ptf_host }}" @@ -26,14 +24,14 @@ script: roles/test/files/helpers/add_ip.sh 
delegate_to: "{{ ptf_host }}" - - name: set default nn_target_interface if it's not defined - set_fact: nn_target_interface="{{ minigraph_ports.keys()[3] }}" - when: nn_target_interface is undefined - - name: set default nn_target_port if it's not defined - set_fact: nn_target_port="{{ minigraph_port_indices[nn_target_interface] }}" + set_fact: nn_target_port="3" when: nn_target_port is undefined + - name: set default nn_target_interface if it's not defined + set_fact: nn_target_interface="Ethernet12" + when: nn_target_interface is undefined + - name: Update ptf_nn_agent configuration inside ptf template: src=ptf_nn_agent.conf.ptf.j2 dest=/etc/supervisor/conf.d/ptf_nn_agent.conf delegate_to: "{{ ptf_host }}" @@ -44,25 +42,21 @@ - name: Update ptf_nn_agent configuration inside dut template: src=ptf_nn_agent.conf.dut.j2 dest=/etc/supervisor/conf.d/ptf_nn_agent.conf - delegate_to: "{{ ansible_host }}_syncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python - name: Restart ptf_nn_agent inside dut supervisorctl: state=restarted name=ptf_nn_agent - delegate_to: "{{ ansible_host }}_syncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python - name: copy the test to ptf container copy: src=roles/test/files/ptftests dest=/root delegate_to: "{{ ptf_host }}" - - name: copy copp configuration file - copy: src=roles/test/tasks/copp/ip2me_600.json dest=/root - delegate_to: "{{ ansible_host }}_swss" - - - name: update copp configuration - shell: "swssconfig /root/ip2me_600.json" - delegate_to: "{{ ansible_host }}_swss" - - - include_tasks: ptf_runner.yml + - include: ptf_runner.yml vars: ptf_test_name: COPP test - {{ item }} ptf_test_dir: ptftests @@ -87,14 +81,6 @@ - IP2METest always: - - name: copy copp configuration file - copy: src=roles/test/tasks/copp/ip2me_6000.json dest=/root - delegate_to: "{{ ansible_host }}_swss" - - - name: restore copp configuration - shell: "swssconfig 
/root/ip2me_6000.json" - delegate_to: "{{ ansible_host }}_swss" - - name: Remove existing ip from ptf host script: roles/test/files/helpers/remove_ip.sh delegate_to: "{{ ptf_host }}" @@ -115,17 +101,22 @@ - name: Update ptf_nn_agent configuration inside dut template: src=ptf_nn_agent.conf.dut.j2 dest=/etc/supervisor/conf.d/ptf_nn_agent.conf - delegate_to: "{{ ansible_host }}_syncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python - name: Restart ptf_nn_agent inside dut supervisorctl: state=restarted name=ptf_nn_agent - delegate_to: "{{ ansible_host }}_syncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python - name: Restore LLDP Daemon become: yes supervisorctl: state=started name={{ item }} - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python with_items: - lldpd - lldp-syncd - diff --git a/ansible/roles/test/tasks/crm.yml b/ansible/roles/test/tasks/crm.yml index a8fce2911ce..de17d369674 100644 --- a/ansible/roles/test/tasks/crm.yml +++ b/ansible/roles/test/tasks/crm.yml @@ -1,4 +1,64 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: crm/test_crm.py +- block: + + - fail: msg="Information about tested missing" + when: (testbed_type is not defined) + + - fail: msg="Invalid testbed_type value '{{testbed_type}}'" + when: testbed_type not in ['t1', 't1-lag', 't0', 't0-52', 't0-56', 't0-64', 't0-116'] + + - set_fact: crm_intf="{{minigraph_interfaces[0].attachto}}" + crm_intf1="{{minigraph_interfaces[2].attachto}}" + when: testbed_type == "t1" + + - set_fact: crm_intf="{{minigraph_portchannel_interfaces[0].attachto}}" + crm_intf1="{{minigraph_portchannel_interfaces[2].attachto}}" + when: testbed_type in ['t0', 't1-lag', 't0-52', 't0-56', 't0-64', 't0-116'] + + - set_fact: + ansible_date_time: "{{ansible_date_time}}" + crm_update_time: 4 + + - name: Set polling 
interval + command: crm config polling interval 1 + + - name: Make sure CRM counters updated + pause: seconds=30 + + - name: Run test case "CRM IPv4 route resource" + include: roles/test/tasks/crm/crm_test_ipv4_route.yml + + - name: Run test case "CRM IPv6 route resource" + include: roles/test/tasks/crm/crm_test_ipv6_route.yml + + - name: Run test case "CRM IPv4 nexthop resource" + include: roles/test/tasks/crm/crm_test_ipv4_nexthop.yml + + - name: Run test case "CRM IPv6 nexthop resource" + include: roles/test/tasks/crm/crm_test_ipv6_nexthop.yml + + - name: Run test case "CRM IPv4 neighbor resource" + include: roles/test/tasks/crm/crm_test_ipv4_neighbor.yml + + - name: Run test case "CRM IPv6 neighbor resource" + include: roles/test/tasks/crm/crm_test_ipv6_neighbor.yml + + - name: Run test case "CRM nexthop group resource" + include: roles/test/tasks/crm/crm_test_nexthop_group.yml + + - name: Run test case "CRM nexthop group member resource" + include: roles/test/tasks/crm/crm_test_nexthop_group_member.yml + + - name: Run test case "CRM ACL entry resources" + include: roles/test/tasks/crm/crm_test_acl_entry.yml + + - name: Run test case "CRM ACL counter resources" + include: roles/test/tasks/crm/crm_test_acl_counter.yml + + - name: Run test case "CRM FDB entry resource" + include: roles/test/tasks/crm/crm_test_fdb_entry.yml + when: testbed_type == "t0" + + always: + + - name: Restore polling interval + command: crm config polling interval 300 diff --git a/ansible/roles/test/tasks/crm/acl.json b/ansible/roles/test/tasks/crm/acl.json new file mode 100644 index 00000000000..ba2a385128f --- /dev/null +++ b/ansible/roles/test/tasks/crm/acl.json @@ -0,0 +1,30 @@ +{ + "acl": { + "acl-sets": { + "acl-set": { + "dataacl": { + "acl-entries": { + "acl-entry": { + "1": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 1 + }, + "l2": { + "config": { + "ethertype": "2048" + } + } + } + } + } + } + } + } + } +} + diff --git 
a/ansible/roles/test/tasks/crm/crm_test_acl_counter.yml b/ansible/roles/test/tasks/crm/crm_test_acl_counter.yml new file mode 100644 index 00000000000..5beca76991a --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_acl_counter.yml @@ -0,0 +1,81 @@ +- block: + + - set_fact: crm_stats_acl_counter_used=0 + - set_fact: crm_stats_acl_counter_available=0 + + - name: Copy ACL JSON config to switch. + copy: src=roles/test/tasks/crm/acl.json dest=/tmp + + - name: Get original "crm_stats_acl_counter_available" counter value + command: redis-cli -n 2 HGET {{acl_tbl_key}} crm_stats_acl_counter_available + register: out + - set_fact: original_crm_stats_acl_counter_available={{out.stdout}} + + - name: Add ACL + command: acl-loader update full /tmp/acl.json + become: yes + + - name: Get ACL entry keys + command: redis-cli --raw -n 1 KEYS *SAI_OBJECT_TYPE_ACL_ENTRY* + register: out + - set_fact: acl_tbl_keys={{out.stdout.split()}} + + - name: Get ethertype for ACL entry in order to match ACL which was configured + command: redis-cli -n 1 HGET {{item}} SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE + with_items: "{{acl_tbl_keys}}" + register: out + + - name: Match ethertype value for ACL entry + set_fact: key={{item.item}} + with_items: "{{out.results}}" + when: item.stdout|search("2048") + + - name: Get ACL table key + command: redis-cli -n 1 HGET {{key}} SAI_ACL_ENTRY_ATTR_TABLE_ID + register: out + - set_fact: acl_tbl_key={{"CRM:ACL_TABLE_STATS:{0}".format(out.stdout|replace("oid:", ""))}} + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_acl_counter" used and available counter value + command: redis-cli --raw -n 2 HMGET {{acl_tbl_key}} crm_stats_acl_counter_used crm_stats_acl_counter_available + register: out + - set_fact: + new_crm_stats_acl_counter_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_acl_counter_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_acl_counter_used" counter was incremented 
+ assert: {that: "{{new_crm_stats_acl_counter_used|int - crm_stats_acl_counter_used|int == 2}}"} + + - set_fact: crm_stats_acl_counter_available="{{new_crm_stats_acl_counter_available|int + new_crm_stats_acl_counter_used|int}}" + + - name: Verify thresholds for "ACL entry" CRM resource + vars: + crm_cli_res: "acl group counter" + crm_used: "{{new_crm_stats_acl_counter_used}}" + crm_avail: "{{new_crm_stats_acl_counter_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml + + - name: Remove ACL + command: acl-loader delete + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_acl_counter" used and available counter value + command: redis-cli --raw -n 2 HMGET {{acl_tbl_key}} crm_stats_acl_counter_used crm_stats_acl_counter_available + register: out + - set_fact: + new_crm_stats_acl_counter_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_acl_counter_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_acl_counter_used" counter was decremented + assert: {that: "{{new_crm_stats_acl_counter_used|int - crm_stats_acl_counter_used|int == 0}}"} + + - name: Verify "crm_stats_acl_counter_available" counter was incremented + assert: {that: "{{new_crm_stats_acl_counter_available|int - crm_stats_acl_counter_available|int >= 0}}"} + + - name: Verify "crm_stats_acl_counter_available" counter was equal to original value + assert: {that: "{{original_crm_stats_acl_counter_available|int - new_crm_stats_acl_counter_available|int == 0}}"} diff --git a/ansible/roles/test/tasks/crm/crm_test_acl_entry.yml b/ansible/roles/test/tasks/crm/crm_test_acl_entry.yml new file mode 100644 index 00000000000..12d3968edb0 --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_acl_entry.yml @@ -0,0 +1,76 @@ +- block: + + - set_fact: crm_stats_acl_entry_used=0 + - set_fact: crm_stats_acl_entry_available=0 + + - name: Copy ACL JSON config to switch. 
+ copy: src=roles/test/tasks/crm/acl.json dest=/tmp + + - name: Add ACL + command: acl-loader update full /tmp/acl.json + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get ACL entry keys + command: redis-cli --raw -n 1 KEYS *SAI_OBJECT_TYPE_ACL_ENTRY* + register: out + - set_fact: acl_tbl_keys={{out.stdout.split()}} + + - name: Get ethertype for ACL entry in order to match ACL which was configured + command: redis-cli -n 1 HGET {{item}} SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE + with_items: "{{acl_tbl_keys}}" + register: out + + - name: Match ethertype value for ACL entry + set_fact: key={{item.item}} + with_items: "{{out.results}}" + when: item.stdout|search("2048") + + - name: Get ACL table key + command: redis-cli -n 1 HGET {{key}} SAI_ACL_ENTRY_ATTR_TABLE_ID + register: out + - set_fact: acl_tbl_key={{"CRM:ACL_TABLE_STATS:{0}".format(out.stdout|replace("oid:", ""))}} + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_acl_entry" used and available counter value + command: redis-cli --raw -n 2 HMGET {{acl_tbl_key}} crm_stats_acl_entry_used crm_stats_acl_entry_available + register: out + - set_fact: + new_crm_stats_acl_entry_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_acl_entry_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_acl_entry_used" counter was incremented + assert: {that: "{{new_crm_stats_acl_entry_used|int - crm_stats_acl_entry_used|int == 2}}"} + + - set_fact: crm_stats_acl_entry_available="{{new_crm_stats_acl_entry_available|int + new_crm_stats_acl_entry_used|int}}" + + - name: Verify thresholds for "ACL entry" CRM resource + vars: + crm_cli_res: "acl group entry" + crm_used: "{{new_crm_stats_acl_entry_used}}" + crm_avail: "{{new_crm_stats_acl_entry_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml + + - name: Remove ACL + command: acl-loader delete + become: yes + + - name: Make sure CRM 
counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_acl_entry" used and available counter value + command: redis-cli --raw -n 2 HMGET {{acl_tbl_key}} crm_stats_acl_entry_used crm_stats_acl_entry_available + register: out + - set_fact: + new_crm_stats_acl_entry_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_acl_entry_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_acl_entry_used" counter was decremented + assert: {that: "{{new_crm_stats_acl_entry_used|int - crm_stats_acl_entry_used|int == 0}}"} + + - name: Verify "crm_stats_acl_entry_available" counter was incremented + assert: {that: "{{new_crm_stats_acl_entry_available|int - crm_stats_acl_entry_available|int == 0}}"} diff --git a/ansible/roles/test/tasks/crm/crm_test_fdb_entry.yml b/ansible/roles/test/tasks/crm/crm_test_fdb_entry.yml new file mode 100644 index 00000000000..c4e80c25978 --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_fdb_entry.yml @@ -0,0 +1,86 @@ +- block: + + - name: Get "crm_stats_fdb_entry" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_fdb_entry_used crm_stats_fdb_entry_available + register: out + - set_fact: + crm_stats_fdb_entry_used: "{{ out.stdout_lines[0] }}" + crm_stats_fdb_entry_available: "{{ out.stdout_lines[1] }}" + + - name: Copy FDB JSON config to switch. 
+ copy: src=roles/test/tasks/crm/fdb.json dest=/tmp + + - name: Copy FDB JSON config to SWSS container + command: docker cp /tmp/fdb.json swss:/ + + - name: Add FDB entry + command: docker exec -i swss swssconfig /fdb.json + + - name: Add VLAN required for FDB entry + command: config vlan add 2 + become: yes + + - name: Add VLAN member required for FDB entry + command: config vlan member add 2 Ethernet0 + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_fdb_entry" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_fdb_entry_used crm_stats_fdb_entry_available + register: out + - set_fact: + new_crm_stats_fdb_entry_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_fdb_entry_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_fdb_entry_used" counter was incremented + assert: {that: "{{new_crm_stats_fdb_entry_used|int - crm_stats_fdb_entry_used|int == 1}}"} + + - name: Verify "crm_stats_fdb_entry_available" counter was decremented + assert: {that: "{{crm_stats_fdb_entry_available|int - new_crm_stats_fdb_entry_available|int == 1}}"} + + - name: Verify thresholds for "FDB entry" CRM resource + vars: + crm_cli_res: "fdb" + crm_used: "{{new_crm_stats_fdb_entry_used}}" + crm_avail: "{{new_crm_stats_fdb_entry_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml + + - name: Remove FDB entry + command: fdbclear + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_fdb_entry" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_fdb_entry_used crm_stats_fdb_entry_available + register: out + - set_fact: + new_crm_stats_fdb_entry_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_fdb_entry_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_fdb_entry_used" counter was decremented + assert: {that: 
"{{new_crm_stats_fdb_entry_used|int == 0}}"} + + - name: Verify "crm_stats_fdb_entry_available" counter was incremented + assert: {that: "{{new_crm_stats_fdb_entry_available|int - crm_stats_fdb_entry_available|int >= 0}}"} + + always: + + - name: Remove VLAN member required for FDB entry + command: config vlan member del 2 Ethernet0 + become: yes + + - name: Remove VLAN required for FDB entry + command: config vlan del 2 + become: yes + + - name: Remove FDB entry + command: fdbclear + + - name: Remove FDB JSON config from switch. + command: rm /tmp/fdb.json + + - name: Remove FDB JSON config from SWSS container + command: docker exec -i swss rm /fdb.json diff --git a/ansible/roles/test/tasks/crm/crm_test_ipv4_neighbor.yml b/ansible/roles/test/tasks/crm/crm_test_ipv4_neighbor.yml new file mode 100644 index 00000000000..01151701a33 --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_ipv4_neighbor.yml @@ -0,0 +1,55 @@ +- block: + + - name: Get "crm_stats_ipv4_neighbor" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_neighbor_used crm_stats_ipv4_neighbor_available + register: out + - set_fact: + crm_stats_ipv4_neighbor_used: "{{ out.stdout_lines[0] }}" + crm_stats_ipv4_neighbor_available: "{{ out.stdout_lines[1] }}" + + - name: Add IPv4 neighbor + command: ip neigh replace 2.2.2.2 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv4_neighbor" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_neighbor_used crm_stats_ipv4_neighbor_available + register: out + - set_fact: + new_crm_stats_ipv4_neighbor_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv4_neighbor_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv4_neighbor_used" counter was incremented + assert: {that: "{{new_crm_stats_ipv4_neighbor_used|int - 
crm_stats_ipv4_neighbor_used|int >= 1}}"} + + - name: Verify "crm_stats_ipv4_neighbor_available" counter was decremented + assert: {that: "{{crm_stats_ipv4_neighbor_available|int - new_crm_stats_ipv4_neighbor_available|int >= 1}}"} + + - name: Remove IPv4 neighbor + command: ip neigh del 2.2.2.2 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv4_neighbor" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_neighbor_used crm_stats_ipv4_neighbor_available + register: out + - set_fact: + new_crm_stats_ipv4_neighbor_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv4_neighbor_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv4_neighbor_used" counter was decremented + assert: {that: "{{new_crm_stats_ipv4_neighbor_used|int - crm_stats_ipv4_neighbor_used|int >= 0}}"} + + - name: Verify "crm_stats_ipv4_neighbor_available" counter was incremented + assert: {that: "{{new_crm_stats_ipv4_neighbor_available|int - crm_stats_ipv4_neighbor_available|int == 0}}"} + + - name: Verify thresholds for "IPv4 neighbor" CRM resource + vars: + crm_cli_res: "ipv4 neighbor" + crm_used: "{{new_crm_stats_ipv4_neighbor_used}}" + crm_avail: "{{new_crm_stats_ipv4_neighbor_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml diff --git a/ansible/roles/test/tasks/crm/crm_test_ipv4_nexthop.yml b/ansible/roles/test/tasks/crm/crm_test_ipv4_nexthop.yml new file mode 100644 index 00000000000..896345684cd --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_ipv4_nexthop.yml @@ -0,0 +1,55 @@ +- block: + + - name: Get "crm_stats_ipv4_nexthop" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_nexthop_used crm_stats_ipv4_nexthop_available + register: out + - set_fact: + crm_stats_ipv4_nexthop_used: "{{ out.stdout_lines[0] }}" + 
crm_stats_ipv4_nexthop_available: "{{ out.stdout_lines[1] }}" + + - name: Add IPv4 nexthop + command: ip neigh replace 2.2.2.2 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv4_nexthop" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_nexthop_used crm_stats_ipv4_nexthop_available + register: out + - set_fact: + new_crm_stats_ipv4_nexthop_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv4_nexthop_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv4_nexthop_used" counter was incremented + assert: {that: "{{new_crm_stats_ipv4_nexthop_used|int - crm_stats_ipv4_nexthop_used|int >= 1}}"} + + - name: Verify "crm_stats_ipv4_nexthop_available" counter was decremented + assert: {that: "{{crm_stats_ipv4_nexthop_available|int - new_crm_stats_ipv4_nexthop_available|int >= 1}}"} + + - name: Remove IPv4 nexthop + command: ip neigh del 2.2.2.2 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv4_nexthop" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_nexthop_used crm_stats_ipv4_nexthop_available + register: out + - set_fact: + new_crm_stats_ipv4_nexthop_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv4_nexthop_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv4_nexthop_used" counter was decremented + assert: {that: "{{new_crm_stats_ipv4_nexthop_used|int - crm_stats_ipv4_nexthop_used|int == 0}}"} + + - name: Verify "crm_stats_ipv4_nexthop_available" counter was incremented + assert: {that: "{{new_crm_stats_ipv4_nexthop_available|int - crm_stats_ipv4_nexthop_available|int == 0}}"} + + - name: Verify thresholds for "IPv4 nexthop" CRM resource + vars: + crm_cli_res: "ipv4 nexthop" + crm_used: 
"{{new_crm_stats_ipv4_nexthop_used}}" + crm_avail: "{{new_crm_stats_ipv4_nexthop_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml diff --git a/ansible/roles/test/tasks/crm/crm_test_ipv4_route.yml b/ansible/roles/test/tasks/crm/crm_test_ipv4_route.yml new file mode 100644 index 00000000000..5fad99aa95a --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_ipv4_route.yml @@ -0,0 +1,60 @@ +- block: + + - name: Get "crm_stats_ipv4_route" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_route_used crm_stats_ipv4_route_available + register: out + - set_fact: + crm_stats_ipv4_route_used: "{{ out.stdout_lines[0] }}" + crm_stats_ipv4_route_available: "{{ out.stdout_lines[1] }}" + + - name: Get NH IP + command: ip -4 neigh show dev {{crm_intf}} nud reachable nud stale + register: out + - set_fact: nh_ip="{{out.stdout.split()[0]}}" + + - name: Add IPv4 route + command: ip route add 2.2.2.0/24 via {{nh_ip}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv4_route" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_route_used crm_stats_ipv4_route_available + register: out + - set_fact: + new_crm_stats_ipv4_route_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv4_route_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv4_route_used" counter was incremented + assert: {that: "{{new_crm_stats_ipv4_route_used|int - crm_stats_ipv4_route_used|int == 1}}"} + + - name: Verify "crm_stats_ipv4_route_available" counter was decremented + assert: {that: "{{crm_stats_ipv4_route_available|int - new_crm_stats_ipv4_route_available|int >= 1}}"} + + - name: Remove IPv4 route + command: ip route del 2.2.2.0/24 via {{nh_ip}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv4_route" used and available 
counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv4_route_used crm_stats_ipv4_route_available + register: out + - set_fact: + new_crm_stats_ipv4_route_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv4_route_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv4_route_used" counter was decremented + assert: {that: "{{new_crm_stats_ipv4_route_used|int - crm_stats_ipv4_route_used|int == 0}}"} + + - name: Verify "crm_stats_ipv4_route_available" counter was incremented + assert: {that: "{{new_crm_stats_ipv4_route_available|int - crm_stats_ipv4_route_available|int == 0}}"} + + - name: Verify thresholds for "IPv4 route" CRM resource + vars: + crm_cli_res: "ipv4 route" + crm_used: "{{new_crm_stats_ipv4_route_used}}" + crm_avail: "{{new_crm_stats_ipv4_route_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml diff --git a/ansible/roles/test/tasks/crm/crm_test_ipv6_neighbor.yml b/ansible/roles/test/tasks/crm/crm_test_ipv6_neighbor.yml new file mode 100644 index 00000000000..779e0be0918 --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_ipv6_neighbor.yml @@ -0,0 +1,55 @@ +- block: + + - name: Get "crm_stats_ipv6_neighbor" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_neighbor_used crm_stats_ipv6_neighbor_available + register: out + - set_fact: + crm_stats_ipv6_neighbor_used: "{{ out.stdout_lines[0] }}" + crm_stats_ipv6_neighbor_available: "{{ out.stdout_lines[1] }}" + + - name: Add IPv6 neighbor + command: ip neigh replace 2001::1 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv6_neighbor" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_neighbor_used crm_stats_ipv6_neighbor_available + register: out + - set_fact: + new_crm_stats_ipv6_neighbor_used: "{{ out.stdout_lines[0] }}" + 
new_crm_stats_ipv6_neighbor_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv6_neighbor_used" counter was incremented + assert: {that: "{{new_crm_stats_ipv6_neighbor_used|int - crm_stats_ipv6_neighbor_used|int >= 1}}"} + + - name: Verify "crm_stats_ipv6_neighbor_available" counter was decremented + assert: {that: "{{crm_stats_ipv6_neighbor_available|int - new_crm_stats_ipv6_neighbor_available|int >= 1}}"} + + - name: Remove IPv6 neighbor + command: ip neigh del 2001::1 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv6_neighbor" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_neighbor_used crm_stats_ipv6_neighbor_available + register: out + - set_fact: + new_crm_stats_ipv6_neighbor_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv6_neighbor_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv6_neighbor_used" counter was decremented + assert: {that: "{{new_crm_stats_ipv6_neighbor_used|int - crm_stats_ipv6_neighbor_used|int >= 0}}"} + + - name: Verify "crm_stats_ipv6_neighbor_available" counter was incremented + assert: {that: "{{new_crm_stats_ipv6_neighbor_available|int - crm_stats_ipv6_neighbor_available|int == 0}}"} + + - name: Verify thresholds for "IPv6 neighbor" CRM resource + vars: + crm_cli_res: "ipv6 neighbor" + crm_used: "{{new_crm_stats_ipv6_neighbor_used}}" + crm_avail: "{{new_crm_stats_ipv6_neighbor_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml diff --git a/ansible/roles/test/tasks/crm/crm_test_ipv6_nexthop.yml b/ansible/roles/test/tasks/crm/crm_test_ipv6_nexthop.yml new file mode 100644 index 00000000000..7b128c9b529 --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_ipv6_nexthop.yml @@ -0,0 +1,55 @@ +- block: + + - name: Get "crm_stats_ipv6_nexthop" used and available counter value + command: redis-cli --raw 
-n 2 HMGET CRM:STATS crm_stats_ipv6_nexthop_used crm_stats_ipv6_nexthop_available + register: out + - set_fact: + crm_stats_ipv6_nexthop_used: "{{ out.stdout_lines[0] }}" + crm_stats_ipv6_nexthop_available: "{{ out.stdout_lines[1] }}" + + - name: Add IPv6 nexthop + command: ip neigh replace 2001::1 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv6_nexthop" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_nexthop_used crm_stats_ipv6_nexthop_available + register: out + - set_fact: + new_crm_stats_ipv6_nexthop_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv6_nexthop_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv6_nexthop_used" counter was incremented + assert: {that: "{{new_crm_stats_ipv6_nexthop_used|int - crm_stats_ipv6_nexthop_used|int == 1}}"} + + - name: Verify "crm_stats_ipv6_nexthop_available" counter was decremented + assert: {that: "{{crm_stats_ipv6_nexthop_available|int - new_crm_stats_ipv6_nexthop_available|int >= 1}}"} + + - name: Remove IPv6 nexthop + command: ip neigh del 2001::1 lladdr 11:22:33:44:55:66 dev {{crm_intf}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv6_nexthop" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_nexthop_used crm_stats_ipv6_nexthop_available + register: out + - set_fact: + new_crm_stats_ipv6_nexthop_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv6_nexthop_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv6_nexthop_used" counter was decremented + assert: {that: "{{new_crm_stats_ipv6_nexthop_used|int - crm_stats_ipv6_nexthop_used|int == 0}}"} + + - name: Verify "crm_stats_ipv6_nexthop_available" counter was incremented + assert: {that: 
"{{new_crm_stats_ipv6_nexthop_available|int - crm_stats_ipv6_nexthop_available|int == 0}}"} + + - name: Verify thresholds for "IPv6 nexthop" CRM resource + vars: + crm_cli_res: "ipv6 nexthop" + crm_used: "{{new_crm_stats_ipv6_nexthop_used}}" + crm_avail: "{{new_crm_stats_ipv6_nexthop_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml diff --git a/ansible/roles/test/tasks/crm/crm_test_ipv6_route.yml b/ansible/roles/test/tasks/crm/crm_test_ipv6_route.yml new file mode 100644 index 00000000000..a7a0daa98e4 --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_ipv6_route.yml @@ -0,0 +1,61 @@ +- block: + + - name: Get "crm_stats_ipv6_route" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_route_used crm_stats_ipv6_route_available + register: out + - set_fact: + crm_stats_ipv6_route_used: "{{ out.stdout_lines[0] }}" + crm_stats_ipv6_route_available: "{{ out.stdout_lines[1] }}" + + - name: Get NH IP + shell: ip -6 neigh show dev {{crm_intf}} nud reachable nud stale | grep -v fe80 + register: out + - set_fact: nh_ip="{{out.stdout.split()[0]}}" + + - name: Add IPv6 route + command: ip -6 route add 2001::/126 via {{nh_ip}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv6_route" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_route_used crm_stats_ipv6_route_available + register: out + - set_fact: + new_crm_stats_ipv6_route_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv6_route_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv6_route_used" counter was incremented + assert: {that: "{{new_crm_stats_ipv6_route_used|int - crm_stats_ipv6_route_used|int >= 1}}"} + + - name: Verify "crm_stats_ipv6_route_available" counter was decremented + assert: {that: "{{crm_stats_ipv6_route_available|int - new_crm_stats_ipv6_route_available|int >= 1}}"} + + - name: Remove 
IPv6 route + command: ip -6 route del 2001::/126 via {{nh_ip}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_ipv6_route" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_ipv6_route_used crm_stats_ipv6_route_available + register: out + - set_fact: + new_crm_stats_ipv6_route_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_ipv6_route_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_ipv6_route_used" counter was decremented + assert: {that: "{{new_crm_stats_ipv6_route_used|int - crm_stats_ipv6_route_used|int == 0}}"} + + - name: Verify "crm_stats_ipv6_route_available" counter was incremented + assert: {that: "{{new_crm_stats_ipv6_route_available|int - crm_stats_ipv6_route_available|int == 0}}"} + + - name: Verify thresholds for "IPv6 route" CRM resource + vars: + crm_cli_res: "ipv6 route" + crm_used: "{{new_crm_stats_ipv6_route_used}}" + crm_avail: "{{new_crm_stats_ipv6_route_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml + diff --git a/ansible/roles/test/tasks/crm/crm_test_nexthop_group.yml b/ansible/roles/test/tasks/crm/crm_test_nexthop_group.yml new file mode 100644 index 00000000000..6325064fbaf --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_nexthop_group.yml @@ -0,0 +1,75 @@ +- block: + + - name: Get "crm_stats_nexthop_group" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_nexthop_group_used crm_stats_nexthop_group_available + register: out + - set_fact: + crm_stats_nexthop_group_used: "{{ out.stdout_lines[0] }}" + crm_stats_nexthop_group_available: "{{ out.stdout_lines[1] }}" + + - name: Get NH IP 1 + command: ip -4 neigh show dev {{crm_intf}} nud reachable nud stale + register: out + - set_fact: nh_ip1="{{out.stdout.split()[0]}}" + + - name: Get NH IP 2 + command: ip -4 neigh show dev {{crm_intf1}} nud reachable nud stale + register: out + - 
set_fact: nh_ip2="{{out.stdout.split()[0]}}" + + - name: Add nexthop group + command: ip route add 2.2.2.0/24 nexthop via {{nh_ip1}} nexthop via {{nh_ip2}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_nexthop_group_used" counter value + command: docker exec -i database redis-cli -n 2 HGET CRM:STATS crm_stats_nexthop_group_used + register: out + - set_fact: new_crm_stats_nexthop_group_used={{out.stdout}} + + - name: Get new "crm_stats_nexthop_group_available" counter value + command: docker exec -i database redis-cli -n 2 HGET CRM:STATS crm_stats_nexthop_group_available + register: out + - set_fact: new_crm_stats_nexthop_group_available={{out.stdout}} + + - name: Get new "crm_stats_nexthop_group" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_nexthop_group_used crm_stats_nexthop_group_available + register: out + - set_fact: + new_crm_stats_nexthop_group_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_nexthop_group_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_nexthop_group_used" counter was incremented + assert: {that: "{{new_crm_stats_nexthop_group_used|int - crm_stats_nexthop_group_used|int == 1}}"} + + - name: Verify "crm_stats_nexthop_group_available" counter was decremented + assert: {that: "{{crm_stats_nexthop_group_available|int - new_crm_stats_nexthop_group_available|int >= 1}}"} + + - name: Remove nexthop group + command: ip route del 2.2.2.0/24 nexthop via {{nh_ip1}} nexthop via {{nh_ip2}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_nexthop_group" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_nexthop_group_used crm_stats_nexthop_group_available + register: out + - set_fact: + new_crm_stats_nexthop_group_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_nexthop_group_available: "{{ 
out.stdout_lines[1] }}" + + - name: Verify "crm_stats_nexthop_group_used" counter was decremented + assert: {that: "{{new_crm_stats_nexthop_group_used|int - crm_stats_nexthop_group_used|int == 0}}"} + + - name: Verify "crm_stats_nexthop_group_available" counter was incremented + assert: {that: "{{new_crm_stats_nexthop_group_available|int - crm_stats_nexthop_group_available|int == 0}}"} + + - name: Verify thresholds for "nexthop group" CRM resource + vars: + crm_cli_res: "nexthop group object" + crm_used: "{{new_crm_stats_nexthop_group_used}}" + crm_avail: "{{new_crm_stats_nexthop_group_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml diff --git a/ansible/roles/test/tasks/crm/crm_test_nexthop_group_member.yml b/ansible/roles/test/tasks/crm/crm_test_nexthop_group_member.yml new file mode 100644 index 00000000000..9033d3cc8ef --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_nexthop_group_member.yml @@ -0,0 +1,65 @@ +- block: + + - name: Get "crm_stats_nexthop_group_member" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_nexthop_group_member_used crm_stats_nexthop_group_member_available + register: out + - set_fact: + crm_stats_nexthop_group_member_used: "{{ out.stdout_lines[0] }}" + crm_stats_nexthop_group_member_available: "{{ out.stdout_lines[1] }}" + + - name: Get NH IP 1 + command: ip -4 neigh show dev {{crm_intf}} nud reachable nud stale + register: out + - set_fact: nh_ip1="{{out.stdout.split()[0]}}" + + - name: Get NH IP 2 + command: ip -4 neigh show dev {{crm_intf1}} nud reachable nud stale + register: out + - set_fact: nh_ip2="{{out.stdout.split()[0]}}" + + - name: Add nexthop group members + command: ip route add 2.2.2.0/24 nexthop via {{nh_ip1}} nexthop via {{nh_ip2}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_nexthop_group_member" used and available counter value + command: redis-cli --raw -n 2 HMGET 
CRM:STATS crm_stats_nexthop_group_member_used crm_stats_nexthop_group_member_available + register: out + - set_fact: + new_crm_stats_nexthop_group_member_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_nexthop_group_member_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_nexthop_group_member_used" counter was incremented + assert: {that: "{{new_crm_stats_nexthop_group_member_used|int - crm_stats_nexthop_group_member_used|int == 2}}"} + + - name: Verify "crm_stats_nexthop_group_member_available" counter was decremented + assert: {that: "{{crm_stats_nexthop_group_member_available|int - new_crm_stats_nexthop_group_member_available|int >= 2}}"} + + - name: Remove nexthop group members + command: ip route del 2.2.2.0/24 nexthop via {{nh_ip1}} nexthop via {{nh_ip2}} + become: yes + + - name: Make sure CRM counters updated + pause: seconds={{ crm_update_time }} + + - name: Get new "crm_stats_nexthop_group_member" used and available counter value + command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_nexthop_group_member_used crm_stats_nexthop_group_member_available + register: out + - set_fact: + new_crm_stats_nexthop_group_member_used: "{{ out.stdout_lines[0] }}" + new_crm_stats_nexthop_group_member_available: "{{ out.stdout_lines[1] }}" + + - name: Verify "crm_stats_nexthop_group_member_used" counter was decremented + assert: {that: "{{new_crm_stats_nexthop_group_member_used|int - crm_stats_nexthop_group_member_used|int == 0}}"} + + - name: Verify "crm_stats_nexthop_group_member_available" counter was incremented + assert: {that: "{{new_crm_stats_nexthop_group_member_available|int - crm_stats_nexthop_group_member_available|int == 0}}"} + + - name: Verify thresholds for "nexthop group member" CRM resource + vars: + crm_cli_res: "nexthop group member" + crm_used: "{{new_crm_stats_nexthop_group_member_used}}" + crm_avail: "{{new_crm_stats_nexthop_group_member_available}}" + include: roles/test/tasks/crm/crm_test_threshold.yml diff --git 
a/ansible/roles/test/tasks/crm/crm_test_threshold.yml b/ansible/roles/test/tasks/crm/crm_test_threshold.yml new file mode 100644 index 00000000000..3384f21e455 --- /dev/null +++ b/ansible/roles/test/tasks/crm/crm_test_threshold.yml @@ -0,0 +1,76 @@ +- block: + + - name: Verify "CRM_EXCEEDED" log message for "used" threshold + vars: + command_to_run: bash -c "crm config thresholds {{crm_cli_res}} type used && crm config thresholds {{crm_cli_res}} low {{crm_used|int - 1}} && crm config thresholds {{crm_cli_res}} high {{crm_used|int}}" + tests_location: roles/test/tasks + testname: crm + test_expect_file: expect_crm_th_exceeded + out_dir: /tmp + run_dir: /tmp + errors_expected: true + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Verify "CRM_CLEAR" log message for "used" threshold + vars: + command_to_run: bash -c "crm config thresholds {{crm_cli_res}} type used && crm config thresholds {{crm_cli_res}} low {{crm_used|int}} && crm config thresholds {{crm_cli_res}} high {{crm_used|int + 1}}" + tests_location: roles/test/tasks + testname: crm + test_expect_file: expect_crm_th_clear + out_dir: /tmp + run_dir: /tmp + errors_expected: true + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Verify "CRM_EXCEEDED" log message for "free" threshold + vars: + command_to_run: bash -c "crm config thresholds {{crm_cli_res}} type free && crm config thresholds {{crm_cli_res}} low {{crm_avail|int - 1}} && crm config thresholds {{crm_cli_res}} high {{crm_avail|int}}" + tests_location: roles/test/tasks + testname: crm + test_expect_file: expect_crm_th_exceeded + out_dir: /tmp + run_dir: /tmp + errors_expected: true + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Verify "CRM_CLEAR" log message for "free" threshold + vars: + command_to_run: bash -c "crm config thresholds {{crm_cli_res}} type free && crm config thresholds {{crm_cli_res}} low {{crm_avail|int}} && crm config thresholds {{crm_cli_res}} high {{crm_avail|int
+ 1}}" + tests_location: roles/test/tasks + testname: crm + test_expect_file: expect_crm_th_clear + out_dir: /tmp + run_dir: /tmp + errors_expected: true + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Verify "CRM_EXCEEDED" log message for "percentage" threshold + vars: + th_lo: "{{(crm_used|int * 100 / (crm_used|int + crm_avail|int)) - 1}}" + th_hi: "{{crm_used|int * 100 / (crm_used|int + crm_avail|int)}}" + command_to_run: bash -c "crm config thresholds {{crm_cli_res}} type percentage && crm config thresholds {{crm_cli_res}} low {{th_lo|int}} && crm config thresholds {{crm_cli_res}} high {{th_hi|int}}" + tests_location: roles/test/tasks + testname: crm + test_expect_file: expect_crm_th_exceeded + out_dir: /tmp + run_dir: /tmp + errors_expected: true + include: roles/test/tasks/run_command_with_log_analyzer.yml + + - name: Verify "CRM_CLEAR" log message for "percentage" threshold + vars: + th_lo: "{{crm_used|int * 100 / (crm_used|int + crm_avail|int)}}" + th_hi: "{{(crm_used|int * 100 / (crm_used|int + crm_avail|int)) + 1}}" + command_to_run: bash -c "crm config thresholds {{crm_cli_res}} type percentage && crm config thresholds {{crm_cli_res}} low {{th_lo|int}} && crm config thresholds {{crm_cli_res}} high {{th_hi|int}}" + tests_location: roles/test/tasks + testname: crm + test_expect_file: expect_crm_th_clear + out_dir: /tmp + run_dir: /tmp + errors_expected: true + include: roles/test/tasks/run_command_with_log_analyzer.yml + + always: + + - name: Restore CRM thresholds + command: bash -c "crm config thresholds {{crm_cli_res}} type percentage && crm config thresholds {{crm_cli_res}} low 70 && crm config thresholds {{crm_cli_res}} high 85" diff --git a/ansible/roles/test/tasks/crm/expect_crm_th_clear b/ansible/roles/test/tasks/crm/expect_crm_th_clear new file mode 100644 index 00000000000..fab01e5131c --- /dev/null +++ b/ansible/roles/test/tasks/crm/expect_crm_th_clear @@ -0,0 +1 @@ +r, ".* THRESHOLD_CLEAR .*" diff --git
a/ansible/roles/test/tasks/crm/expect_crm_th_exceeded b/ansible/roles/test/tasks/crm/expect_crm_th_exceeded new file mode 100644 index 00000000000..dd42f4412f1 --- /dev/null +++ b/ansible/roles/test/tasks/crm/expect_crm_th_exceeded @@ -0,0 +1 @@ +r, ".* THRESHOLD_EXCEEDED .*" diff --git a/ansible/roles/test/tasks/crm/fdb.json b/ansible/roles/test/tasks/crm/fdb.json new file mode 100644 index 00000000000..e280e3d68fe --- /dev/null +++ b/ansible/roles/test/tasks/crm/fdb.json @@ -0,0 +1,9 @@ +[ + { + "FDB_TABLE:Vlan2:52-54-00-25-07-E9": { + "port": "Ethernet0", + "type": "dynamic" + }, + "OP": "SET" + } +] diff --git a/ansible/roles/test/tasks/decap.yml b/ansible/roles/test/tasks/decap.yml index 70362658159..edc592a8086 100644 --- a/ansible/roles/test/tasks/decap.yml +++ b/ansible/roles/test/tasks/decap.yml @@ -1,4 +1,148 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: decap/test_decap.py +#----------------------------------------- +# Run Decap test +#----------------------------------------- + +- block: + - name: Set dscp_mode for decap test for broadcom + set_fact: + dscp_mode: pipe + ecn_mode: copy_from_outer + when: + - sonic_hwsku in broadcom_hwskus + - dscp_mode is not defined + + - name: Set dscp_mode var for decap test for mellanox + set_fact: + dscp_mode: uniform + ecn_mode: standard + when: + - sonic_hwsku in mellanox_hwskus + - dscp_mode is not defined + + - name: Set ttl_mode var + set_fact: + ttl_mode: pipe + when: + - ttl_mode is not defined + +- fail: msg="information about testbed missing." 
+ when: (testbed_type is not defined) or + (dscp_mode is not defined) + +- fail: msg="Invalid dscp_mode value '{{dscp_mode}}'" + when: dscp_mode not in ['pipe','uniform'] + +- fail: msg="Invalid ttl_mode value '{{ttl_mode}}'" + when: ttl_mode not in ['pipe','uniform'] + +- include_vars: "vars/topo_{{testbed_type}}.yml" + +- name: Expand properties into props + set_fact: props="{{configuration_properties['spine']}}" + when: testbed_type in ['t1', 't1-lag', 't1-64-lag', 't1-64-lag-clet'] + +- name: Expand properties into props + set_fact: props="{{configuration_properties['common']}}" + when: testbed_type in ['t0', 't0-52', 't0-64', 't0-116'] + +- name: Expand properties into props + set_fact: props_tor="{{configuration_properties['tor']}}" + when: testbed_type in ['t1', 't1-lag', 't1-64-lag', 't1-64-lag-clet'] + +# Gather minigraph facts +- name: Gathering minigraph facts about the device + minigraph_facts: host={{ inventory_hostname }} + +# Obtain loopback IPs of the device + +- name: Obtain loopback IPv4 of the device + set_fact: lo_ip="{{ minigraph_lo_interfaces | map(attribute='addr') | ipv4 | first }}" + +- name: Obtain loopback IPv6 of the device + set_fact: lo_ipv6="{{ minigraph_lo_interfaces | map(attribute='addr') | ipv6 | first }}" + +- name: print loopback IPs + debug: msg="Loopback IPs {{ lo_ip }}, {{ lo_ipv6 }}" + +# Generate file with BGP routes information +- template: src=roles/test/templates/fib.j2 dest=/tmp/fib_info.txt + connection: local + +# Copy the fib_info to ptf container +- template: src=/tmp/fib_info.txt dest=/root + connection: local + delegate_to: "{{ ptf_host }}" + +- set_fact: outer_ipv4=True + when: outer_ipv4 is not defined + +- set_fact: outer_ipv6=True + when: outer_ipv6 is not defined + +- set_fact: inner_ipv4=True + when: inner_ipv4 is not defined + +- set_fact: inner_ipv6=True + when: inner_ipv6 is not defined + +# Apply tunnel configuration +- set_fact: op="SET" + +- block: + + - template: src=roles/test/templates/decap_conf.j2 
dest=/tmp/decap_conf.json + + - name: Copy configuration to swss container + shell: docker cp /tmp/decap_conf.json swss:/decap_conf.json + + - name: Apply decap_conf.json configuration + shell: docker exec swss sh -c "swssconfig /decap_conf.json" + + - set_fact: + testname: decap + + # Separate set_fact is required to be able to use 'testname' fact. + - set_fact: + testname_unique: "{{ testname }}.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}" + + - debug: msg="generated run id:{{testname_unique}}" + + - debug : msg="INVOKE DECAP TEST" + + - name: copy the test to ptf container + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + + - name: "Running test {{ testname }}" + include: ptf_runner.yml + vars: + ptf_test_name: '{{ testname_unique }}' + ptf_test_dir: ptftests + ptf_test_path: IP_decap_test.DecapPacketTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - testbed_type='{{ testbed_type }}' + - fib_info='/root/fib_info.txt' + - router_mac='{{ ansible_interface_facts['Ethernet0']['macaddress'] }}' + - dscp_mode='{{ dscp_mode }}' + - ttl_mode='{{ ttl_mode }}' + - lo_ip='{{ lo_ip }}' + - lo_ipv6='{{ lo_ipv6 }}' + - outer_ipv4={{ outer_ipv4 }} + - outer_ipv6={{ outer_ipv6 }} + - inner_ipv4={{ inner_ipv4 }} + - inner_ipv6={{ inner_ipv6 }} + ptf_extra_options: "--relax --debug info --log-file /tmp/{{ testname_unique }}.log " + + always: + # Delete the configuration + - set_fact: op="DEL" + + - template: src=roles/test/templates/decap_conf.j2 dest=/tmp/decap_conf.json + + - name: Copy configuration to swss container + shell: docker cp /tmp/decap_conf.json swss:/decap_conf.json + + - name: Apply decap_conf.json configuration + shell: docker exec swss sh -c "swssconfig /decap_conf.json" diff --git a/ansible/roles/test/tasks/dhcp_relay.yml b/ansible/roles/test/tasks/dhcp_relay.yml index eb93fea9bda..3531e12678d 100644 --- a/ansible/roles/test/tasks/dhcp_relay.yml +++ b/ansible/roles/test/tasks/dhcp_relay.yml @@ -1,4 
+1,152 @@ -- name: Run DHCP relay test (pytest-ansible) - include_tasks: roles/test/tasks/pytest_runner.yml +# We choose client port index to be index of first port on Vlan +- name: Obtain client interface name + set_fact: + client_iface_name: "{{ minigraph_vlans[minigraph_vlans.keys()[0]]['members'][0] }}" + +- name: Obtain client interface alias + set_fact: + client_iface_alias: "{{ minigraph_port_name_to_alias_map[client_iface_name] }}" + +- name: Obtain client port index + set_fact: + client_port_index: "{{ minigraph_port_indices[client_iface_name] }}" + +- name: Obtain leaf port indices + set_fact: + leaf_port_indices: [] + +- set_fact: + leaf_port_indices: "{{ leaf_port_indices }} + [ {{ minigraph_port_indices[item.key] }} ]" + with_dict: "{{ minigraph_neighbors }}" + when: minigraph_devices[item.value.name] is defined and minigraph_devices[item.value.name]['type'] == "LeafRouter" + +- name: Obtain MAC address of {{ minigraph_vlan_interfaces[0]['attachto'] }} interface + become: true + shell: "cat /sys/class/net/{{ minigraph_vlan_interfaces[0]['attachto'] }}/address" + register: result + +- set_fact: + relay_iface_mac: "{{ result.stdout | from_yaml }}" + +- name: Copy tests to the PTF container + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + +# Run the DHCP relay PTF test +- include: ptf_runner.yml vars: - test_node: dhcp_relay/test_dhcp_relay.py + ptf_test_name: DHCP Relay Test + ptf_test_dir: ptftests + ptf_test_path: dhcp_relay_test.DHCPTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - hostname=\"{{ inventory_hostname }}\" + - client_port_index=\"{{ client_port_index }}\" + - client_iface_alias=\"{{ client_iface_alias }}\" + - leaf_port_indices=\"{{ leaf_port_indices }}\" + - num_dhcp_servers=\"{{ dhcp_servers | length }}\" + - server_ip=\"{{ dhcp_servers[0] }}\" + - relay_iface_ip=\"{{ minigraph_vlan_interfaces[0]['addr'] }}\" + - relay_iface_mac=\"{{ relay_iface_mac }}\" + - 
relay_iface_netmask=\"{{ minigraph_vlan_interfaces[0]['mask'] }}\" + ptf_extra_options: "--relax" + +- name: Bring all uplink interfaces down + shell: ifconfig {{ item.key }} down + with_dict: minigraph_portchannels + become: true + +- name: Pause to ensure uplinks are down + pause: + seconds: 20 + +- name: Bring all uplink interfaces up + shell: ifconfig {{ item.key }} up + with_dict: minigraph_portchannels + become: true + +- name: Pause to ensure uplinks are up + pause: + seconds: 20 + +# Run the DHCP relay PTF test +- include: ptf_runner.yml + vars: + ptf_test_name: DHCP Relay Test + ptf_test_dir: ptftests + ptf_test_path: dhcp_relay_test.DHCPTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - hostname=\"{{ inventory_hostname }}\" + - client_port_index=\"{{ client_port_index }}\" + - client_iface_alias=\"{{ client_iface_alias }}\" + - leaf_port_indices=\"{{ leaf_port_indices }}\" + - num_dhcp_servers=\"{{ dhcp_servers | length }}\" + - server_ip=\"{{ dhcp_servers[0] }}\" + - relay_iface_ip=\"{{ minigraph_vlan_interfaces[0]['addr'] }}\" + - relay_iface_mac=\"{{ relay_iface_mac }}\" + - relay_iface_netmask=\"{{ minigraph_vlan_interfaces[0]['mask'] }}\" + ptf_extra_options: "--relax" + +- name: Stop DHCP relay service + become: true + service: + name: dhcp_relay + state: stopped + +- name: Bring all uplink interfaces down + shell: ifconfig {{ item.key }} down + with_dict: minigraph_portchannels + become: true + +- name: Pause to ensure uplinks are down + pause: + seconds: 20 + +- name: Start DHCP relay service with uplinks down + become: true + service: + name: dhcp_relay + state: restarted + +- name: Give the DHCP relay container time to start up + pause: + seconds: 40 + +- name: Bring all uplink interfaces up + shell: ifconfig {{ item.key }} up + with_dict: minigraph_portchannels + become: true + +- name: Pause to ensure uplinks are up + pause: + seconds: 20 + +# Run the DHCP relay PTF test +- include: ptf_runner.yml + vars: + 
ptf_test_name: DHCP Relay Test + ptf_test_dir: ptftests + ptf_test_path: dhcp_relay_test.DHCPTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - hostname=\"{{ inventory_hostname }}\" + - client_port_index=\"{{ client_port_index }}\" + - client_iface_alias=\"{{ client_iface_alias }}\" + - leaf_port_indices=\"{{ leaf_port_indices }}\" + - num_dhcp_servers=\"{{ dhcp_servers | length }}\" + - server_ip=\"{{ dhcp_servers[0] }}\" + - relay_iface_ip=\"{{ minigraph_vlan_interfaces[0]['addr'] }}\" + - relay_iface_mac=\"{{ relay_iface_mac }}\" + - relay_iface_netmask=\"{{ minigraph_vlan_interfaces[0]['mask'] }}\" + ptf_extra_options: "--relax" + +- name: Restart DHCP relay service to ensure it is in a healthy state + become: true + service: + name: dhcp_relay + state: restarted + tags: always diff --git a/ansible/roles/test/tasks/dip_sip.yml b/ansible/roles/test/tasks/dip_sip.yml index 2881dfe5a41..d79111dee74 100644 --- a/ansible/roles/test/tasks/dip_sip.yml +++ b/ansible/roles/test/tasks/dip_sip.yml @@ -1,4 +1,129 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml +- fail: msg="testbed_type is not defined" + when: testbed_type is not defined + +- fail: msg="testbed_type {{ test_type }} is invalid" + when: testbed_type not in ['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-64-lag-clet'] + +- include_vars: "vars/topo_{{ testbed_type }}.yml" + +- name: "Expand properties into props" + set_fact: props="{{ configuration_properties['common'] }}" + +- name: "Gather minigraph facts about the device" + minigraph_facts: host={{ inventory_hostname }} + +- name: "Remove existing IPs from PTF host" + script: roles/test/files/helpers/remove_ip.sh + delegate_to: "{{ ptf_host }}" + +- name: "Set unique MACs to PTF interfaces" + script: roles/test/files/helpers/change_mac.sh + delegate_to: "{{ ptf_host }}" + +- name: "Gather information from LLDP" + lldp: vars: - test_node: ipfwd/test_dip_sip.py + 
ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python + +- name: "Copy tests to PTF" + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + +- block: + - fail: msg="minigraph_interfaces is not defined or zero length" + when: minigraph_interfaces is not defined or (minigraph_interfaces | length == 0) + + - name: "Set destination PORT name" + set_fact: + dst_port: "{{ minigraph_interfaces[0].attachto }}" + + - name: "Set source PORT name" + set_fact: + src_port: "{{ minigraph_interfaces[2].attachto }}" + + - name: "Start PTF runner: '{{ testbed_type }}' designated" + include: ptf_runner.yml + vars: + ptf_test_name: DipSip test + ptf_test_dir: ptftests + ptf_test_path: dip_sip.DipSipTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - testbed_type='{{ testbed_type }}' + - dst_host_mac='{{ lldp[dst_port]['chassis'].mac }}' + - src_host_mac='{{ lldp[src_port]['chassis'].mac }}' + - dst_router_mac='{{ ansible_interface_facts[dst_port].macaddress }}' + - src_router_mac='{{ ansible_interface_facts[src_port].macaddress }}' + - dst_router_ipv4='{{ ansible_interface_facts[dst_port]['ipv4']['address'] }}' + - src_router_ipv4='{{ ansible_interface_facts[src_port]['ipv4']['address'] }}' + - dst_router_ipv6='{{ ansible_interface_facts[dst_port]['ipv6'] | selectattr("scope", "match", "^global$") | map(attribute='address') | list | first }}' + - src_router_ipv6='{{ ansible_interface_facts[src_port]['ipv6'] | selectattr("scope", "match", "^global$") | map(attribute='address') | list | first }}' + - dst_port_ids=[{{ minigraph_port_indices[dst_port] }}] + - src_port_ids=[{{ minigraph_port_indices[src_port] }}] + ptf_extra_options: "--relax --debug info --log-file /tmp/dip_sip.DipSipTest.{{ lookup('pipe','date +%Y-%m-%d-%H:%M:%S') }}.log" + + vars: + dst_port: "default('')" + src_port: "default('')" + when: testbed_type in ['t1'] + +- block: + - fail: msg="minigraph_portchannel_interfaces is not 
defined or zero length" + when: minigraph_portchannel_interfaces is not defined or (minigraph_portchannel_interfaces | length == 0) + + - name: "Set destination LAG name" + set_fact: + dst_lag: "{{ minigraph_portchannel_interfaces[0].attachto }}" + + - name: "Set source LAG name" + set_fact: + src_lag: "{{ minigraph_portchannel_interfaces[2].attachto }}" + + - name: "Gather destination port indices" + set_fact: + dst_port_ids: "{{ minigraph_port_indices[item] }}" + with_items: "{{ minigraph_portchannels[dst_lag].members }}" + register: dst_port_ids_result + + - name: "Make a list from destination port indices" + set_fact: + dst_port_ids: "{{ dst_port_ids_result.results | map(attribute='ansible_facts.dst_port_ids') | list }}" + + - name: "Gather source port indices" + set_fact: + src_port_ids: "{{ minigraph_port_indices[item] }}" + with_items: "{{ minigraph_portchannels[src_lag].members }}" + register: src_port_ids_result + + - name: "Make a list from source port indices" + set_fact: + src_port_ids: "{{ src_port_ids_result.results | map(attribute='ansible_facts.src_port_ids') | list }}" + + - name: "Start PTF runner: '{{ testbed_type }}' designated" + include: ptf_runner.yml + vars: + ptf_test_name: DipSip test + ptf_test_dir: ptftests + ptf_test_path: dip_sip.DipSipTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - testbed_type='{{ testbed_type }}' + - dst_host_mac='{{ lldp[minigraph_portchannels[dst_lag].members[0]]['chassis'].mac }}' + - src_host_mac='{{ lldp[minigraph_portchannels[src_lag].members[0]]['chassis'].mac }}' + - dst_router_mac='{{ ansible_interface_facts[dst_lag].macaddress }}' + - src_router_mac='{{ ansible_interface_facts[src_lag].macaddress }}' + - dst_router_ipv4='{{ ansible_interface_facts[dst_lag]['ipv4']['address'] }}' + - src_router_ipv4='{{ ansible_interface_facts[src_lag]['ipv4']['address'] }}' + - dst_router_ipv6='{{ ansible_interface_facts[dst_lag]['ipv6'] | selectattr("scope", "match", "^global$") | 
map(attribute='address') | list | first }}' + - src_router_ipv6='{{ ansible_interface_facts[src_lag]['ipv6'] | selectattr("scope", "match", "^global$") | map(attribute='address') | list | first }}' + - dst_port_ids={{ dst_port_ids }} + - src_port_ids={{ src_port_ids }} + ptf_extra_options: "--relax --debug info --log-file /tmp/dip_sip.DipSipTest.{{ lookup('pipe','date +%Y-%m-%d-%H:%M:%S') }}.log" + + vars: + dst_lag: "default('')" + src_lag: "default('')" + when: testbed_type in ['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116', 't1-lag', 't1-64-lag', 't1-64-lag-clet'] diff --git a/ansible/roles/test/tasks/dir_bcast.yml b/ansible/roles/test/tasks/dir_bcast.yml index 79a0bce5d2a..f7f33009065 100644 --- a/ansible/roles/test/tasks/dir_bcast.yml +++ b/ansible/roles/test/tasks/dir_bcast.yml @@ -7,7 +7,7 @@ when: testbed_type is not defined - fail: msg="testbed_type {{testbed_type}} is invalid." - when: testbed_type not in ['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116'] + when: testbed_type not in ['t0', 't0-64', 't0-116'] - include_vars: "vars/topo_{{testbed_type}}.yml" @@ -28,7 +28,7 @@ delegate_to: "{{ptf_host}}" - name: "Start PTF runner" - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: dir_bcast test ptf_test_dir: ptftests diff --git a/ansible/roles/test/tasks/ecn_wred.yml b/ansible/roles/test/tasks/ecn_wred.yml index 3a2645db55c..309c9f46829 100644 --- a/ansible/roles/test/tasks/ecn_wred.yml +++ b/ansible/roles/test/tasks/ecn_wred.yml @@ -20,15 +20,14 @@ - set_fact: red_min_threshold={{ wred_value.stdout }} -- include_tasks: add_container_to_inventory.yml +- block: + - name: Copy test files to DUT + copy: src={{ item }} dest={{ tmp_dir }} + with_fileglob: + - "{{ test_files_dir }}/*" vars: - container_name: database - -- name: Copy test files to DUT database - copy: src={{ item }} dest={{ tmp_dir }} - delegate_to: "{{ ansible_host }}_database" - with_fileglob: - - "{{ test_files_dir }}/*" + ansible_shell_type: docker + 
ansible_python_interpreter: docker exec -i database python - block: # Test case #3(MA): Check configuration applied @@ -38,7 +37,7 @@ failed_when: grep_queue.rc != 0 # Run test with different values - - include_tasks: ecn_wred_worker.yml + - include: ecn_wred_worker.yml with_items: "{{ test_wred_values }}" always: @@ -46,4 +45,4 @@ shell: ecnconfig -p AZURE_LOSSLESS -rmin {{ red_min_threshold }} become: yes register: ecn_restore - failed_when: ecn_restore.rc != 0 + failed_when: ecn_restore.rc != 0 \ No newline at end of file diff --git a/ansible/roles/test/tasks/everflow.yml b/ansible/roles/test/tasks/everflow.yml index 99126904fdb..14d6faa0d57 100644 --- a/ansible/roles/test/tasks/everflow.yml +++ b/ansible/roles/test/tasks/everflow.yml @@ -17,10 +17,10 @@ - block: - name: Test Everflow configuration validation. - include_tasks: "roles/test/tasks/everflow/config_test/config_test.yml" + include: "roles/test/tasks/everflow/config_test/config_test.yml" - name: Test Everflow session activation/deactivation logic. - include_tasks: "roles/test/tasks/everflow/logic_test/logic_test.yml" + include: "roles/test/tasks/everflow/logic_test/logic_test.yml" always: - name: General cleanup. diff --git a/ansible/roles/test/tasks/everflow/config_test/config_test.yml b/ansible/roles/test/tasks/everflow/config_test/config_test.yml index 2eabec27db7..1c04930ae2f 100644 --- a/ansible/roles/test/tasks/everflow/config_test/config_test.yml +++ b/ansible/roles/test/tasks/everflow/config_test/config_test.yml @@ -21,7 +21,7 @@ - config_delete.json - name: Initialize config test - include_tasks: roles/test/tasks/init_config_test.yml + include: roles/test/tasks/init_config_test.yml - block: - name: Config tests - invalid SRC IP address. @@ -30,7 +30,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid DST IP address. 
vars: @@ -38,7 +38,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid GRE type (non integer value). vars: @@ -46,7 +46,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid GRE type (value < that min bound). vars: @@ -54,7 +54,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid GRE type (value > that min bound). vars: @@ -62,7 +62,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid DSCP (non integer value). vars: @@ -70,7 +70,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid DSCP (value < that min bound). vars: @@ -78,7 +78,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid DSCP (value > that min bound). vars: @@ -86,7 +86,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid TTL (non integer value). 
vars: @@ -94,7 +94,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid TTL (value < that min bound). vars: @@ -102,7 +102,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid TTL (value > that min bound). vars: @@ -110,7 +110,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid queue (non integer value). vars: @@ -118,7 +118,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid queue (value < that min bound). vars: @@ -126,7 +126,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - invalid queue (value > that max bound). vars: @@ -134,7 +134,7 @@ test_expect_file: config_test_expect_file errors_expected: true run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - valid config (all integers are in 10 base). vars: @@ -142,7 +142,7 @@ test_expect_file: config_test_expect_file errors_expected: false run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - valid config (all integers are in hex). 
vars: @@ -150,7 +150,7 @@ test_expect_file: config_test_expect_file errors_expected: false run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - delete configuration part 1 (apply config). vars: @@ -158,7 +158,7 @@ test_expect_file: config_test_expect_file errors_expected: false run_cleanup: false - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Config tests - delete configuration part 2 (delete config). vars: @@ -166,8 +166,8 @@ test_expect_file: config_test_expect_file errors_expected: false run_cleanup: true - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml always: - name: Remove all the temporary files created by the test. - include_tasks: roles/test/tasks/deinit_config_test.yml + include: roles/test/tasks/deinit_config_test.yml diff --git a/ansible/roles/test/tasks/everflow/logic_test/logic_test.yml b/ansible/roles/test/tasks/everflow/logic_test/logic_test.yml index 7edd8ce6083..db6c54ae436 100644 --- a/ansible/roles/test/tasks/everflow/logic_test/logic_test.yml +++ b/ansible/roles/test/tasks/everflow/logic_test/logic_test.yml @@ -12,7 +12,7 @@ when: sw_if is not defined - name: Initialize config test. - include_tasks: roles/test/tasks/init_config_test.yml + include: roles/test/tasks/init_config_test.yml - name: Get IP address of one switch interface shell: ifconfig {{ sw_if }} | grep "inet addr:" | awk '{print $2}' | sed "s/addr://" @@ -37,7 +37,7 @@ test_expect_file: create_session_expect_file errors_expected: false run_cleanup: false - include_tasks: roles/test/tasks/run_config_test.yml + include: roles/test/tasks/run_config_test.yml - name: Create route with prefix that matches session DST IP and unresolved next hop. 
command: ip route add {{ session_ip_prefix_1 }} via {{ neigh_ip }} @@ -116,8 +116,8 @@ ignore_errors: yes - name: Clear session configuration - include_tasks: roles/test/tasks/run_config_cleanup.yml + include: roles/test/tasks/run_config_cleanup.yml - name: Remove all the temporary files created by the test. - include_tasks: roles/test/tasks/deinit_config_test.yml + include: roles/test/tasks/deinit_config_test.yml diff --git a/ansible/roles/test/tasks/everflow_testbed.yml b/ansible/roles/test/tasks/everflow_testbed.yml index 256cb3bc32f..d8299d0ef9e 100644 --- a/ansible/roles/test/tasks/everflow_testbed.yml +++ b/ansible/roles/test/tasks/everflow_testbed.yml @@ -1,4 +1,19 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml +- name: Apply Everflow configuration. + include: "roles/test/tasks/everflow_testbed/apply_config.yml" + tags: everflow_tb_configure + +- name: Run Everflow tests. + include: "roles/test/tasks/everflow_testbed/run_test.yml" vars: - test_node: everflow/test_everflow_testbed.py + dst_port_type: "tor" + tags: everflow_tb_test + +- name: Run Everflow tests. + include: "roles/test/tasks/everflow_testbed/run_test.yml" + vars: + dst_port_type: "spine" + tags: everflow_tb_test + +- name: Clear Everflow configuration. + include: "roles/test/tasks/everflow_testbed/del_config.yml" + tags: everflow_tb_cleanup diff --git a/ansible/roles/test/tasks/everflow_testbed/apply_config.yml b/ansible/roles/test/tasks/everflow_testbed/apply_config.yml index 4c31f009c3b..c75e1b0337d 100644 --- a/ansible/roles/test/tasks/everflow_testbed/apply_config.yml +++ b/ansible/roles/test/tasks/everflow_testbed/apply_config.yml @@ -5,59 +5,14 @@ tests_location: roles/test/tasks/everflow_testbed testname: apply_config -- name: Create running directory - command: "mkdir -p {{ run_dir }}" - - name: Get session info. 
- include_tasks: roles/test/tasks/everflow_testbed/get_session_info.yml - -- name: Set Everflow table name - set_fact: - acl_table_name: "EVERFLOW" - -- block: - - name: Init variables - set_fact: - acl_table_name: "EVERFLOW_EGRESS" - acl_table_ports: "{{ everflow_table_ports }}" - acl_table_stage: "{{ acl_stage }}" - acl_table_type: "MIRROR" - - - name: Remove default SONiC Everflow table (since SONiC allows only one mirror table) - command: "config acl remove table EVERFLOW" - become: yes - - - name: Set a flag that need recover config from config_db.json - set_fact: - recover_from_cfgdb_file: True + include: roles/test/tasks/everflow_testbed/get_session_info.yml - - name: Generate config for egress Everflow table - template: - src: "roles/test/templates/acltb_table.j2" - dest: "{{ run_dir }}/everflow_egress_table.json" - - - name: Create egress Everflow table - command: "sonic-cfggen -j {{ run_dir }}/everflow_egress_table.json --write-to-db" - become: yes - when: acl_stage == "egress" - -- name: Copy ACL rules configuration file - template: - src={{ tests_location }}/{{ testname}}/acl_rule_persistent.json.j2 - dest={{ run_dir }}/acl_rule_persistent.json +- name: Copy ACL rules configuration file. 
+ copy: src={{ tests_location }}/{{ testname}}/acl_rule_persistent.json dest={{ run_dir }}/ - command: "config mirror_session add {{session_name}} {{session_src_ip}} {{session_dst_ip}} {{session_dscp}} {{session_ttl}} {{session_gre}} {{session_queue}}" become: yes -- name: Set acl-loader command - set_fact: - load_rule_cmd: "acl-loader update full {{ run_dir }}/acl_rule_persistent.json --session_name={{ session_name }}" - -- name: Append stage parameter if needed - set_fact: - load_rule_cmd: "{{ load_rule_cmd }} --mirror_stage={{ mirror_stage }}" - when: mirror_stage == "egress" - -- name: Load ACL mirror rules - command: "{{ load_rule_cmd }}" +- command: "acl-loader update full {{ run_dir }}/acl_rule_persistent.json --session_name={{ session_name }}" become: yes diff --git a/ansible/roles/test/tasks/everflow_testbed/apply_config/acl_rule_persistent.json b/ansible/roles/test/tasks/everflow_testbed/apply_config/acl_rule_persistent.json new file mode 100644 index 00000000000..2f437709137 --- /dev/null +++ b/ansible/roles/test/tasks/everflow_testbed/apply_config/acl_rule_persistent.json @@ -0,0 +1,164 @@ +{ + "acl": { + "acl-sets": { + "acl-set": { + "everflow": { + "acl-entries": { + "acl-entry": { + "1": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 1 + }, + "ip": { + "config": { + "source-ip-address": "20.0.0.10/32" + } + } + }, + "2": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 2 + }, + "ip": { + "config": { + "destination-ip-address": "30.0.0.10/32" + } + } + }, + "3": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 3 + }, + "transport": { + "config": { + "source-port": "4661" + } + } + }, + "4": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 4 + }, + "transport": { + "config": { + "destination-port": "4661" + } + } + }, + "5": { + 
"actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 5 + }, + "l2": { + "config": { + "ethertype": "4660" + } + } + }, + "6": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 6 + }, + "ip": { + "config": { + "protocol": 126 + } + } + }, + "7": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 7 + }, + "transport": { + "config": { + "tcp-flags": ["TCP_ACK", "TCP_SYN"] + } + } + }, + "8": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 8 + }, + "transport": { + "config": { + "source-port": "4672..4681" + } + } + }, + "9": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 9 + }, + "transport": { + "config": { + "destination-port": "4672..4681" + } + } + }, + "10": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 10 + }, + "ip": { + "config": { + "dscp": "51" + } + } + } + } + } + } + } + } + } +} diff --git a/ansible/roles/test/tasks/everflow_testbed/del_config.yml b/ansible/roles/test/tasks/everflow_testbed/del_config.yml index 5fae5e8800f..4bcff2f2d11 100644 --- a/ansible/roles/test/tasks/everflow_testbed/del_config.yml +++ b/ansible/roles/test/tasks/everflow_testbed/del_config.yml @@ -6,7 +6,7 @@ testname: del_config - name: Get session info. - include_tasks: roles/test/tasks/everflow_testbed/get_session_info.yml + include: roles/test/tasks/everflow_testbed/get_session_info.yml - name: Copy ACL rules configuration file. 
copy: src={{ tests_location }}/{{ testname}}/acl_rule_persistent-del.json dest={{ run_dir }}/ diff --git a/ansible/roles/test/tasks/everflow_testbed/del_config/acl_rule_persistent-del.json b/ansible/roles/test/tasks/everflow_testbed/del_config/acl_rule_persistent-del.json index cf71c8f215a..71ba44005e9 100644 --- a/ansible/roles/test/tasks/everflow_testbed/del_config/acl_rule_persistent-del.json +++ b/ansible/roles/test/tasks/everflow_testbed/del_config/acl_rule_persistent-del.json @@ -6,3 +6,4 @@ } } } + diff --git a/ansible/roles/test/tasks/everflow_testbed/get_port_info.yml b/ansible/roles/test/tasks/everflow_testbed/get_port_info.yml index 5115fe4f160..01851ded32e 100644 --- a/ansible/roles/test/tasks/everflow_testbed/get_port_info.yml +++ b/ansible/roles/test/tasks/everflow_testbed/get_port_info.yml @@ -1,11 +1,11 @@ - fail: msg="Destination port type is not defined" when: dst_port_type not in ['tor', 'spine'] -- fail: msg="TOR/SPINE ports are not defined" - when: tor_ports is not defined or spine_ports is not defined - - name: Init variables. set_fact: + tor_ports: [] + spine_ports: [] + spine_ptf_ports: [] dst_port_1_is_lag_member: "" dst_port_1_ptf_id: "" dst_port_2: "" @@ -15,6 +15,29 @@ dst_port_3_is_lag_member: "" dst_port_3_ptf_id: "" +- name: Get tor ports + set_fact: + tor_ports: "{{ tor_ports + [item.key] }}" + with_dict: "{{ minigraph_neighbors }}" + when: "'T0' in item.value.name" + +- name: Print tor ports + debug: msg={{ tor_ports }} + +- name: Get spine ports + set_fact: + spine_ports: "{{ spine_ports + [item.key] }}" + with_dict: "{{ minigraph_neighbors }}" + when: "'T2' in item.value.name" + +- name: Print spine ports + debug: msg={{ spine_ports }} + +- name: Define spine PTF ports + set_fact: + spine_ptf_ports: "{{ spine_ptf_ports + [minigraph_port_indices[item] | string] }}" + with_items: "{{ spine_ports }}" + - name: Define SRC port variables. 
set_fact: src_port: "{{ spine_ports[0] }}" diff --git a/ansible/roles/test/tasks/everflow_testbed/get_session_info.yml b/ansible/roles/test/tasks/everflow_testbed/get_session_info.yml index 692909f2db0..a1fcd77c058 100644 --- a/ansible/roles/test/tasks/everflow_testbed/get_session_info.yml +++ b/ansible/roles/test/tasks/everflow_testbed/get_session_info.yml @@ -8,11 +8,6 @@ session_gre: "0x6558" session_queue: "0" -- name: Set Barefoot GRE protocol type - set_fact: - session_gre: "0x22EB" - when: sonic_hwsku in barefoot_hwskus - - name: Set Mellanox GRE protocol type set_fact: session_gre: "0x8949" diff --git a/ansible/roles/test/tasks/everflow_testbed/run_test.yml b/ansible/roles/test/tasks/everflow_testbed/run_test.yml index 8008a76a8ed..a59faf17321 100644 --- a/ansible/roles/test/tasks/everflow_testbed/run_test.yml +++ b/ansible/roles/test/tasks/everflow_testbed/run_test.yml @@ -21,15 +21,15 @@ tests_location: "{{ 'roles/test/tasks' }}" - name: Get port info. - include_tasks: roles/test/tasks/everflow_testbed/get_port_info.yml + include: roles/test/tasks/everflow_testbed/get_port_info.yml - name: Get session info. - include_tasks: roles/test/tasks/everflow_testbed/get_session_info.yml + include: roles/test/tasks/everflow_testbed/get_session_info.yml - name: Get neighbor info. 
- include_tasks: roles/test/tasks/everflow_testbed/get_neighbor_info.yml + include: roles/test/tasks/everflow_testbed/get_neighbor_info.yml -- include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml +- include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - set_fact: test_out_dir: "{{ out_dir }}/{{testname_unique}}" @@ -47,35 +47,41 @@ shell: vtysh -e "conf t" -e "ip route {{ unresolved_nexthop_prefix }} {{ dst_port_2 }}" become: yes - - name: Run testcase 1 - Resolved route, unresolved route, best prefix match route creation and removal flows - include_tasks: roles/test/tasks/everflow_testbed/testcase_1.yml + - name: Run testcase 1 - Resolved route + include: roles/test/tasks/everflow_testbed/testcase_1.yml - - name: Run testcase 2 - Change neighbor MAC address. - include_tasks: roles/test/tasks/everflow_testbed/testcase_2.yml - when: testbed_type == "t1" + - name: Run testcase 2 - Longer prefix route with resolved next hop. + include: roles/test/tasks/everflow_testbed/testcase_2.yml - - name: Run testcase 3 - ECMP route change (remove next hop not used by session). - include_tasks: roles/test/tasks/everflow_testbed/testcase_3.yml + - name: Run testcase 3 - Remove longer prefix route. + include: roles/test/tasks/everflow_testbed/testcase_3.yml - - name: Run testcase 4 - ECMP route change (remove next hop used by session). - include_tasks: roles/test/tasks/everflow_testbed/testcase_4.yml + - name: Run testcase 4 - Change neighbor MAC address. + include: roles/test/tasks/everflow_testbed/testcase_4.yml + when: testbed_type == "t1" - - name: Run testcase 5 - Policer enforced with DSCP value/mask - include_tasks: roles/test/tasks/everflow_testbed/testcase_5.yml + - name: Run testcase 5 - Resolved ECMP route. + include: roles/test/tasks/everflow_testbed/testcase_5.yml - - name: Run testcase 6 - ARP/ND packet mirroring + - name: Run testcase 6 - ECMP route change (add next hop). 
include: roles/test/tasks/everflow_testbed/testcase_6.yml + - name: Run testcase 7 - ECMP route change (remove next hop used by session). + include: roles/test/tasks/everflow_testbed/testcase_7.yml + + - name: Run testcase 8 - Policer enforced with DSCP value/mask + include: roles/test/tasks/everflow_testbed/testcase_8.yml + always: - name: Remove route to unresolved next hop. shell: vtysh -e "conf t" -e "no ip route {{ unresolved_nexthop_prefix }} {{ dst_port_2 }}" become: yes - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml # Output content of result files to ansible console - shell: cat {{ test_out_dir }}/* register: out - debug: var=out.stdout_lines - - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + + - include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_1.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_1.yml index f3da2ad3ddd..74c4142eef9 100644 --- a/ansible/roles/test/tasks/everflow_testbed/testcase_1.yml +++ b/ansible/roles/test/tasks/everflow_testbed/testcase_1.yml @@ -1,61 +1,22 @@ -# Test case 1 - Longer prefix route with resolved next hop. -# Verify that session destination port and MAC address are changed after best match route insertion. +# Test case 1 - Resolved route. +# Verify that session with resolved route has active state. -- block: - - name: Create route with next hop on {{ dst_port_1 }}. - shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" - - - pause: - seconds: 3 - - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" - - - name: Create route with best match and unresolved next hop. 
- shell: vtysh -e "conf t" -e "ip route {{ session_prefix_2 }} {{ unresolved_nexthop }}" - - - pause: - seconds: 3 - - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" - - - name: Create route with best match prefix and resolved next hop on destination port {{ dst_port_2 }}. - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ unresolved_nexthop }}" -e "ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" - - - pause: - seconds: 3 - - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_2 }}" - dst_port_ptf_id: "{{ dst_port_2_ptf_id }}" - - - name: Remove route with best match prefix and resolved next hop on destination port {{ dst_port_2 }}. - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" +- name: Create route with next hop {{ dst_port_1 }}. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" + become: yes - - pause: - seconds: 3 +- pause: + seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" +- block: + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out always: - - name: Remove route. 
+ - name: Remove route shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" - ignore_errors: yes - - - name: Remove best match route. - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ unresolved_nexthop }}" - ignore_errors: yes - - - name: Remove best match route. - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" - ignore_errors: yes - become: yes + become: yes diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_2.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_2.yml index 9e89b1ce1b3..84f414e1363 100644 --- a/ansible/roles/test/tasks/everflow_testbed/testcase_2.yml +++ b/ansible/roles/test/tasks/everflow_testbed/testcase_2.yml @@ -1,5 +1,5 @@ -# Test case 2 - Change neighbor MAC address. -# Verify that session destination MAC address is changed after neighbor MAC address update. +# Test case 2 - Longer prefix route with resolved next hop. +# Verify that session destination port and MAC address are changed after best match route insertion. - block: - name: Create route with next hop on {{ dst_port_1 }}. @@ -8,32 +8,50 @@ - pause: seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_1 }}. 
+ shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out - - name: Change neighbor MAC address. - shell: ip neigh replace {{ neighbor_info_1['addr'] }} lladdr "00:11:22:33:44:55" nud permanent dev {{ dst_port_1 }} + - name: Create route with best match and unresolved next hop. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_2 }} {{ unresolved_nexthop }}" - pause: seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out - become: yes + - name: Create route with best match prefix and resolved next hop on destination port {{ dst_port_2 }}. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ unresolved_nexthop }}" -e "ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" - always: - - name: Remove neighbor MAC. 
- shell: ip neigh del {{ neighbor_info_1['addr'] }} dev {{ dst_port_1 }} + - pause: + seconds: 3 - - name: Recover neighbor MAC address. - shell: ping {{ neighbor_info_1['addr'] }} -c3 + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_2 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_2_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + become: yes + always: - name: Remove route. shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" ignore_errors: yes + + - name: Remove best match route. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ unresolved_nexthop }}" + ignore_errors: yes + + - name: Remove best match route. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" + ignore_errors: yes become: yes diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_3.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_3.yml index f444173b1fe..0eefbcc3cf2 100644 --- a/ansible/roles/test/tasks/everflow_testbed/testcase_3.yml +++ b/ansible/roles/test/tasks/everflow_testbed/testcase_3.yml @@ -1,55 +1,53 @@ -# Test case 3 - ECMP route change (remove next hop not used by session). -# Verify that after removal of next hop that was used by session from ECMP route session state is active. +# Test case 3 - Remove longer prefix route. +# Verify that session destination port and MAC address are changed after best match route removal. 
- block: - - name: Create ECMP route with next hops on {{ dst_port_1 }} and {{ dst_port_2 }}. - shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" + - name: Create route with next hop on {{ dst_port_1 }}. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" - pause: seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}, {{ dst_port_2 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}" + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out - - name: Add next hop to ECMP route. - shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + - name: Create route with best match prefix and resolved next hop {{ dst_port_2 }}. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" - pause: seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}, {{ dst_port_2 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}" + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_2}}. 
+ shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_2_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_3 }}" - dst_port_ptf_id: "{{ dst_port_3_ptf_id }}" - expect_received: False - - - name: Delete next hop from ECMP route. - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + - name: Remove best match route. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" - pause: seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}, {{ dst_port_2 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}" - - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_3 }}" - dst_port_ptf_id: "{{ dst_port_3_ptf_id }}" - expect_received: False + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_1 }}. 
+ shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out become: yes always: - - name: Remove route - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + - name: Remove route. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" ignore_errors: yes - become: yes + + - name: Remove best match route. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_2 }} {{ neighbor_info_2['addr'] }}" + ignore_errors: yes + become: yes diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_4.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_4.yml index 1c9e81cf3ce..b388ffdd7d6 100644 --- a/ansible/roles/test/tasks/everflow_testbed/testcase_4.yml +++ b/ansible/roles/test/tasks/everflow_testbed/testcase_4.yml @@ -1,5 +1,5 @@ -# Test case 4 - ECMP route change (remove next hop used by session). -# Verify that removal of next hop that is not used by session doesn't cause DST port and MAC change. +# Test case 4 - Change neighbor MAC address. +# Verify that session destination MAC address is changed after neighbor MAC address update. - block: - name: Create route with next hop on {{ dst_port_1 }}. 
@@ -8,48 +8,35 @@ - pause: seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";expected_dst_mac="{{ neighbor_mac_1 }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out - - name: Add next hops on {{ dst_port_2 }} and {{ dst_port_3 }} to route. - shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + - name: Change neighbor MAC address. + shell: ip neigh replace {{ neighbor_info_1['addr'] }} lladdr "00:11:22:33:44:55" nud permanent dev {{ dst_port_1 }} - pause: seconds: 3 - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" - - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_2 }}, {{ dst_port_3 }}" - dst_port_ptf_id: "{{ dst_port_2_ptf_id }}, {{ dst_port_3_ptf_id }}" - expect_received: False - - - name: Delete one next hop from ECMP route. 
- shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" - - - pause: - seconds: 3 - - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_1 }}" - dst_port_ptf_id: "{{ dst_port_1_ptf_id }}" - expect_received: False - - - include_tasks: roles/test/tasks/everflow_testbed/everflow_ptf.yml - vars: - dst_port: "{{ dst_port_2 }}, {{ dst_port_3 }}" - dst_port_ptf_id: "{{ dst_port_2_ptf_id }}, {{ dst_port_3_ptf_id }}" + - name: Send traffic and verify that packets with correct Everflow header are received on destination port {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";expected_dst_mac="00:11:22:33:44:55";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out become: yes always: - - name: Remove route {{session_prefix_1}} - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + - name: Remove neighbor MAC. + shell: ip neigh del {{ neighbor_info_1['addr'] }} dev {{ dst_port_1 }} + + - name: Recover neighbor MAC address. + shell: ping {{ neighbor_info_1['addr'] }} -c3 + + - name: Remove route. 
+ shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" ignore_errors: yes become: yes diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_5.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_5.yml index ad81425393b..fe953eacfc8 100644 --- a/ansible/roles/test/tasks/everflow_testbed/testcase_5.yml +++ b/ansible/roles/test/tasks/everflow_testbed/testcase_5.yml @@ -1,108 +1,21 @@ -# Test case 5 - Policer enforced DSCP value/mask test +# Test case 5 - Resolved ECMP route. -- set_fact: - policer_name: TEST_POLICER - policer_session_name: TEST_POLICER_SESSION - policer_meter_type: "packets" - policer_mode: "sr_tcm" - policer_cir: "100" - policer_cbs: "100" - policer_red_packet_action: "drop" - policer_tolerance: "10" - dscp_table_name: EVERFLOW_DSCP - -- set_fact: - rule_action: "MIRROR_INGRESS_ACTION" - when: mirror_stage == "ingress" - -- set_fact: - rule_action: "MIRROR_EGRESS_ACTION" - when: mirror_stage == "egress" - -- name: Create route with next hop {{ dst_port_1 }}. - shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" +- name: Create ECMP route with next hops on {{ dst_port_1 }} and {{ dst_port_2 }}. 
+ shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" become: yes -- block: - - name: Create a policer - shell: | - redis-cli -n 4 hmset "POLICER|{{ policer_name }}" \ - "meter_type" "{{ policer_meter_type }}" \ - "mode" "{{ policer_mode }}" \ - "cir" "{{ policer_cir }}" \ - "cbs" "{{ policer_cbs }}" \ - "red_packet_action" "{{ policer_red_packet_action }}" - become: yes - - - name: Create a policer enforced mirror session - shell: | - config mirror_session add {{policer_session_name}} {{session_src_ip}} {{session_dst_ip}} {{session_dscp}} {{session_ttl}} {{session_gre}} --policer {{policer_name}} - become: yes +- pause: + seconds: 3 - - name: Create an ACL table with MIRROR_DSCP type - shell: config acl add table {{dscp_table_name}} "MIRROR_DSCP" --description "EVERFLOW_TEST" --stage={{ acl_stage }} - become: yes - - - name: Create a rule with DSCP value and mask - shell: | - redis-cli -n 4 hmset "ACL_RULE|{{dscp_table_name}}|RULE_1" "PRIORITY" "9999" "{{ rule_action }}" "{{policer_session_name}}" "DSCP" "8/56" - become: yes - - - name: Pause to sync the rule - pause: seconds=3 - - - name: "Start PTF runner" - include_tasks: roles/test/tasks/ptf_runner.yml - vars: - ptf_test_name: EVERFLOW Policer Test - ptf_test_dir: acstests - ptf_test_path: everflow_policer_test.EverflowPolicerTest - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_test_params: - - asic_type='{{sonic_asic_type}}' - - hwsku='{{sonic_hwsku}}' - - router_mac='{{ansible_Ethernet0['macaddress']}}' - - src_port='{{src_port_ptf_id}}' - - dst_ports='{{",".join((spine_ptf_ports))}}' - - dst_mirror_ports='{{dst_port_1_ptf_id}}' - - mirror_stage='{{ mirror_stage }}' - - session_src_ip='{{ session_src_ip }}' - - session_dst_ip='{{ session_dst_ip }}' - - session_ttl='{{ session_ttl }}' - - session_dscp='{{ session_dscp }}' - - meter_type='{{ policer_meter_type }}' - - cir='{{ policer_cir }}' - - 
cbs='{{ policer_cbs }}' - - tolerance='{{ policer_tolerance }}' - ptf_extra_options: "--relax --debug info --log-file /tmp/everflow_policer_test.EverflowPolicerTest.{{ lookup('pipe','date +%Y-%m-%d-%H:%M:%S') }}.log" +- block: + - name: Send traffic and verify that packets with correct Everflow header are received on {{ dst_port_1 }} or {{ dst_port_2 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out always: - - name: Remove the rule with DSCP value and mask - shell: | - redis-cli -n 4 del "ACL_RULE|{{dscp_table_name}}|RULE_1" - ignore_errors: yes - become: yes - - - name: Remove the ACL table with MIRROR_DSCP type - shell: config acl remove table {{dscp_table_name}} - ignore_errors: yes - become: yes - - - name: Remove the policer enforced mirror session - shell: | - config mirror_session remove {{policer_session_name}} - ignore_errors: yes - become: yes - - - name: Remove policer - shell: | - redis-cli -n 4 del "POLICER|{{policer_name}}" - ignore_errors: yes - become: yes - - name: Remove route - shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" - ignore_errors: yes + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" become: yes - diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_6.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_6.yml new file mode 100644 index 00000000000..9445d648179 --- /dev/null 
+++ b/ansible/roles/test/tasks/everflow_testbed/testcase_6.yml @@ -0,0 +1,65 @@ +# Test case 6 - ECMP route change (remove next hop not used by session). +# Verify that after removal of next hop that was not used by session from ECMP route session state is active. + +- block: + - name: Create ECMP route with next hops on {{ dst_port_1 }} and {{ dst_port_2 }}. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" + + - pause: + seconds: 3 + + - name: Send traffic and verify that packets with correct Everflow header are received on {{ dst_port_1 }} or {{ dst_port_2 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + + - name: Add next hop to ECMP route. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + + - pause: + seconds: 3 + + - name: Send traffic and verify that packets with correct Everflow header are received on {{ dst_port_1 }} or {{ dst_port_2 }}. 
+ shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + + - name: Send traffic and verify that packets are not received on {{ dst_port_3 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_3_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + failed_when: out.rc == 0 + + - name: Delete next hop from ECMP route. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + + - pause: + seconds: 3 + + - name: Send traffic and verify that packets with correct Everflow header are received on {{ dst_port_1 }} or {{ dst_port_2 }}. 
+ shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}, {{ dst_port_2_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + + - name: Send traffic and verify that packets are not received on {{ dst_port_3 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_3_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + failed_when: out.rc == 0 + become: yes + + always: + - name: Remove route + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + ignore_errors: yes + become: yes diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_7.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_7.yml new file mode 100644 index 00000000000..92ebb6d1f1e --- /dev/null +++ b/ansible/roles/test/tasks/everflow_testbed/testcase_7.yml @@ -0,0 +1,65 @@ +# Test case 7 - ECMP route change (remove next hop used by session). +# Verify that removal of next hop that is used by session causes session DST port and MAC change. 
+ +- block: + - name: Create route with next hop on {{ dst_port_1 }}. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" + + - pause: + seconds: 3 + + - name: Send traffic and verify that packets with correct Everflow header are received on {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + + - name: Add next hops on {{ dst_port_2 }} and {{ dst_port_3 }} to route. + shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + + - pause: + seconds: 3 + + - name: Send traffic and verify that packets with correct Everflow header are received on {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + + - name: Send traffic and verify that packets are not received on {{ dst_port_2 }} and {{ dst_port_3 }}. 
+ shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_2_ptf_id }},{{ dst_port_3_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + failed_when: out.rc == 0 + + - name: Delete one next hop from ECMP route. + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" + + - pause: + seconds: 3 + + - name: Send traffic and verify that packets are not received {{ dst_port_1 }}. + shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_1_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + failed_when: out.rc == 0 + + - name: Send traffic and verify that packets with correct Everflow header are received on {{ dst_port_2 }} or {{ dst_port_3 }}. 
+ shell: ptf --test-dir acstests everflow_tb_test.EverflowTest --platform-dir ptftests --platform remote -t 'asic_type="{{ sonic_asic_type }}";hwsku="{{ sonic_hwsku }}";router_mac="{{ ansible_Ethernet0['macaddress'] }}";src_port="{{ src_port_ptf_id }}";dst_ports="{{ dst_port_2_ptf_id }},{{ dst_port_3_ptf_id }}";session_src_ip="{{ session_src_ip }}";session_dst_ip="{{ session_dst_ip }}";session_ttl="{{ session_ttl }}";session_dscp="{{ session_dscp }}";verbose=True' + args: + chdir: /root + delegate_to: "{{ ptf_host }}" + register: out + become: yes + + always: + - name: Remove route {{session_prefix_1}} + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_2['addr'] }}" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_3['addr'] }}" + ignore_errors: yes + become: yes diff --git a/ansible/roles/test/tasks/everflow_testbed/testcase_8.yml b/ansible/roles/test/tasks/everflow_testbed/testcase_8.yml new file mode 100644 index 00000000000..7fb8ed3b36a --- /dev/null +++ b/ansible/roles/test/tasks/everflow_testbed/testcase_8.yml @@ -0,0 +1,80 @@ +# Test case 8 - Policer enforced DSCP value/mask test + +- set_fact: + policer_name: TEST_POLICER + policer_session_name: TEST_POLICER_SESSION + dscp_table_name: EVERFLOW_DSCP + +- name: Create route with next hop {{ dst_port_1 }}. 
+ shell: vtysh -e "conf t" -e "ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" + become: yes + +- block: + - name: Create a policer + shell: | + redis-cli -n 4 hmset "POLICER|{{policer_name}}" "meter_type" "packets" "mode" "sr_tcm" "cir" "100" "cbs" "100" "red_packet_action" "drop" + become: yes + + - name: Create a policer enforced mirror session + shell: | + config mirror_session add {{policer_session_name}} {{session_src_ip}} {{session_dst_ip}} {{session_dscp}} {{session_ttl}} --policer {{policer_name}} + become: yes + + - name: Create an ACL table with MIRROR_DSCP type + shell: config acl add table {{dscp_table_name}} "MIRROR_DSCP" --description "EVERFLOW_TEST" + become: yes + + - name: Create a rule with DSCP value and mask + shell: | + redis-cli -n 4 hmset "ACL_RULE|{{dscp_table_name}}|RULE_1" "PRIORITY" "9999" "MIRROR_ACTION" "{{policer_session_name}}" "DSCP" "8/56" + become: yes + + - name: Pause to sync the rule + pause: seconds=3 + + - name: "Start PTF runner" + include: roles/test/tasks/ptf_runner.yml + vars: + ptf_test_name: EVERFLOW Policer Test + ptf_test_dir: acstests + ptf_test_path: everflow_policer_test.EverflowPolicerTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - asic_type='{{sonic_asic_type}}' + - hwsku='{{sonic_hwsku}}' + - router_mac='{{ansible_Ethernet0['macaddress']}}' + - src_port='{{src_port_ptf_id}}' + - dst_ports='{{",".join((spine_ptf_ports))}}' + - dst_mirror_ports='{{dst_port_1_ptf_id}}' + ptf_extra_options: "--relax --debug info" + + always: + - name: Remove the rule with DSCP value and mask + shell: | + redis-cli -n 4 del "ACL_RULE|{{dscp_table_name}}|RULE_1" + ignore_errors: yes + become: yes + + - name: Remove the ACL table with MIRROR_DSCP type + shell: config acl remove table {{dscp_table_name}} + ignore_errors: yes + become: yes + + - name: Remove the policer enforced mirror session + shell: | + config mirror_session remove {{policer_session_name}} + ignore_errors: yes + become: 
yes + + - name: Remove policer + shell: | + redis-cli -n 4 del "POLICER|{{policer_name}}" + ignore_errors: yes + become: yes + + - name: Remove route + shell: vtysh -e "conf t" -e "no ip route {{ session_prefix_1 }} {{ neighbor_info_1['addr'] }}" + ignore_errors: yes + become: yes + diff --git a/ansible/roles/test/tasks/fast-reboot.yml b/ansible/roles/test/tasks/fast-reboot.yml index d984dab3c5d..506e6e8fae8 100644 --- a/ansible/roles/test/tasks/fast-reboot.yml +++ b/ansible/roles/test/tasks/fast-reboot.yml @@ -3,14 +3,7 @@ reboot_limit: 30 when: reboot_limit is not defined -- name: set default values vnet variables - set_fact: - vnet: False - vnet_pkts: '' - when: (vnet is not defined) or (vnet_pkts is not defined) - - name: Fast-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: fast-reboot - diff --git a/ansible/roles/test/tasks/fdb.yml b/ansible/roles/test/tasks/fdb.yml index 0eafe74a712..b4c0b8c95b6 100644 --- a/ansible/roles/test/tasks/fdb.yml +++ b/ansible/roles/test/tasks/fdb.yml @@ -4,171 +4,60 @@ - fail: msg="testbed_type {{test_type}} is invalid" when: testbed_type not in ['t0', 't0-64', 't0-116', 't0-52'] -- name: Gather minigraph facts about the device - minigraph_facts: host={{inventory_hostname}} - -- block: - - name: Remove existing IPs from PTF host - script: roles/test/files/helpers/remove_ip.sh - delegate_to: "{{ptf_host}}" - - - name: Set unique MACs to PTF interfaces - script: roles/test/files/helpers/change_mac.sh - delegate_to: "{{ptf_host}}" - - - name: Copy tests to PTF - copy: src=roles/test/files/ptftests dest=/root - delegate_to: "{{ptf_host}}" - - - name: Copy FDB information file to PTF - template: src=roles/test/templates/fdb.j2 dest=/root/fdb_info.txt - delegate_to: "{{ ptf_host }}" - - - name: clear FDB table - command: sonic-clear fdb all - - - name: Initialize variables - set_fact: - dummy_mac_prefix: "02:11:22:33" - dummy_mac_number: "10" - vlan_member_count: 0 - - - name: Set dummy 
mac number for SimX virtual testbed - set_fact: - dummy_mac_number: "1" - when: - - hostvars[ansible_hostname]['type'] is defined - - hostvars[ansible_hostname]['type'] == 'simx' - - - name: "Start PTF runner" - include_tasks: ptf_runner.yml - vars: - ptf_test_name: FDB test - ptf_test_dir: ptftests - ptf_test_path: fdb_test.FdbTest - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_test_params: - - testbed_type='{{testbed_type}}' - - router_mac='{{ansible_Ethernet0['macaddress']}}' - - fdb_info='/root/fdb_info.txt' - - vlan_ip='{{minigraph_vlan_interfaces[0]['addr']}}' - - dummy_mac_prefix='{{ dummy_mac_prefix }}' - - dummy_mac_number='{{ dummy_mac_number }}' - ptf_extra_options: "--relax --debug info --log-file /tmp/fdb_test.FdbTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - - - name: Get the output of 'show mac' - command: "show mac" - register: show_mac_output - - - name: Count the total number of members of all VLANs - set_fact: - vlan_member_count: "{{ vlan_member_count|int + minigraph_vlans[item.key]['members']|length }}" - with_dict: "{{ minigraph_vlans }}" - - - name: Set variables for expected number of MAC entries - set_fact: - expected_dummy_mac_number: "{{ dummy_mac_number|int * vlan_member_count|int }}" - expected_total_mac_number: "{{ dummy_mac_number|int * vlan_member_count|int + vlan_member_count|int }}" - - - name: Verify that the number of dummy MAC entries is expected - assert: { that: "{{ show_mac_output.stdout_lines|select('search', dummy_mac_prefix)|list|length == expected_dummy_mac_number|int }}"} - - - name: Verify that total number of MAC entries is expected - assert: { that: "{{ show_mac_output.stdout_lines|select('search', 'Dynamic')|list|length == expected_total_mac_number|int }}"} - - always: - - name: clear FDB table - command: sonic-clear fdb all - -- block: - - name: clear FDB table - command: sonic-clear fdb all - - - name: Get ports in portchannels - set_fact: - portchannel_members_all: [] - - set_fact: - 
portchannel_members_all: "{{ portchannel_members_all + item['members'] }}" - with_items: "{{ minigraph_portchannels.values() }}" +- include_vars: "vars/topo_{{testbed_type}}.yml" - - name: Get DUT ports info - set_fact: - vlan_ports: "{%for port in minigraph_vlans['Vlan1000']['members'] %}{{minigraph_port_indices[port]}}{%if not loop.last%} {%endif%}{%endfor%}" - lag_ports: "{%for port in portchannel_members_all %}{{minigraph_port_indices[port]}}{%if not loop.last%} {%endif%}{%endfor%}" +- name: Expand properties into props + set_fact: props="{{configuration_properties['common']}}" - - name: Initialize variables - set_fact: - dummy_mac_prefix: "02:11:22:33" - - - name: "Set ptf facts" - set_fact: - ptf_test_name: FDB test - ptf_test_dir: ptftests - ptf_test_path: fdb_test.FdbConfigReloadTest - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_test_params: - - testbed_type='{{ testbed_type }}' - - vlan_ports='{{ vlan_ports }}' - - lag_ports='{{ lag_ports }}' - - router_mac='{{ ansible_interface_facts['Ethernet0']['macaddress'] }}' - - dummy_mac_prefix='{{ dummy_mac_prefix }}' - ptf_extra_options: "--relax --debug info --log-file /tmp/fdb_test.FdbTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - - - debug: msg="ptf --test-dir {{ ptf_test_dir }} {{ ptf_test_path }} {% if ptf_qlen is defined %} --qlen={{ ptf_qlen }} {% endif %} {% if ptf_platform_dir is defined %} --platform-dir {{ ptf_platform_dir }} {% endif %} --platform {{ ptf_platform }} {% if ptf_test_params is defined %} -t \"{{ ptf_test_params | default([]) | join(';') }}\" {% endif %} {{ ptf_extra_options | default(\"\")}} 2>&1" - - - name: Run PTF script to inject packets with different src MAC - shell: ptf --test-dir {{ ptf_test_dir }} {{ ptf_test_path }} {% if ptf_qlen is defined %} --qlen={{ ptf_qlen }} {% endif %} {% if ptf_platform_dir is defined %} --platform-dir {{ ptf_platform_dir }} {% endif %} --platform {{ ptf_platform }} {% if ptf_test_params is defined %} -t "{{ ptf_test_params | 
default([]) | join(';') }}" {% endif %} {{ ptf_extra_options | default("")}} 2>&1 - args: - chdir: /root - delegate_to: "{{ ptf_host }}" - async: 300 - poll: 0 - - - name: Reload config - include_tasks: "roles/test/tasks/common_tasks/reload_config.yml" - vars: - config_source: "config_db" - - - name: Get PID of the PTF script - command: "pgrep -f '/usr/bin/python /usr/bin/ptf'" - register: ptf_script_pid - delegate_to: "{{ ptf_host }}" - ignore_errors: yes - - - name: Wait until the PTF script completed execution - shell: "tail --pid={{ ptf_script_pid.stdout }} -f /dev/null" - delegate_to: "{{ ptf_host }}" - when: ptf_script_pid.rc == 0 - - - name: Wait some extra time to ensure that all FDB entries are in DB - pause: seconds=10 - - - name: Count FDB entries from SDK - shell: "docker exec syncd sx_api_fdb_dump.py | grep AGEABLE | wc -l" - register: sdk_fdb_count - - - debug: msg="{{ sdk_fdb_count.stdout_lines }}" - - - name: Count FDB entries from 'show mac' - shell: "show mac | grep Dynamic | wc -l" - register: show_mac_output - - - debug: msg="{{ show_mac_output.stdout_lines }}" - - - name: clear FDB table when test case pass - command: sonic-clear fdb all - when: sdk_fdb_count.stdout == show_mac_output.stdout - - - fail: msg="No FDB is learned, something wrong with the switch" - when: sdk_fdb_count.stdout | int == 0 or show_mac_output.stdout | int == 0 - - - fail: msg="In consistent number MAC entries between SDK and DB" - when: sdk_fdb_count.stdout != show_mac_output.stdout - - when: sonic_asic_type == 'mellanox' +- name: Gather minigraph facts about the device + minigraph_facts: host={{inventory_hostname}} - always: - - name: clear FDB table - command: sonic-clear fdb all +- name: Remove existing IPs from PTF host + script: roles/test/files/helpers/remove_ip.sh + delegate_to: "{{ptf_host}}" + +- name: Set unique MACs to PTF interfaces + script: roles/test/files/helpers/change_mac.sh + delegate_to: "{{ptf_host}}" + +- name: Copy tests to PTF + copy: 
src=roles/test/files/ptftests dest=/root + delegate_to: "{{ptf_host}}" + +- name: Copy ARP responder to PTF + copy: src=roles/test/files/helpers/arp_responder.py dest=/opt + delegate_to: "{{ptf_host}}" + +- name: Copy arp responder supervisor configuration to the PTF container + template: src=arp_responder.conf.j2 dest=/etc/supervisor/conf.d/arp_responder.conf + vars: + - arp_responder_args: '' + delegate_to: "{{ ptf_host }}" + +- name: Reread supervisor configuration + shell: supervisorctl reread + delegate_to: "{{ptf_host}}" + +- name: Update supervisor configuration + shell: supervisorctl update + delegate_to: "{{ ptf_host }}" + +- name: Copy FDB information file to PTF + template: src=roles/test/templates/fdb.j2 dest=/root/fdb_info.txt + delegate_to: "{{ ptf_host }}" + +- name: "Start PTF runner" + include: ptf_runner.yml + vars: + ptf_test_name: FDB test + ptf_test_dir: ptftests + ptf_test_path: fdb_test.FdbTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - testbed_type='{{testbed_type}}' + - router_mac='{{ansible_Ethernet0['macaddress']}}' + - fdb_info='/root/fdb_info.txt' + - vlan_ip='{{minigraph_vlan_interfaces[0]['addr']}}' + ptf_extra_options: "--relax --debug info --log-file /tmp/fdb_test.FdbTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " + diff --git a/ansible/roles/test/tasks/fdb_mac_expire.yml b/ansible/roles/test/tasks/fdb_mac_expire.yml index e57d0d5f8dc..56d3a070e67 100644 --- a/ansible/roles/test/tasks/fdb_mac_expire.yml +++ b/ansible/roles/test/tasks/fdb_mac_expire.yml @@ -2,7 +2,7 @@ when: testbed_type is not defined - fail: msg="testbed_type {{test_type}} is invalid" - when: testbed_type not in ['t0', 't0-64', 't0-64-32', 't0-116', 't0-52'] + when: testbed_type not in ['t0', 't0-64', 't0-64-32', 't0-116'] - name: set fdb_aging_time to default if no user input set_fact: @@ -54,7 +54,7 @@ - debug: msg="{{show_mac_output.stdout}}" - name: "Start PTF runner" - include_tasks: ptf_runner.yml + include: 
ptf_runner.yml vars: ptf_test_name: FDB Mac Expire test ptf_test_dir: ptftests diff --git a/ansible/roles/test/tasks/iface_naming_mode.yml b/ansible/roles/test/tasks/iface_naming_mode.yml index bfe66c26958..3c298a0d9f7 100644 --- a/ansible/roles/test/tasks/iface_naming_mode.yml +++ b/ansible/roles/test/tasks/iface_naming_mode.yml @@ -1,4 +1,41 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml +# Tests for interface_naming_mode feature +- include_vars: roles/test/tasks/iface_naming_mode/vars/iface_naming_vars.yml + +- name: Gathering minigraph facts about the device + minigraph_facts: host={{inventory_hostname}} + +- name: find interface name mapping + port_alias: hwsku="{{hwsku}}" + +# Get the default interface names list +- set_fact: + default_interfaces: "{{port_name_map | list}}" + +#Get the configured ports which are up from minigraph facts and get its alias name +- set_fact: + up_ports: "{{minigraph_ports | list}}" + +- set_fact: + upport_alias_list: "{{minigraph_ports.values()| map(attribute='alias') | list }}" + +#Sample Test interface name and its alias +- set_fact: + interface: "{{minigraph_ports | sort | first}}" +- set_fact: + interface_alias: "{{port_name_map[interface]}}" + +############################################################# +######################## START OF TESTS ##################### +############################################################# + +# All tests run for user guest in alias mode as well as in default mode + +- name: Test Interface naming mode feature in alias mode + include: "roles/test/tasks/iface_naming_mode/iface_naming_mode_tests.yml" vars: - test_node: iface_namingmode/test_iface_namingmode.py + mode: alias + +- name: Test Interface naming mode feature in default mode + include: "roles/test/tasks/iface_naming_mode/iface_naming_mode_tests.yml" + vars: + mode: default diff --git a/ansible/roles/test/tasks/iface_naming_mode/add_user.yml b/ansible/roles/test/tasks/iface_naming_mode/add_user.yml new 
file mode 100644 index 00000000000..02fb3cab990 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/add_user.yml @@ -0,0 +1,9 @@ +- name: create user + user: + name: "{{uname1}}" + groups: sudo + state: present + shell: /bin/bash + +- name: Set password for user + shell: "echo {{uname1}}:{{upasswd1}} | sudo chpasswd" diff --git a/ansible/roles/test/tasks/iface_naming_mode/check_userifmode.yml b/ansible/roles/test/tasks/iface_naming_mode/check_userifmode.yml new file mode 100644 index 00000000000..905b1e7e308 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/check_userifmode.yml @@ -0,0 +1,25 @@ +#When the interface naming mode is set,it is written as environment variable in bashrc file. +#The device need to be logout and login for the actual environment variable to take effect. +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. 
+ + +- name: Extract the "SONIC_CLI_IFACE_MODE" value from bashrc file + shell: "cat /home/{{uname1}}/.bashrc | grep SONIC_CLI_IFACE_MODE" + args: + executable: /bin/bash + register: envout + +#extract the environmental variable and save it in the variable 'ifmode_env' +- set_fact: + ifmode_env: "{{envout.stdout}}" +- set_fact: ifmode="{{ifmode_env.split('=')[1]}}" + +- debug: msg="Interface mode is set to '{{ifmode}}'" + +- command: show interfaces naming_mode + register: naming_mode + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" + +- name: check the interface mode is properly set to {{mode}} + assert: {that: "'{{ifmode}}'=='{{mode}}' and '{{naming_mode.stdout}}' == '{{mode}}'"} diff --git a/ansible/roles/test/tasks/iface_naming_mode/iface_naming_mode_tests.yml b/ansible/roles/test/tasks/iface_naming_mode/iface_naming_mode_tests.yml new file mode 100644 index 00000000000..cc0f13b622b --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/iface_naming_mode_tests.yml @@ -0,0 +1,72 @@ +- name: Create normal guest user + include: "roles/test/tasks/iface_naming_mode/add_user.yml" + +- name: set interface naming mode to {{mode}} mode + shell : sudo config interface_naming_mode {{mode}} + register: out + failed_when: out.rc != 0 + become_user: '{{uname1}}' + become: yes + +# Check whether the interface mode is set properly in bashrc file +- include: roles/test/tasks/iface_naming_mode/check_userifmode.yml + +- set_fact: + intf: "{{interface_alias if (mode=='alias') else interface}}" + +############################################################# +######################## START OF TESTS ##################### +############################################################# + +# All tests run for user guest in alias mode as well as in default mode +#Below set of testcases will run for all topologies +- block: + - name: Test show pfc counters output in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/show_pfc_counters.yml" + + - name: Test show queue 
counters output in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/show_queue_counters.yml" + + - name: Test show interface status, counter,description,summary output in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/show_interface.yml" + + - name: Test config interface in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/interface_config.yml" + + become_user: '{{uname1}}' + become: yes + +#Test to be run in T1 topology +- block: + - name: Test show arp output in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/show_arp.yml" + + - name: Test show acl output in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/show_acl.yml" + + - name: Test show ip/ipv6 route in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/show_ip_route.yml" + + when: testbed_type in ['t1'] + become_user: '{{uname1}}' + become: yes + +# Test to be run in t0 topology +- block: + + - name: verify show portchannel interface output in {{mode}} mode + include: "roles/test/tasks/iface_naming_mode/show_portchannel.yml" + + become_user: '{{uname1}}' + become: yes + when: testbed_type in ['t0', 't0-64', 't0-64-32', 't0-116', ] + +- always: + + - name: Remove the user + user: + name: "{{uname1}}" + groups: sudo + state: absent + shell: /bin/bash + remove: yes diff --git a/ansible/roles/test/tasks/iface_naming_mode/interface_config.yml b/ansible/roles/test/tasks/iface_naming_mode/interface_config.yml new file mode 100644 index 00000000000..100b21180de --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/interface_config.yml @@ -0,0 +1,79 @@ + +#set the test interface according to default or alias mode +- set_fact: + intf: "{{interface_alias if (mode=='alias') else interface}}" + +- set_fact: + native_speed: "{{port_speed[interface_alias] if (port_speed | length != 0) else iface_speed}}" + +- block: + + - name: shutdown the interface {{intf}} in {{mode}} mode + shell: sudo config interface {{intf}} shutdown + register: out + 
failed_when: out.rc != 0 + + - pause: seconds=3 + + - name: Get interface status + show_interface: command="status" interfaces={{intf}} + + - pause: seconds=3 + + - name: Check whether the status is down + assert: {that: "'{{int_status[intf]['admin_state']}}' == 'down'"} + + - name: Bringup the interface {{intf}} in {{mode}} mode + shell: sudo config interface {{intf}} startup + register: out + failed_when: out.rc != 0 + + - pause: seconds=3 + + - name: Get interface status + show_interface: command="status" interfaces="{{intf}}" + + - name: Check whether the status is up + assert: {that: "'{{int_status[intf]['admin_state']}}' == 'up'"} + +# check the config interface speed + + - name: configure interface speed to 10G in {{mode}} mode + shell: sudo config interface {{intf}} speed 10000 + register: out + failed_when: out.rc != 0 + + - name: get the interface speed + shell: sudo redis-cli -n 4 HGET "PORT|{{interface}}" 'speed' + register: speed + + - debug: var=speed + + - name: Check whether the speed is set to 10G + assert: {that: "'{{speed.stdout}}' == '10000'"} + + - name: chamge interface speed to native speed and check + shell: sudo config interface {{intf}} speed {{native_speed}} + register: out + failed_when: out.rc != 0 + + - name: get the interface speed + shell: sudo redis-cli -n 4 HGET "PORT|{{interface}}" 'speed' + register: speed + + - name: Check whether the speed is set to native speed + assert: {that: "'{{speed.stdout}}' == '{{native_speed}}'"} + +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround , the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. 
+ + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" + + always: + + - name: set the interface up + shell: sudo config interface {{intf}} startup + + - name: change interface speed to native speed and check + shell: sudo config interface {{intf}} speed {{native_speed}} + diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_acl.yml b/ansible/roles/test/tasks/iface_naming_mode/show_acl.yml new file mode 100644 index 00000000000..5b6837eca4c --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/show_acl.yml @@ -0,0 +1,19 @@ +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. + +- name: verify show acl table output + shell: show acl table DATAACL + register: acl_table + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" + +- debug: var=acl_table.stdout_lines + +- name: check acl table output shows default interface names when mode is default + assert: {that: item in acl_table.stdout} + with_items: minigraph_acls['DataAcl'] + when: mode=='default' and item not in minigraph_portchannels + +- name: check acl table output shows alias interface names when mode is set to alias + assert: {that: " '{{port_name_map[item]}}' in acl_table.stdout"} + with_items: minigraph_acls['DataAcl'] + when: mode=='alias' and item not in minigraph_portchannels diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_arp.yml b/ansible/roles/test/tasks/iface_naming_mode/show_arp.yml new file mode 100644 index 00000000000..1160c90f1a8 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/show_arp.yml @@ -0,0 +1,28 @@ +- name: get arp facts + switch_arptable: + +- debug: var=arptable['v4'] + +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. 
Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. + +- name: Get the output of show arp command in {{mode}} mode + shell: show arp + register: arp_output + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" + +- debug: var=arp_output + +- name: Check the output shows default interface names corresponding to the arp + assert: + that: + - arp_output.stdout | search("{{item}}.*\s+{{arptable['v4'][item]['interface']}}") + with_items: arptable['v4'] + when: arptable['v4'][item]['interface']!='eth0' and mode=='default' + +- name: Check the output shows alias interface names corresponding to the arp + assert: + that: + - arp_output.stdout | search("{{item}}.*\s+{{port_name_map[arptable['v4'][item]['interface']]}}") + with_items: arptable['v4'] + when: arptable['v4'][item]['interface']!='eth0' and mode =='alias' diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_interface.yml b/ansible/roles/test/tasks/iface_naming_mode/show_interface.yml new file mode 100644 index 00000000000..8afbae4d66f --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/show_interface.yml @@ -0,0 +1,45 @@ +- block: + + # show interface status + - name: show interface status in {{mode}} mode + show_interface: interfaces={{intf}} command='status' + + - debug: var=int_status + + - name: check proper output is displayed for the given Interface + assert: + that: + - "'{{int_status[intf]['name']}}'=='{{interface}}' and '{{int_status[intf]['alias']}}'=='{{interface_alias}}'" + + #show interface counters + - name: Test interface counters in {{mode}} mode + show_interface: command='counter' + + - name: check counter output in alias mode + assert: {that: item in port_alias} + with_items: int_counter + when: mode=="alias" + + - name: check counter output in default mode + assert: {that: item in default_interfaces} + with_items: int_counter + when: mode=="default" + + # show interface description + 
+ - name: show interface description {{intf}} in {{mode}} mode + shell: show interface description {{intf}} | sed -n '/^ *Eth/ p' + register: int_description + + - debug: var=int_description.stdout_lines + + - name: check the description command shows proper output + assert: + that: + - int_description.stdout | search("{{interface}}.*{{interface_alias}}") + + +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. + + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_ip_route.yml b/ansible/roles/test/tasks/iface_naming_mode/show_ip_route.yml new file mode 100644 index 00000000000..0319b838355 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/show_ip_route.yml @@ -0,0 +1,66 @@ +- name: Init variables. + set_fact: + spine_ports: [] + spine_port_alias: [] + +- name: Get spine ports + set_fact: + spine_ports: "{{ spine_ports + [item.key]}}" + with_dict: "{{ minigraph_neighbors }}" + when: "'T2' in item.value.name" + +- name: Get spine ports alias + set_fact: + spine_port_alias: "{{ spine_port_alias + [port_name_map[item.key]]}}" + with_dict: "{{ minigraph_neighbors }}" + when: "'T2' in item.value.name" + +# Test IPv4 routes +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. 
+ +- name: get the show ip route for 192.168.1.1 + shell: show ip route 192.168.1.1 + register: route + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" + +- debug: var=route + +- name: check the output shows interface alias names in alias mode + assert: + that: + - route.stdout | search("via {{item}}") + with_items: spine_port_alias + when: mode=='alias' + +- name: check the output shows default interface names in default mdoe + assert: + that: + - route.stdout | search(" via {{item}}") + with_items: spine_ports + when: mode=='default' + +# Test ipv6 routes +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. + +- name: get the show ipv6 route for 20c0:a800::/64 + shell: show ipv6 route 20c0:a800::/64 + register: route + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" + +- debug: var=route + +- name: check the output shows interface alias names in alias mode + assert: + that: + - route.stdout | search("via {{item}}") + with_items: spine_port_alias + when: mode =='alias' + +- name: check the output shows default interface names in default mdoe + assert: + that: + - route.stdout | search(" via {{item}}") + with_items: spine_ports + when: mode =='default' diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_pfc_counters.yml b/ansible/roles/test/tasks/iface_naming_mode/show_pfc_counters.yml new file mode 100644 index 00000000000..b7e28d88744 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/show_pfc_counters.yml @@ -0,0 +1,32 @@ +- block: + - name: get pfc Rx counter values + shell: sudo show pfc counters | sed -n '/Port Rx/,/^$/p' + register: pfc_rx + + - debug: var=pfc_rx.stdout_lines + + - name: get pfc Tx counter values + shell: sudo show pfc counters | sed -n '/Port Tx/,/^$/p' + register: pfc_tx + + - debug: var=pfc_tx.stdout_lines + + - 
name: check if the output shows default interface names in default mode + assert: + that: + - "'{{item}}' in pfc_rx.stdout and '{{item}}' in pfc_tx.stdout " + - "'{{port_name_map[item]}}' not in pfc_rx.stdout and '{{port_name_map[item]}}' not in pfc_tx.stdout" + with_items: default_interfaces + when: mode=='default' + + - name: check if the output shows alias interface names in alias mode + assert: + that: + - "'{{item}}' in pfc_rx.stdout and '{{item}}' in pfc_tx.stdout " + - "'{{port_alias_map[item]}}' not in pfc_rx.stdout and '{{port_alias_map[item]}}' not in pfc_tx.stdout " + with_items: port_alias + when: mode=='alias' +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround,the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. + + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_portchannel.yml b/ansible/roles/test/tasks/iface_naming_mode/show_portchannel.yml new file mode 100644 index 00000000000..c09f284bfc2 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/show_portchannel.yml @@ -0,0 +1,24 @@ +- block: + - name: show interface portchannel + shell: sudo show interfaces portchannel + register: int_po + + - debug: var=int_po.stdout + + - name: check show interface portchannel output shows default interface name in default mode + assert: + that: + - int_po.stdout | search("{{item.key}}\s+LACP\(A\)\(Up\).*{{item.value['members'][0]}}") + with_dict: minigraph_portchannels + when: mode=='default' + + - name: check show interface portchannel output shows alias name in alias mode + assert: + that: + - int_po.stdout | search("{{item.key}}\s+LACP\(A\)\(Up\).*{{port_name_map[item.value['members'][0]]}}") + with_dict: minigraph_portchannels + when: mode=='alias' +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable 
set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. + + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" diff --git a/ansible/roles/test/tasks/iface_naming_mode/show_queue_counters.yml b/ansible/roles/test/tasks/iface_naming_mode/show_queue_counters.yml new file mode 100644 index 00000000000..1aee7fbd0a0 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/show_queue_counters.yml @@ -0,0 +1,36 @@ +- block: + - name: show queue counters {{intf}} + shell: sudo show queue counters {{intf}} | grep "UC\|MC" + register: queue_counter + + - debug: var=queue_counter.stdout_lines + + - name: Check the {{mode}} interface name is present in output when mode is set to {{mode}} + assert: + that: + - queue_counter.stdout | search("{{intf}}\s+[U|M]C{{item}}\s+\d+\s+\d+\s+\d+\s+\d+") + with_sequence: start=0 end=9 + + - name: show queue counters for all interfaces + shell: sudo show queue counters | grep "UC\|MC" + register: queue_counter + + - debug: var=queue_counter.stdout_lines + + - name: Check default interface name is present in output when mode is is set to default + assert: + that: + - queue_counter.stdout | search("{{item}}\s+[UC|MC\d]+\s+\d+\s+\d+\s+\d+\s+\d+") and '{{port_name_map[item]}}' not in queue_counter.stdout + with_items: default_interfaces + when: mode=='default' + + - name: Check alias interface name is present in output when mode is set to alias + assert: + that: + - queue_counter.stdout | search("{{item}}\s+[UC|MC\d]+\s+\d+\s+\d+\s+\d+\s+\d+") and '{{port_alias_map[item]}}' not in queue_counter.stdout + with_items: port_alias + when: mode=='alias' +# As the ansible work in non interactive mode, it doesnt read the environmental varaiable set in bashrc file. Hence as a workaround, the variable is extracted through check_userifmode.yml and manually set the variable 'SONIC_CLI_IFACE_MODE' to take effect. 
+ + environment: + SONIC_CLI_IFACE_MODE: "{{ifmode}}" diff --git a/ansible/roles/test/tasks/iface_naming_mode/vars/iface_naming_vars.yml b/ansible/roles/test/tasks/iface_naming_mode/vars/iface_naming_vars.yml new file mode 100644 index 00000000000..5a2deccdca2 --- /dev/null +++ b/ansible/roles/test/tasks/iface_naming_mode/vars/iface_naming_vars.yml @@ -0,0 +1,3 @@ +#--- +uname1 : guest +upasswd1 : guest diff --git a/ansible/roles/test/tasks/interface.yml b/ansible/roles/test/tasks/interface.yml index 0d028ac0038..b0b51531f75 100644 --- a/ansible/roles/test/tasks/interface.yml +++ b/ansible/roles/test/tasks/interface.yml @@ -18,6 +18,7 @@ - include_tasks: resume_fanout_ports.yml with_items: "{{ ansible_interface_link_down_ports }}" + ignore_errors: yes - name: pause and wait interface to be up pause: seconds=30 @@ -32,24 +33,23 @@ - debug: msg="Found link down ports {{ansible_interface_link_down_ports}}" when: ansible_interface_link_down_ports | length > 0 -- block: - - name: Verify interfaces are up correctly - assert: { that: "{{ ansible_interface_link_down_ports | length }} == 0" } +- name: Verify interfaces are up correctly + assert: { that: "{{ ansible_interface_link_down_ports | length }} == 0" } - rescue: - - include_tasks: check_fanout_interfaces.yml - vars: - check_fanout: true - - - fail: msg="Not all interfaces are up" + #rescue: + #- include: check_fanout_interfaces.yml + # vars: + # check_fanout: true + #- fail: msg="Not all interfaces are up" + - block: - name: Verify port channel interfaces are up correctly assert: { that: "'{{ ansible_interface_facts[item]['active'] }}' == 'True'" } with_items: "{{ minigraph_portchannels.keys() }}" rescue: - - include_tasks: check_sw_vm_interfaces.yml + - include: check_sw_vm_interfaces.yml vars: check_vms: true @@ -61,8 +61,3 @@ - name: Verify VLAN interfaces are up correctly assert: { that: "'{{ ansible_interface_facts[item]['active'] }}' == 'True'" } with_items: "{{ minigraph_vlans.keys() }}" - -# TODO: uncomment 
when issue https://github.com/Azure/sonic-buildimage/issues/2738 will be fixed -# and PR https://github.com/Azure/sonic-mgmt/pull/1165 will be merged -# - name: Verify interfaces counters -# include: interface_counters.yml diff --git a/ansible/roles/test/tasks/interface_up_down.yml b/ansible/roles/test/tasks/interface_up_down.yml index b415e46615a..f9de040a147 100644 --- a/ansible/roles/test/tasks/interface_up_down.yml +++ b/ansible/roles/test/tasks/interface_up_down.yml @@ -1,9 +1,5 @@ # This playbook tests neighbor interface flap and ACS interface status work properly # -- include_tasks: add_container_to_inventory.yml - vars: - container_name: lldp - - name: Gathering minigraph facts about the device minigraph_facts: host={{ inventory_hostname }} @@ -12,11 +8,15 @@ service: name=lldpd state=started enabled=yes - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python - name: Gather information from lldp lldp: - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python - name: If underlay exists, use underlay to replace it. 
set_fact: @@ -37,21 +37,23 @@ pause: seconds=5 - name: gather interface facts - setup: + setup: -- name: verify all local interfaces are up +- name: verify all local interfaces are up assert: { that: "ansible_{{ item }}['active'] == true" } - with_items: "{{ ansible_interfaces }}" + with_items: ansible_interfaces when: - item | match("Ethernet.*") - name: Gather information from lldp again after ports are enabled lldp: - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python - name: rearrange lldp received data structure for interface up down test interface_up_down_data_struct_facts: data="{{ lldp }}" - delegate_to: localhost + connection: local become: no - name: shutdown neighbor interfaces @@ -61,17 +63,17 @@ login: "{{ switch_login[ansible_interface_up_down_data_struct_facts[item]['nei_device_type']] }}" skip_default_user: "yes" connection: cisco - with_items: "{{ ansible_interface_up_down_data_struct_facts.keys() }}" - + with_items: ansible_interface_up_down_data_struct_facts.keys() + - name: sleep for some time pause: seconds=5 - name: gather interface facts - setup: + setup: -- name: verify all local interfaces are down +- name: verify all local interfaces are down assert: { that: "ansible_{{ item }}['active'] == false" } - with_items: "{{ ansible_interfaces }}" + with_items: ansible_interfaces when: - item | match("Ethernet.*") @@ -82,16 +84,16 @@ login: "{{ switch_login[ansible_interface_up_down_data_struct_facts[item]['nei_device_type']] }}" skip_default_user: "yes" connection: cisco - with_items: "{{ ansible_interface_up_down_data_struct_facts.keys() }}" + with_items: ansible_interface_up_down_data_struct_facts.keys() - name: sleep for some time pause: seconds=5 - name: gather interface facts - setup: + setup: -- name: verify all local interfaces are up +- name: verify all local interfaces are up assert: { that: "ansible_{{ item }}['active'] == true" } - with_items: "{{ 
ansible_interfaces }}" + with_items: ansible_interfaces when: - item | match("Ethernet.*") diff --git a/ansible/roles/test/tasks/lag.yml b/ansible/roles/test/tasks/lag.yml index 364e8a2f8f9..3be043c1a69 100644 --- a/ansible/roles/test/tasks/lag.yml +++ b/ansible/roles/test/tasks/lag.yml @@ -18,7 +18,7 @@ # Generate file with BGP routes information - template: src=lag.j2 dest=/tmp/lag.txt - delegate_to: localhost + connection: local - set_fact: testname: lag @@ -28,7 +28,7 @@ test_ignore_file: lag_ignore_messages.txt test_expect_file: lag_expect_messages.txt match_file: loganalyzer_common_match.txt - ignore_file: loganalyzer_common_ignore.txt + ignore_file: loganalyzer_common_ignore.txt tests_location: "{{ 'roles/test/tasks' }}" # Separate set_fact is required to be able to use 'testname' fact. @@ -44,32 +44,32 @@ - debug: msg="output directory for current test run {{ test_out_dir }}" - debug: msg="generated run id:{{testname_unique}}" -- include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml +- include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - block: - name: copy the test to ptf container copy: src=roles/test/files/acstests dest=/root delegate_to: "{{ ptf_host }}" - + - name: "Running test {{ testname }}" shell: ptf --test-dir acstests lag_test.LagAllRoutes --platform remote -t "verbose=True;router_mac='{{ ansible_Ethernet0['macaddress'] }}';lag_info='/tmp/lag.txt'" args: chdir: /root delegate_to: "{{ ptf_host }}" register: out - + - debug: var=out.stdout_lines when: out.rc != 0 - name: PortChannel structure debug: msg="{{ minigraph_portchannel_interfaces }}" - + - name: "Run {{ testname }} with changing member port state to up and down" - include_tasks: per_lag_test.yml - with_items: "{{ minigraph_portchannel_interfaces }}" + include: per_lag_test.yml + with_items: minigraph_portchannel_interfaces always: - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + - include: 
roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + - include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml diff --git a/ansible/roles/test/tasks/lag_2.yml b/ansible/roles/test/tasks/lag_2.yml index a9d5a76f187..73ee77f92be 100644 --- a/ansible/roles/test/tasks/lag_2.yml +++ b/ansible/roles/test/tasks/lag_2.yml @@ -1,11 +1,76 @@ ### this is the Lag_2 lag test that tests each Lag interface minimum link and rate of sending LACP DU packets ### this test could be consider as an additional/alternative lag test from existing lagall.yml. ### Due to some labs are using two layer fanout switches, and one DUT might connects to multiple fanoutleaf switches -### so for minimum link test of lag member flaps, it requires to use lab connection facts to determine the fanout neighbor ports, -### Also, most of the traffic load balancing tests of LAG interface are covered in new FIB tests. so we are ignoring traffic test +### so for minimum link test of lag member flaps, it requires to use lab connection facts to determine the fanout neighbor ports, +### Also, most of the traffic load balancing tests of LAG interface are covered in new FIB tests. 
so we are ignoring traffic test ### for lag member flaps for now, will consider add traffic back if required -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml +- fail: msg="Please define ptf_host" + when: ptf_host is not defined + +- fail: msg="Please define testbed_type" + when: testbed_type is not defined + +- name: gathering lag facts from device + lag_facts: host={{ inventory_hostname }} + +- fail: msg="No lag configuration found in {{ inventory_hostname }}" + when: lag_facts.names == [] + +- set_fact: test_minlink=true + when: test_minlink is not defined + +- set_fact: test_rate=true + when: test_rate is not defined + +- name: Gathering peer VM information from lldp + lldp: vars: - test_node: pc/test_lag_2.py + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python + +- name: gathering minigraph of the device configuration + minigraph_facts: host={{ inventory_hostname }} + +- name: Gathering lab graph facts about the device + conn_graph_facts: host={{ inventory_hostname }} + connection: local + +- set_fact: + fanout_neighbors: "{{device_conn}}" + +- set_fact: + vm_neighbors: "{{ minigraph_neighbors }}" + +- name: Copy PTF test into PTF-docker for test LACP DU. + copy: src=roles/test/files/acstests/{{ item }} dest=/tmp/{{ item }} + with_items: + - lag_test.py + - acs_base_test.py + - router_utils.py + delegate_to: "{{ ptf_host }}" + +- name: Include testbed topology configuration (to get LAG IP and PTF docker interfaces, that are behind LAG VMs). + include_vars: vars/topo_t1-lag.yml + when: testbed_type == 't1-lag' + +- name: Include testbed topology configuration (to get LAG IP and PTF docker interfaces, that are behind LAG VMs). + include_vars: vars/topo_t0.yml + when: testbed_type == 't0' + +- name: Include testbed topology configuration (to get LAG IP and PTF docker interfaces, that are behind LAG VMs). 
+ include_vars: vars/topo_t0-116.yml + when: testbed_type == 't0-116' + +- set_fact: + dut_mac: "{{ ansible_Ethernet0['macaddress'] }}" + +- name: test each lag interface minimum links and rate + include: single_lag_test.yml + with_items: lag_facts.names + when: test_minlink|bool == true + +- name: test each lag interface LACP DU rate + include: single_lag_lacp_rate_test.yml + with_items: lag_facts.names + when: test_rate|bool == true \ No newline at end of file diff --git a/ansible/roles/test/tasks/lag_fanout_ports_test.yml b/ansible/roles/test/tasks/lag_fanout_ports_test.yml index 4dca66c5fe3..da8a5b4c18f 100644 --- a/ansible/roles/test/tasks/lag_fanout_ports_test.yml +++ b/ansible/roles/test/tasks/lag_fanout_ports_test.yml @@ -23,7 +23,7 @@ - block: - name: Put down {{ portmap_dut_fn[dut_lag_member] }} on Fanout. - include_tasks: run_cisco_script.yml + include: run_cisco_script.yml vars: template: roles/fanout/templates/lag_fn_ports.j2 host: "{{ fanout_mgmt_ip }}" @@ -37,7 +37,7 @@ - pause: seconds: 60 - - include_tasks: lag_run_ptf.yml + - include: lag_run_ptf.yml vars: lag_ptf_test_name: LagMembersTrafficTest params: "dst_addr='{{ lag_ip }}';src_iface={{ not_behind_lag_iface }};check_pkts_iface={{ ifaces_behind_lag_member[iface_behind_lag_member_index] }};num_of_pkts={{ num_of_pkts }};dut_mac='{{ dut_mac }}'" @@ -45,7 +45,7 @@ always: - name: Put up {{ portmap_dut_fn[dut_lag_member] }} on Fanout. 
- include_tasks: run_cisco_script.yml + include: run_cisco_script.yml vars: template: roles/fanout/templates/lag_fn_ports.j2 host: "{{ fanout_mgmt_ip }}" diff --git a/ansible/roles/test/tasks/lag_lacp_timing_test.yml b/ansible/roles/test/tasks/lag_lacp_timing_test.yml index 29184b74bba..0f2c1e4cf3e 100644 --- a/ansible/roles/test/tasks/lag_lacp_timing_test.yml +++ b/ansible/roles/test/tasks/lag_lacp_timing_test.yml @@ -27,14 +27,14 @@ - debug: msg="Finding median of {{ interval_count }} packet intervals" - name: Check LACP timing on eth{{ iface_behind_lag_member[0] }} (interface behind {{ vm_name }}). - include_tasks: lag_run_ptf.yml + include: lag_run_ptf.yml vars: lag_ptf_test_name: LacpTimingTest params: "exp_iface={{ iface_behind_lag_member[0] }}; timeout={{ packet_timeout }}; packet_timing={{ packet_timing }}; ether_type={{ lacp_ether_type }}; interval_count={{ interval_count }}" change_dir: /tmp - name: Check LACP timing on eth{{ iface_behind_lag_member[1] }} (interface behind {{ vm_name }}). 
- include_tasks: lag_run_ptf.yml + include: lag_run_ptf.yml vars: lag_ptf_test_name: LacpTimingTest params: "exp_iface={{ iface_behind_lag_member[1] }}; timeout={{ packet_timeout }}; packet_timing={{ packet_timing }}; ether_type={{ lacp_ether_type }}; interval_count={{ interval_count }}" diff --git a/ansible/roles/test/tasks/lag_minlink.yml b/ansible/roles/test/tasks/lag_minlink.yml index 11866fe2a18..7b5af1f72c0 100644 --- a/ansible/roles/test/tasks/lag_minlink.yml +++ b/ansible/roles/test/tasks/lag_minlink.yml @@ -5,34 +5,15 @@ ### Port channel interface is up after peer port is back - block: - - name: Shut down neighbor interface {{ neighbor_interface }} on {{ peer_device }} ({{ peer_host }}) + - name: Shut down neighbor interface {{ neighbor_interface }} on {{ peer_device }} action: apswitch template=neighbor_interface_shut_single.j2 args: host: "{{peer_host}}" login: "{{switch_login[hwsku_map[peer_hwsku]]}}" connection: switch - - name: Set delay - set_fact: - delay: 5 - - - name: Set retries - set_fact: - retries: "{{ (wait_down_time | int / delay | float) | round(0, 'ceil') }}" - - - name: Let portchannel react to neighbor interface shutdown - pause: - seconds: "{{ deselect_time }}" - - - name: "Verify PortChannel interfaces are up correctly" - shell: bash -c "teamdctl {{ po }} state dump" | python -c "import sys, json; print json.load(sys.stdin)['ports']['{{ item }}']['runner']['selected']" - register: out - until: out.stdout == "True" - with_items: "{{ po_interfaces.keys() }}" - when: item != "{{ flap_intf }}" - become: "yes" - retries: "{{ retries | int }}" - delay: "{{ delay }}" + - pause: + seconds: "{{ wait_down_time }}" - lag_facts: host={{ inventory_hostname }} @@ -56,21 +37,15 @@ ### always bring back port in case test error and left testbed in unknow stage always: - - name: Bring up neighbor interface {{ neighbor_interface }} on {{ peer_device }} ({{ peer_host }}) + - name: Bring up neighbor interface {{ neighbor_interface }} on {{ peer_host }} 
action: apswitch template=neighbor_interface_no_shut_single.j2 args: host: "{{peer_host}}" login: "{{switch_login[hwsku_map[peer_hwsku]]}}" connection: switch - - name: "Verify PortChannel interfaces are up correctly" - shell: bash -c "teamdctl {{ po }} state dump" | python -c "import sys, json; print json.load(sys.stdin)['ports']['{{ item }}']['link']['up']" - register: out - until: out.stdout == "True" - with_items: "{{ po_interfaces.keys() }}" - become: "yes" - retries: "{{ retries | int }}" - delay: "{{ delay }}" + - pause: + seconds: 35 - lag_facts: host={{ inventory_hostname }} diff --git a/ansible/roles/test/tasks/lag_run_ptf.yml b/ansible/roles/test/tasks/lag_run_ptf.yml index a6a88b5803b..4dbdd87250c 100644 --- a/ansible/roles/test/tasks/lag_run_ptf.yml +++ b/ansible/roles/test/tasks/lag_run_ptf.yml @@ -8,7 +8,7 @@ #----------------------- - name: Run lag_test.{{ lag_ptf_test_name }} on PTF docker. - shell: ptf --test-dir . --platform-dir /root/ptftests --platform remote lag_test.{{ lag_ptf_test_name }} -t "{{ params }}" + shell: ptf --test-dir . --platform remote lag_test.{{ lag_ptf_test_name }} -t "{{ params }}" args: chdir: "{{ change_dir }}" delegate_to: "{{ ptf_host }}" diff --git a/ansible/roles/test/tasks/lag_vm_lacp_test.yml b/ansible/roles/test/tasks/lag_vm_lacp_test.yml index 3dbc765b606..4afb60c66fe 100644 --- a/ansible/roles/test/tasks/lag_vm_lacp_test.yml +++ b/ansible/roles/test/tasks/lag_vm_lacp_test.yml @@ -23,7 +23,7 @@ # --- TEST FIRST LAG MEMBER --- - name: Read first LAG member configuration to learn LACP rate. - include_tasks: run_cisco_script.yml + include: run_cisco_script.yml vars: template: roles/vm_set/templates/lag_lacp.j2 host: "{{ vm_ip }}" @@ -40,7 +40,7 @@ # --- TEST SECOND LAG MEMBER --- - name: Read second LAG member configuration to learn LACP rate. 
- include_tasks: run_cisco_script.yml + include: run_cisco_script.yml vars: template: roles/vm_set/templates/lag_lacp.j2 host: "{{ vm_ip }}" diff --git a/ansible/roles/test/tasks/lagall.yml b/ansible/roles/test/tasks/lagall.yml index e1cf48ab73d..116f313a3e3 100644 --- a/ansible/roles/test/tasks/lagall.yml +++ b/ansible/roles/test/tasks/lagall.yml @@ -42,13 +42,11 @@ dut_lag_members: "{{ dut_lag_members }} + [ '{{ item.1['members'][0] }}' ] + [ '{{ item.1['members'][1] }}' ]" with_indexed_items: "{{ minigraph_portchannel_interfaces }}" -- include_tasks: add_container_to_inventory.yml - vars: - container_name: lldp - - name: Gather information from LLDP. lldp: - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python - debug: msg: "{{ dut_lag_members }}" @@ -77,7 +75,7 @@ - name: Add VMs information to in-memory inventory. add_host: name={{ lag_vms[item][0] }} ansible_ssh_user={{ switch_login['Arista']['user'] }} ansible_ssh_pass={{ switch_login['Arista']['passwd'][0] }} - with_items: "{{ lag_vms.keys() }}" + with_items: lag_vms.keys() #----------------------------------- # Start tests @@ -95,14 +93,14 @@ include_vars: vars/topo_t1-lag.yml - name: --TEST-- LACP verification on all VMs. - include_tasks: lag_vm_lacp_test.yml + include: lag_vm_lacp_test.yml vars: login_creds: "{{ switch_login['Arista'] }}" lag_member_0: "{{ lag_vms[item][1] }}" lag_member_1: "{{ lag_vms[item][2] }}" vm_ip: "{{ lag_vms[item][0] }}" vm_name: "{{ item }}" - with_items: "{{ lag_vms.keys() }}" + with_items: lag_vms.keys() # Get a list of LAGs on DUT. - set_fact: @@ -114,7 +112,7 @@ with_indexed_items: "{{ minigraph_portchannel_interfaces }}" - name: --TEST-- LACP verification on DUT LAGs. - include_tasks: lag_dut_lacp_test.yml + include: lag_dut_lacp_test.yml vars: lag_iface: "{{ dut_lags[1] }}" when: dut_lags is defined @@ -123,7 +121,7 @@ iface_behind_lag_member_index: 0 - name: --TEST-- Fanout ports test. 
- include_tasks: lag_fanout_ports_test.yml + include: lag_fanout_ports_test.yml vars: fanout_mgmt_ip: "{{ hostvars[fanout_switch]['ansible_host'] }}" fanout_user: "{{ hostvars[fanout_switch]['ansible_ssh_user'] }}" diff --git a/ansible/roles/test/tasks/link_flap.yml b/ansible/roles/test/tasks/link_flap.yml index 4fe53e0c8dc..2a1ae0256a3 100644 --- a/ansible/roles/test/tasks/link_flap.yml +++ b/ansible/roles/test/tasks/link_flap.yml @@ -4,12 +4,12 @@ - name: Gathering lab graph facts about the device conn_graph_facts: host={{ inventory_hostname }} - delegate_to: localhost + connection: local tags: always - name: Set neighbor facts set_fact: neighbors: "{{ device_conn }}" -- include_tasks: link_flap/link_flap_helper.yml +- include: link_flap/link_flap_helper.yml with_items: "{{ device_conn.keys() }}" diff --git a/ansible/roles/test/tasks/link_flap/link_flap_helper.yml b/ansible/roles/test/tasks/link_flap/link_flap_helper.yml index 0cbaab99b49..5c9f5f59f10 100644 --- a/ansible/roles/test/tasks/link_flap/link_flap_helper.yml +++ b/ansible/roles/test/tasks/link_flap/link_flap_helper.yml @@ -21,7 +21,7 @@ neighbor_interface: "{{neighbors[interface]['peerport']}}" - conn_graph_facts: host={{ peer_device }} - delegate_to: localhost + connection: local - set_fact: peer_host: "{{device_info['mgmtip']}}" @@ -46,7 +46,7 @@ - name: Shutting down neighbor interface {{neighbor_interface}} on {{peer_host}} become: true - shell: config interface shutdown {{port_alias_map[neighbor_interface]}} + shell: ip link set {{port_alias_map[neighbor_interface]}} down delegate_to: "{{peer_host}}" when: peer_type == "FanoutLeafSonic" @@ -80,7 +80,7 @@ - name: Bring up neighbor interface {{neighbor_interface}} on {{peer_host}} become: true - shell: config interface startup {{port_alias_map[neighbor_interface]}} + shell: ip link set {{port_alias_map[neighbor_interface]}} up delegate_to: "{{peer_host}}" when: peer_type == "FanoutLeafSonic" diff --git a/ansible/roles/test/tasks/lldp.yml 
b/ansible/roles/test/tasks/lldp.yml index c1b6bb388e3..050edd95a33 100644 --- a/ansible/roles/test/tasks/lldp.yml +++ b/ansible/roles/test/tasks/lldp.yml @@ -1,4 +1,61 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml +# Gather minigraph facts +- name: Gathering minigraph facts about the device + minigraph_facts: + host: "{{ inventory_hostname }}" + +- name: Print neighbors in minigraph + debug: msg="{{ minigraph_neighbors }}" + +- name: find minigraph lldp neighbor + set_fact: + minigraph_lldp_nei: "{{ minigraph_lldp_nei|default({}) | combine({ item.key : item.value}) }}" + when: "'server' not in item.value['name'] | lower" + with_dict: minigraph_neighbors + +- name: Gather information from LLDP + lldp: vars: - test_node: lldp/test_lldp.py + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python + +- name: Print LLDP information + debug: msg="{{ lldp }}" + +- name: Verify LLDP information is available on most interfaces + assert: { that: "{{ lldp|length }} > {{ minigraph_lldp_nei|length * 0.8 }}"} + +- name: Compare the LLDP neighbor name with minigraph neigbhor name (exclude the management port) + assert: { that: "'{{ lldp[item]['chassis']['name'] }}' == '{{ minigraph_lldp_nei[item]['name'] }}'" } + with_items: "{{ lldp.keys() }}" + when: item != "eth0" + +- name: Compare the LLDP neighbor interface with minigraph neigbhor interface (exclude the management port) + assert: { that: "'{{ lldp[item]['port']['ifname'] }}' == '{{ minigraph_neighbors[item]['port'] }}'" } + with_items: "{{ lldp.keys() }}" + when: item != "eth0" + +- block: + - name: Obtain the system description of the DUT chassis + shell: "docker exec -i lldp lldpcli show chassis | grep \"SysDescr:\" | sed -e 's/^\\s*SysDescr:\\s*//g'" + register: result + + - name: Store system description of the DUT chassis as a fact + set_fact: + dut_system_description: "{{ result.stdout }}" + +###TODO: fix this lldp_neighbor validation, this part is not running +- 
name: Iterate through each LLDP neighbor and verify the information received by neighbor is correct + add_host: + name: "{{ lldp[item]['chassis']['mgmt-ip'] }}" + groups: "lldp_neighbors,eos" + neighbor_interface: "{{ lldp[item]['port']['ifname'] }}" + dut_interface: "{{ item }}" + hname: "{{ lldp[item]['chassis']['mgmt-ip'] }}" + dut_chassis_id: "0x{{ ansible_eth0['macaddress'] | replace(':', '') }}" + dut_hostname: "{{ inventory_hostname }}" + dut_port_alias: "{{ minigraph_ports[item]['alias'] }}" + dut_port_description: "{{ minigraph_neighbors[item]['name'] }}:{{ minigraph_neighbors[item]['port'] }}" + dut_system_description: "{{ dut_system_description }}" + with_items: "{{ lldp.keys() }}" + when: item != "eth0" + diff --git a/ansible/roles/test/tasks/lldp_neighbor.yml b/ansible/roles/test/tasks/lldp_neighbor.yml new file mode 100644 index 00000000000..54b8db32963 --- /dev/null +++ b/ansible/roles/test/tasks/lldp_neighbor.yml @@ -0,0 +1,25 @@ +- name: Gather LLDP information from all neighbors by performing a SNMP walk + lldp_facts: + host: "{{ hname }}" + version: "v2c" + community: "{{ snmp_rocommunity }}" + connection: local + +- name: Print LLDP facts from neighbors + debug: msg="{{ ansible_lldp_facts }}" + +- name: Verify the published DUT system name field is correct + assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_sys_name'] }}' == '{{ dut_hostname }}'"} + +# FIXME: use more strict assertion +- name: Verify the published DUT chassis id field is not empty + assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_chassis_id'] }}' == '{{ dut_chassis_id }}'"} + +- name: Verify the published DUT system description field is correct + assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_sys_desc'] }}' == '{{ dut_system_description }}'"} + +- name: Verify the published DUT port id field is correct + assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_port_id'] }}' == '{{ dut_port_alias }}'"} + +- 
name: Verify the published DUT port description field is correct + assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_port_desc'] }}' == '{{ dut_port_description }}'"} diff --git a/ansible/roles/test/tasks/mac_update.yml b/ansible/roles/test/tasks/mac_update.yml index 740a94cb331..bd3be7aa391 100644 --- a/ansible/roles/test/tasks/mac_update.yml +++ b/ansible/roles/test/tasks/mac_update.yml @@ -1,5 +1,5 @@ - name: Gathering minigraph facts about the device minigraph_facts: host={{ inventory_hostname }} -- include_tasks: mac_entry_update.yml +- include: mac_entry_update.yml with_items: "{{ minigraph_interfaces }}" diff --git a/ansible/roles/test/tasks/main.yml b/ansible/roles/test/tasks/main.yml index 4ddd2facb6a..b59e60cdf31 100644 --- a/ansible/roles/test/tasks/main.yml +++ b/ansible/roles/test/tasks/main.yml @@ -1,10 +1,10 @@ --- # Actions for SONiC switches -- include_tasks: sonic.yml +- include: sonic.yml when: scope == 'sonic' # Actions for lldp_neighbors -- include_tasks: lldp_neighbor.yml +- include: lldp_neighbor.yml when: scope == 'lldp_neighbors' tags: lldp diff --git a/ansible/roles/test/tasks/mem_check.yml b/ansible/roles/test/tasks/mem_check.yml new file mode 100644 index 00000000000..a1c041fce71 --- /dev/null +++ b/ansible/roles/test/tasks/mem_check.yml @@ -0,0 +1,19 @@ +- name: Copy mem_check.sh to the DuT + become: true + copy: + src: roles/test/files/helpers/mem_check.sh + dest: /tmp/mem_check.sh + mode: 0755 + +- name: Run mem_check.sh + become: true + shell: /tmp/mem_check.sh + register: results + failed_when: results.rc != 0 + +- name: Delete mem_check.sh from the DuT + become: true + file: + state: absent + path: /tmp/mem_check.sh + diff --git a/ansible/roles/test/tasks/mtu.yml b/ansible/roles/test/tasks/mtu.yml index 6492725b7f1..6e97b4e80ae 100644 --- a/ansible/roles/test/tasks/mtu.yml +++ b/ansible/roles/test/tasks/mtu.yml @@ -1,7 +1,40 @@ #----------------------------------------- # Run MTU test and Perform log analysis. 
#----------------------------------------- -- name: Run MTU relay test (pytest-ansible) - include_tasks: roles/test/tasks/pytest_runner.yml + +# Pre-check testbed_type value +- fail: msg="testbed_type is not defined." + when: testbed_type is not defined + +- fail: msg="testbed_type {{testbed_type}} is invalid." + when: testbed_type not in ['t1-lag', 't1', 't1-64-lag', 't1-64-lag-clet'] + +- include_vars: "vars/topo_{{testbed_type}}.yml" + +- name: Expand properties into props + set_fact: props="{{configuration_properties['spine']}}" + +- name: Expand ToR properties into props + set_fact: props_tor="{{configuration_properties['tor']}}" + +- name: Gathering minigraph facts about the device + minigraph_facts: host={{ inventory_hostname }} + +- debug : msg="Start MTU Test" + +- name: copy the test to ptf container + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + +- name: "Start PTF runner" + include: ptf_runner.yml vars: - test_node: ipfwd/test_mtu.py + ptf_test_name: MTU test + ptf_test_dir: ptftests + ptf_test_path: mtu_test.MtuTest + ptf_platform_dir: ptftests + ptf_platform: remote + ptf_test_params: + - testbed_type='{{testbed_type}}' + - router_mac='{{ansible_Ethernet0['macaddress']}}' + ptf_extra_options: "--relax --debug info --log-file /tmp/mtu_test.MtuTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log --socket-recv-size 16384" diff --git a/ansible/roles/test/tasks/neighbour-mac-noptf.yml b/ansible/roles/test/tasks/neighbour-mac-noptf.yml index 88fe1b3d46d..6beab8b8ac5 100644 --- a/ansible/roles/test/tasks/neighbour-mac-noptf.yml +++ b/ansible/roles/test/tasks/neighbour-mac-noptf.yml @@ -11,7 +11,7 @@ register: pgrep_out - debug: var=pgrep_out.stdout_lines - + - name: Verify that orchagent process is running assert: { that: "{{ pgrep_out.stdout_lines | length }} > 0"} @@ -29,7 +29,7 @@ v6_intf_nei: "fe00::2" - name: init loganalyzer for later syslog analysis - include_tasks: 
roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: gather DUT interface table show_interface: command='status' @@ -65,7 +65,7 @@ command: "ip neigh change {{ v4_nei }} lladdr {{ v4_mac2 }} dev {{ v4_intf }}" - name: pause a second and check - pause: seconds=2 + pause: seconds=2 - name: gather orchagent pid, make sure orchagent is still running after v4 neighbor change mac command: "pgrep orchagent" @@ -73,7 +73,7 @@ - assert: { that: orchid.stdout != '' } - - name: check kernel arp table again + - name: check kernel arp table again switch_arptable: - name: make sure neighbor mac address was changed on SONiC @@ -87,7 +87,7 @@ - name: Check if mac changed in DUT ASIC DB. Should be {{ v4_mac2 }} shell: docker exec database redis-cli -n 1 HGETALL '{{ neighbour_key.stdout }}' | grep "_ATTR_DST_MAC_ADDRESS" -A 1 | tail -1 register: neighbour_mac - + - assert: { that: "neighbour_mac.stdout | lower == v4_mac2" } ############## Test V6 mac change ################## @@ -135,17 +135,19 @@ - assert: { that: "neighbour_mac.stdout | lower == v6_mac2" } - name: analyze the syslog to make sure no syncd errors - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: close out the loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml become: yes always: - name: reset all changes - include_tasks: "roles/test/tasks/common_tasks/reload_config.yml" - vars: - config_source: "config_db" + shell: "config load_minigraph -y" + become: yes + + - name: wait 60 seconds for ports to be up + pause: seconds=60 - name: check port status interface_facts: up_ports={{ minigraph_ports }} diff --git a/ansible/roles/test/tasks/ntp.yml b/ansible/roles/test/tasks/ntp.yml index ddfd4cc209a..c65b4cba007 100644 --- 
a/ansible/roles/test/tasks/ntp.yml +++ b/ansible/roles/test/tasks/ntp.yml @@ -1,4 +1,25 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: ntp/test_ntp.py +- name: Stop NTP service + become: true + service: + name: ntp + state: stopped + +- name: Force local clock to sync with an NTP clock + become: true + command: ntpd -gq + +- name: Start NTP service + become: true + service: + name: ntp + state: started + +- name: Check if NTP is synced + become: true + shell: ntpstat + register: ntpstat_result + until: ntpstat_result.rc == 0 + retries: 8 + delay: 10 + +- debug: msg="NTP Status {{ ntpstat_result.stdout }}" diff --git a/ansible/roles/test/tasks/per_lag_test.yml b/ansible/roles/test/tasks/per_lag_test.yml index 7be5ba76d31..d1346c8d331 100644 --- a/ansible/roles/test/tasks/per_lag_test.yml +++ b/ansible/roles/test/tasks/per_lag_test.yml @@ -21,10 +21,10 @@ - set_fact: cur_lag_member_name: "{{member_0_name}}" cur_lag_member_index: "{{member_0_index}}" -- include_tasks: roles/test/tasks/per_lag_member_test.yml +- include: roles/test/tasks/per_lag_member_test.yml - debug: msg="Test current lag with member-1 port put down" - set_fact: cur_lag_member_name: "{{member_1_name}}" cur_lag_member_index: "{{member_1_index}}" -- include_tasks: roles/test/tasks/per_lag_member_test.yml +- include: roles/test/tasks/per_lag_member_test.yml diff --git a/ansible/roles/test/tasks/pfc_asym.yml b/ansible/roles/test/tasks/pfc_asym.yml index b244c787769..d1f0d077694 100644 --- a/ansible/roles/test/tasks/pfc_asym.yml +++ b/ansible/roles/test/tasks/pfc_asym.yml @@ -1,4 +1,286 @@ -- name: Run PFC Asymmetric pytest test case - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: pfc_asym/test_pfc_asym.py +- block: + + - fail: msg="Information about testbed missing" + when: (testbed_type is not defined) + + - fail: msg="Invalid testbed_type value '{{testbed_type}}'" + when: testbed_type not in ['t0'] + + # Initialize ports info + + 
- name: Gather minigraph facts about the device + minigraph_facts: host={{inventory_hostname}} + + - name: Initialize variables + set_fact: + server_ports: [] + + - name: Generate IP address in VLAN range for each server port + get_ip_in_range: num={{minigraph_vlans[minigraph_vlan_interfaces[0]['attachto']]['members']|length}} prefix="{{minigraph_vlan_interfaces[0]['addr']}}/{{minigraph_vlan_interfaces[0]['prefixlen']}}" exclude_ips="{{minigraph_vlan_interfaces[0]['addr']}}" + become: no + connection: local + failed_when: False + + - name: Get server ports info + set_fact: + server_ports: "{{ server_ports + [{ 'dut_name': item.1, 'ptf_name': 'eth%u' % minigraph_port_indices[item.1], 'index': minigraph_port_indices[item.1], 'ptf_ip': generated_ips[item.0] }] }}" + with_indexed_items: "{{ minigraph_vlans[minigraph_vlan_interfaces[0]['attachto']]['members'] }}" + + - name: Get non server port info + set_fact: + non_server_port: "{{ {'dut_name': minigraph_portchannels[minigraph_portchannel_interfaces[0].attachto].members[0], 'ptf_name': 'eth%u' % minigraph_port_indices[minigraph_portchannels[minigraph_portchannel_interfaces[0].attachto].members[0]], 'index': minigraph_port_indices[minigraph_portchannels[minigraph_portchannel_interfaces[0].attachto].members[0]], 'ip': minigraph_portchannel_interfaces[0].peer_addr} }}" + + # Configure ARP responder + + - name: Set unique MACs to PTF interfaces + script: roles/test/files/helpers/change_mac.sh + delegate_to: "{{ptf_host}}" + + - name: Copy ARP responder to PTF + copy: src=roles/test/files/helpers/arp_responder.py dest=/opt + delegate_to: "{{ptf_host}}" + + - name: Copy ARP responder supervisor configuration to the PTF container + template: src=arp_responder.conf.j2 dest=/etc/supervisor/conf.d/arp_responder.conf + vars: + - arp_responder_args: '-c /tmp/arp_responder_pfc_asym.json' + delegate_to: "{{ ptf_host }}" + + - name: Reread supervisor configuration + shell: /usr/local/bin/supervisorctl reread + delegate_to: 
"{{ptf_host}}" + + - name: Update supervisor configuration + shell: /usr/local/bin/supervisorctl update + delegate_to: "{{ ptf_host }}" + + # Copy tests to PTF host + + - name: Copy PTF tests to PTF host + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + + - name: Copy SAI tests to PTF host + copy: src=roles/test/files/saitests dest=/root + delegate_to: "{{ ptf_host }}" + + - name: Copy PTF portmap to PTF host + copy: src={{ ptf_portmap }} dest=/root + delegate_to: "{{ ptf_host }}" + + # Get lossless and lossy priorities + + - name: Get port QOS map key + command: docker exec -i database redis-cli --raw -n 4 KEYS *PORT_QOS_MAP* + register: out + - set_fact: port_qos_key={{out.stdout}} + + - name: Get lossless priorities + command: docker exec -i database redis-cli -n 4 HGET "{{port_qos_key}}" "pfc_enable" + register: out + - set_fact: lossless_priorities={{out.stdout.split(",")}} + + - name: Get buffer PG keys + command: docker exec -i database redis-cli --raw -n 4 KEYS *BUFFER_PG* + register: out + - set_fact: buf_pg_keys={{out.stdout.split()}} + + - name: Get buffer PG profiles + command: docker exec -i database redis-cli -n 4 HGET {{item}} "profile" + with_items: "{{buf_pg_keys}}" + register: out + + - name: Get lossy priorities + set_fact: lossy_priorities={{item.item.split("|")[-1].split("-")}} + with_items: "{{out.results}}" + when: item.stdout|search("lossy") + + # Get PFC to DSCP mapping + + - set_fact: + pfc_to_dscp: {} + + - name: Get DSCP to TC map key + command: bash -c "docker exec -i database redis-cli --raw -n 4 KEYS *DSCP_TO_TC_MAP*" + register: out + - set_fact: dscp_to_tc_key={{out.stdout}} + + - name: Get DSCP to TC map hash keys + command: docker exec -i database redis-cli --raw -n 4 HKEYS "{{dscp_to_tc_key}}" + register: out + - set_fact: dscp_to_tc_keys={{out.stdout.split()}} + + - name: Get DSCP to TC map + command: docker exec -i database redis-cli -n 4 HGET {{dscp_to_tc_key}} {{item}} + with_items: 
"{{dscp_to_tc_keys}}" + register: out + + - name: Get PFC to DSCP map + set_fact: + pfc_to_dscp: "{{ pfc_to_dscp | combine({item.stdout|int: item.item|int}) }}" + with_items: "{{out.results}}" + + # Configure and deploy PFC generator + + - name: Gathering lab graph facts about the device + conn_graph_facts: host={{ ansible_host }} + connection: local + tags: always + + - set_fact: + neighbors: "{{device_conn}}" + + - set_fact: + peer_device: "{{neighbors[server_ports[0]['dut_name']]['peerdevice']}}" + neighbor_interface: "{{neighbors[server_ports[0]['dut_name']]['peerport']}}" + + - set_fact: neighbor_interfaces="{{ neighbor_interfaces|default([]) + [ neighbors[item]['peerport'] ] }}" + with_items: "{{ minigraph_vlans[minigraph_vlan_interfaces[0]['attachto']]['members'] }}" + + - conn_graph_facts: host={{ peer_device }} + connection: local + + - set_fact: + peer_host: "{{device_info['mgmtip']}}" + peer_hwsku: "{{device_info['HwSku']}}" + peer_type: "{{device_info['Type']}}" + + - name: Set PFC storm templates based on fanout platform SKU + include: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml + + - set_fact: + pfc_gen_file: pfc_gen.py + pfc_queue_index: 0xff + pfc_frames_number: 1000000 + pfc_fanout_interface: "{{ neighbor_interfaces | join(',') }}" + ansible_eth0_ipv4_addr: "{{ansible_eth0['ipv4']['address']}}" + pfc_asym: True + + - name: Deploy PFC packet generator to fanout switch + include: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml + + # Run test cases + + - name: Start PTF runner + include: ptf_runner.yml + vars: + ptf_test_name: Asymmetric PFC test + ptf_test_dir: saitests + ptf_test_path: pfc_asym.PfcAsymOffOnTxTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - port_map_file='/root/{{ ptf_portmap | basename }}' + - server='{{ ansible_host }}' + - server_ports={{server_ports}} + - non_server_port={{non_server_port}} + - router_mac='{{ ansible_Ethernet0['macaddress'] }}' + - 
pfc_to_dscp={{pfc_to_dscp}} + - lossless_priorities={{lossless_priorities}} + - lossy_priorities={{lossy_priorities}} + ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_asym.PfcAsymOffTxTest.log" + + - name: Start PFC generator on fanout switch + action: apswitch template="{{pfc_wd_storm_template}}" + args: + host: "{{peer_host}}" + login: "{{switch_login[hwsku_map[peer_hwsku]]}}" + connection: switch + + - name: Start PTF runner + include: ptf_runner.yml + vars: + ptf_test_name: Asymmetric PFC test + ptf_test_dir: saitests + ptf_test_path: pfc_asym.PfcAsymOffRxTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - port_map_file='/root/{{ ptf_portmap | basename }}' + - server='{{ ansible_host }}' + - server_ports={{server_ports}} + - non_server_port={{non_server_port}} + - router_mac='{{ ansible_Ethernet0['macaddress'] }}' + - pfc_to_dscp={{pfc_to_dscp}} + - lossless_priorities={{lossless_priorities}} + - lossy_priorities={{lossy_priorities}} + ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_asym.PfcAsymOffRxTest.log" + + - name: Stop PFC generator on fanout switch + action: apswitch template="{{pfc_wd_storm_stop_template}}" + args: + host: "{{peer_host}}" + login: "{{switch_login[hwsku_map[peer_hwsku]]}}" + connection: switch + + - name: Enable asymmetric PFC on all server interfaces + command: config interface pfc asymmetric {{ item.dut_name }} on + become: yes + with_items: '{{ server_ports }}' + + - name: Start PTF runner + include: ptf_runner.yml + vars: + ptf_test_name: Asymmetric PFC test + ptf_test_dir: saitests + ptf_test_path: pfc_asym.PfcAsymOffOnTxTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - port_map_file='/root/{{ ptf_portmap | basename }}' + - server='{{ ansible_host }}' + - server_ports={{server_ports}} + - non_server_port={{non_server_port}} + - router_mac='{{ ansible_Ethernet0['macaddress'] }}' + - pfc_to_dscp={{pfc_to_dscp}} + - 
lossless_priorities={{lossless_priorities}} + - lossy_priorities={{lossy_priorities}} + ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_asym.PfcAsymOnTxTest.log" + + - name: Start PFC generator on fanout switch + action: apswitch template="{{pfc_wd_storm_template}}" + args: + host: "{{peer_host}}" + login: "{{switch_login[hwsku_map[peer_hwsku]]}}" + connection: switch + + - name: Start PTF runner + include: ptf_runner.yml + vars: + ptf_test_name: Asymmetric PFC test + ptf_test_dir: saitests + ptf_test_path: pfc_asym.PfcAsymOnRxTest + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_test_params: + - port_map_file='/root/{{ ptf_portmap | basename }}' + - server='{{ ansible_host }}' + - server_ports={{server_ports}} + - non_server_port={{non_server_port}} + - router_mac='{{ ansible_Ethernet0['macaddress'] }}' + - pfc_to_dscp={{pfc_to_dscp}} + - lossless_priorities={{lossless_priorities}} + - lossy_priorities={{lossy_priorities}} + ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_asym.PfcAsymOnRxTest.log" + + always: + + - name: Disable asymmetric PFC on all server interfaces + command: config interface pfc asymmetric {{ item.dut_name }} off + become: yes + with_items: '{{ server_ports }}' + + - name: Remove SAI tests from PTF container + file: path=/root/saitests state=absent + delegate_to: "{{ ptf_host }}" + + - name: Remove portmap + file: path=/root/={{ ptf_portmap }} state=absent + delegate_to: "{{ ptf_host }}" + + - name: Stop PFC generator on fanout switch + action: apswitch template="{{pfc_wd_storm_stop_template}}" + args: + host: "{{peer_host}}" + login: "{{switch_login[hwsku_map[peer_hwsku]]}}" + connection: switch diff --git a/ansible/roles/test/tasks/pfc_wd.yml b/ansible/roles/test/tasks/pfc_wd.yml index 188fb962b29..1da3c878c47 100644 --- a/ansible/roles/test/tasks/pfc_wd.yml +++ b/ansible/roles/test/tasks/pfc_wd.yml @@ -26,7 +26,7 @@ - name: Gathering lab graph facts about the device conn_graph_facts: host={{ 
inventory_hostname }} - delegate_to: localhost + connection: local tags: always - name: Gather minigraph facts about the device @@ -57,7 +57,7 @@ test_ports: {} - name: Iterate all interfaces to get all interface ports info if exists - include_tasks: roles/test/tasks/pfc_wd/iterate_interfaces.yml + include: roles/test/tasks/pfc_wd/iterate_interfaces.yml with_items: "{{minigraph_interfaces}}" when: item['addr'] | ipv4 @@ -66,13 +66,13 @@ when: minigraph_portchannels| length > 0 - name: Iterate all the portchannels to get all portchanel member ports info if exists. - include_tasks: roles/test/tasks/pfc_wd/iterate_portchannels.yml + include: roles/test/tasks/pfc_wd/iterate_portchannels.yml with_dict: "{{minigraph_portchannels}}" - name: Generate ips in VLAN range get_ip_in_range: num=1 prefix="{{minigraph_vlan_interfaces[0]['addr']}}/{{minigraph_vlan_interfaces[0]['prefixlen']}}" exclude_ips="{{minigraph_vlan_interfaces[0]['addr']}}" become: no - delegate_to: localhost + connection: local failed_when: False when: minigraph_vlans | length >0 @@ -86,7 +86,7 @@ when: minigraph_vlans | length >0 - name: Iterate all vlan members to get all vlan member ports info if exists. - include_tasks: roles/test/tasks/pfc_wd/iterate_vlans.yml + include: roles/test/tasks/pfc_wd/iterate_vlans.yml with_items: "{{vlan_members | default([])}}" when: minigraph_vlans | length >0 @@ -160,20 +160,20 @@ - name: Test PFC WD configuration validation. vars: pfc_wd_template: roles/test/templates/pfc_wd_config.j2 - include_tasks: roles/test/tasks/pfc_wd/config_test/config_test.yml + include: roles/test/tasks/pfc_wd/config_test/config_test.yml when: subtest is undefined or (subtest is defined and 'pfc_config' in subtest) - name: Test PFC WD Functional tests. 
- include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test.yml with_dict: "{{select_test_ports}}" when: subtest is undefined or (subtest is defined and 'pfc_functional' in subtest) - name: Test PFC WD Timer accuracy. - include_tasks: roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml + include: roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml when: subtest is undefined or (subtest is defined and 'pfc_timer' in subtest) - name: Test PFC WD extreme case when all ports have storm - include_tasks: roles/test/tasks/pfc_wd/functional_test/storm_all_test.yml + include: roles/test/tasks/pfc_wd/functional_test/storm_all_test.yml when: subtest is undefined or (subtest is defined and 'pfc_all_port_storm' in subtest) - name: Set vlan members @@ -191,7 +191,7 @@ - testbed_type in ['t0'] - name: Test PFC WD Functional tests. - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test.yml with_dict: "{{select_test_ports}}" when: - pfc_asym is defined @@ -203,7 +203,7 @@ shell: bash -c 'redis-cli -n 4 hset "DEVICE_METADATA|localhost" default_pfcwd_status enable; sudo pfcwd stop; sleep 5; sudo pfcwd start_default' - name: Test PFC WD function against warm reboot - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_warm_reboot.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_warm_reboot.yml when: warm_reboot_test | bool == true always: @@ -211,7 +211,7 @@ file: path: "{{ run_dir }}" state: absent - + - name: Disable asymmetric PFC on all server interfaces command: config interface pfc asymmetric on {{ item.dut_name }} become: yes diff --git a/ansible/roles/test/tasks/pfc_wd/config_test/config_test.yml b/ansible/roles/test/tasks/pfc_wd/config_test/config_test.yml index 7170120b764..38acf66ce24 100644 --- 
a/ansible/roles/test/tasks/pfc_wd/config_test/config_test.yml +++ b/ansible/roles/test/tasks/pfc_wd/config_test/config_test.yml @@ -10,7 +10,7 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_fwd_action.json" - delegate_to: localhost + connection: local become: false - name: Generate config file for invalid action test. @@ -22,7 +22,7 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_invalid_action.json" - delegate_to: localhost + connection: local become: false - name: Generate config file for invalid detection time test. @@ -34,10 +34,10 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_invalid_detect_time.json" - delegate_to: localhost + connection: local become: false -- name: Generate config file for invalid detection time < lower bound test +- name: Generate config file for invalid detection time < lower bound test vars: pfc_wd_interface_list: "{{ pfc_wd_test_port }}" pfc_wd_action: forward @@ -46,10 +46,10 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_low_detect_time.json" - delegate_to: localhost + connection: local become: false -- name: Generate config file for invalid detection time > higher bound test +- name: Generate config file for invalid detection time > higher bound test vars: pfc_wd_interface_list: "{{ pfc_wd_test_port }}" pfc_wd_action: forward @@ -58,7 +58,7 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_high_detect_time.json" - delegate_to: localhost + connection: local become: false - name: Generate config file for invalid restoration time test. @@ -70,7 +70,7 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_invalid_restore_time.json" - delegate_to: localhost + connection: local become: false - name: Generate config file for invalid restoration time < lower bound test. 
@@ -82,7 +82,7 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_low_restore_time.json" - delegate_to: localhost + connection: local become: false - name: Generate config file for invalid restoration time > higher bound test. @@ -94,7 +94,7 @@ template: src: "{{ pfc_wd_template }}" dest: "{{ tests_location }}/config_test/pfc_wd_high_restore_time.json" - delegate_to: localhost + connection: local become: false - name: Define variables for test @@ -123,56 +123,56 @@ command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_fwd_action.json --write-to-db" test_ignore_file: config_test_ignore_messages errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Config tests - Check invalid action configuration. vars: command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_invalid_action.json --write-to-db" test_expect_file: config_test_expect_invalid_action errors_expected: true - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Config tests - Check invalid detection time configuration. vars: command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_invalid_detect_time.json --write-to-db" test_expect_file: config_test_expect_invalid_detect_time errors_expected: true - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Config tests - Check invalid detection time < lower bound configuration. vars: command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_low_detect_time.json --write-to-db" test_expect_file: config_test_expect_invalid_detect_time errors_expected: true - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Config tests - Check invalid detection time > higher bound configuration. 
vars: command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_high_detect_time.json --write-to-db" test_expect_file: config_test_expect_invalid_detect_time errors_expected: true - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Config tests - Check invalid restoration time configuration. vars: command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_invalid_restore_time.json --write-to-db" test_expect_file: config_test_expect_invalid_restore_time errors_expected: true - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Config tests - Check invalid restoration time < lower bound configuration. vars: command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_low_restore_time.json --write-to-db" test_expect_file: config_test_expect_invalid_restore_time errors_expected: true - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Config tests - Check invalid restoration time > higher bound configuration. vars: command_to_run: "sonic-cfggen -j {{ run_dir }}/pfc_wd_high_restore_time.json --write-to-db" test_expect_file: config_test_expect_invalid_restore_time errors_expected: true - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml always: - name: Clean up config @@ -180,12 +180,12 @@ command_to_run: "pfcwd stop" test_ignore_file: config_test_ignore_messages errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Remove all temporary files from local host. 
file: name: "{{ tests_location }}/config_test/{{ item }}" state: absent with_items: "{{ config_files }}" - delegate_to: localhost + connection: local become: false diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml index b4ded1639a1..dc9a6d00f08 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/check_timer_accuracy_test.yml @@ -4,7 +4,7 @@ testname: functional_test - conn_graph_facts: host={{test_ports[pfc_wd_test_port]['peer_device']}} - delegate_to: localhost + connection: local become: no - name: Prepare variables required for PFC test @@ -19,17 +19,17 @@ peer_login: "{{switch_login[hwsku_map[device_info['HwSku']]]}}" - name: set pfc storm templates based on fanout platform sku - include_tasks: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml + include: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml - name: Deploy pfc packet generater file to fanout switch - include_tasks: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml + include: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml - block: - name: Apply config with proper timers to {{ pfc_wd_test_port }}. 
vars: command_to_run: "pfcwd start --action drop --restoration-time {{ pfc_wd_restore_time }} {{ ports }} {{ pfc_wd_detect_time }}" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Enable routing from mgmt interface to localhost sysctl: @@ -51,7 +51,7 @@ pfc_frames_number: 300000 - name: Calculate detection and restoration timings - include_tasks: roles/test/tasks/pfc_wd/functional_test/timer_test.yml + include: roles/test/tasks/pfc_wd/functional_test/timer_test.yml with_sequence: start=1 end=19 - set_fact: @@ -60,9 +60,9 @@ - debug: var: "{{item}}" - with_items: - - "{{ detect_time_list }}" - - "{{ restore_time_list }}" + with_items: + - detect_time_list + - restore_time_list - name: Verify that real detection time is not greater than configured fail: @@ -89,4 +89,4 @@ vars: command_to_run: "pfcwd stop" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test.yml index 7fff0918888..a76e194c4d9 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test.yml @@ -38,10 +38,10 @@ when: port_type == "vlan" - conn_graph_facts: host={{ peer_device }} - delegate_to: localhost + connection: local become: no -# pfc_frames_number intends to be large enough so that PFC storm keeps happenning until runs pfc_storm_stop command. +# pfc_frames_number intends to be large enough so that PFC storm keeps happenning until runs pfc_storm_stop command. 
- name: Prepare variables required for PFC test set_fact: pfc_queue_index: 4 @@ -64,7 +64,7 @@ peer_login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - name: set pfc storm templates based on fanout platform sku - include_tasks: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml + include: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml - name: Set timers if user has specified set_fact: @@ -79,7 +79,7 @@ pfc_gen_file: pfc_gen.py - name: Deploy pfc packet generater file to fanout switch - include_tasks: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml + include: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml - name: copy the test to ptf container copy: src=roles/test/files/ptftests dest=/root @@ -106,7 +106,7 @@ seconds: 5 - name: "Buffer up the queue in {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -130,7 +130,7 @@ test_ignore_file: "ignore_pfc_wd_messages" - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: Apply drop config to port {{ pfc_wd_test_port }}. 
shell: "pfcwd start --action drop --restoration-time {{ pfc_wd_restore_time_large }} {{ ports }} {{ pfc_wd_detect_time }}" @@ -141,10 +141,10 @@ seconds: 5 - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml - name: Get PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS before test shell: "docker exec -i database redis-cli -n 2 HGET COUNTERS:{{ queue_oid.stdout }} PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS" @@ -155,7 +155,7 @@ register: pfc_wd_rx_drop_before - name: "check egress drop, tx port {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -175,7 +175,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Check ingress drop, rx port {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -195,7 +195,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Send packets via {{pfc_wd_test_port}} to verify that other queue is not affected" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -215,7 +215,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Send packets to {{pfc_wd_test_port}} to verify that other pg is not affected" - 
include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -238,7 +238,7 @@ test_expect_file: "expect_pfc_wd_restore" - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: Stop PFC storm on fanout switch action: apswitch template="{{pfc_wd_storm_stop_template}}" @@ -252,10 +252,10 @@ seconds: "{{ pfc_wait_for_restore_time }}" - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml - name: Get PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS after test shell: "docker exec -i database redis-cli -n 2 HGET COUNTERS:{{ queue_oid.stdout }} PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS" @@ -305,7 +305,7 @@ vars: command_to_run: "pfcwd stop" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml # Verify proper function of drop action - block: @@ -313,7 +313,7 @@ vars: command_to_run: "pfcwd start --action drop --restoration-time {{ pfc_wd_restore_time_large }} {{ ports }} {{ pfc_wd_detect_time }}" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Get PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS before test shell: "docker exec -i database redis-cli -n 2 HGET COUNTERS:{{ queue_oid.stdout }} PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS" @@ -330,7 +330,7 @@ pfc_frames_number: 100000000 - name: Initialize loganalyzer - 
include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: Generate PFC storm on fanout switch action: apswitch template="{{pfc_wd_storm_template}}" @@ -344,13 +344,13 @@ seconds: 5 - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml - name: "Send packets via {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -370,7 +370,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Send packets to {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -390,7 +390,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Send packets via {{pfc_wd_test_port}} to verify that other queue is not affected" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -410,7 +410,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Send packets to {{pfc_wd_test_port}} to verify that other pg is not affected" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -433,7 
+433,7 @@ test_expect_file: "expect_pfc_wd_restore" - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: Stop PFC storm on fanout switch action: apswitch template="{{pfc_wd_storm_stop_template}}" @@ -447,10 +447,10 @@ seconds: "{{ pfc_wait_for_restore_time }}" - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml - name: Get PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS after test shell: "docker exec -i database redis-cli -n 2 HGET COUNTERS:{{ queue_oid.stdout }} PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS" @@ -504,7 +504,7 @@ vars: command_to_run: "pfcwd stop" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml # Verify proper function of forward action - block: @@ -512,7 +512,7 @@ vars: command_to_run: "pfcwd start --action forward --restoration-time {{ pfc_wd_restore_time_large }} {{ ports }} {{ pfc_wd_detect_time }}" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml - name: Get PFC_WD_QUEUE_STATS_TX_PACKETS before test shell: "docker exec -i database redis-cli -n 2 HGET COUNTERS:{{ queue_oid.stdout }} PFC_WD_QUEUE_STATS_TX_PACKETS" @@ -529,7 +529,7 @@ pfc_frames_number: 100000000 - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: Generate PFC storm on 
fanout switch action: apswitch template="{{pfc_wd_storm_template}}" @@ -543,13 +543,13 @@ seconds: 1 - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml - name: "Send packets via {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -569,7 +569,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Send packets to {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -592,7 +592,7 @@ test_expect_file: "expect_pfc_wd_restore" - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: Stop PFC storm on fanout switch action: apswitch template="{{pfc_wd_storm_stop_template}}" @@ -606,10 +606,10 @@ seconds: "{{ pfc_wait_for_restore_time }}" - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml - name: Get PFC_WD_QUEUE_STATS_TX_PACKETS after test shell: "docker exec -i database redis-cli -n 2 HGET 
COUNTERS:{{ queue_oid.stdout }} PFC_WD_QUEUE_STATS_TX_PACKETS" @@ -653,7 +653,7 @@ name: "{{ tests_location }}/functional_test/{{ item }}" state: absent with_items: "{{ config_files }}" - delegate_to: localhost + connection: local become: false always: @@ -668,5 +668,5 @@ vars: command_to_run: "pfcwd stop" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml index 5ecb66ec740..249a0a17f00 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml @@ -45,7 +45,7 @@ when: port_type == "vlan" - conn_graph_facts: host={{ peer_device }} - delegate_to: localhost + connection: local become: no - name: Prepare variables required for PFC test @@ -67,11 +67,11 @@ peer_login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - name: set pfc storm templates based on fanout platform sku - include_tasks: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml + include: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml - name: Test PFC restore function per pg/queue - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_restore_perq.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_restore_perq.yml vars: pfc_queue_index: "{{item}}" class_enable: "{{(1).__lshift__(item)}}" diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore_perq.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore_perq.yml index bf27d76b19e..c123eed51ef 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore_perq.yml +++ 
b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_restore_perq.yml @@ -10,7 +10,7 @@ test_expect_file: "expect_pfc_wd_restore" - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml vars: testname_unique_gen: true @@ -30,14 +30,14 @@ seconds: 15 - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD restored from deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml # 2. Verity the forward action on both ingress and egress via ptf dataplane traffic test - name: "Send packets via {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -57,7 +57,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Send packets to {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml index 92946969efd..29523e14493 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml @@ -47,7 +47,7 @@ when: port_type == "vlan" - conn_graph_facts: host={{ peer_device }} - delegate_to: localhost + connection: local become: no - name: Prepare variables required for PFC test @@ -69,11 +69,11 
@@ peer_login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - name: set pfc storm templates based on fanout platform sku - include_tasks: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml + include: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml - name: Test PFC storm function per pg/queue - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_storm_perq.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_storm_perq.yml vars: pfc_queue_index: "{{item}}" class_enable: "{{(1).__lshift__(item)}}" diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm_perq.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm_perq.yml index e74de57938a..ca4d630cea7 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm_perq.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_storm_perq.yml @@ -11,7 +11,7 @@ pfc_gen_file: pfc_gen.py - name: Deploy pfc packet generater file to fanout switch - include_tasks: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml + include: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml - name: copy the test to ptf container copy: src=roles/test/files/ptftests dest=/root @@ -23,7 +23,7 @@ test_ignore_file: "ignore_pfc_wd_messages" - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml vars: testname_unique_gen: true @@ -44,14 +44,14 @@ seconds: 15 - name: Check if logs contain message that PFC WD detected storm - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Check if logs contain message that PFC WD detected storm - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml + include: 
roles/test/files/tools/loganalyzer/loganalyzer_end.yml # 2. Verify the drop action in place on both ingress and egress via ptf dataplane traffic test - name: "check egress drop, tx port {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests @@ -71,7 +71,7 @@ ptf_extra_options: "--relax --debug info --log-file /tmp/pfc_wd.PfcWdTest.{{lookup('pipe','date +%Y-%m-%d-%H:%M:%S')}}.log " - name: "Check ingress drop, rx port {{pfc_wd_test_port}}" - include_tasks: roles/test/tasks/ptf_runner.yml + include: roles/test/tasks/ptf_runner.yml vars: ptf_test_name: PFC WD test ptf_test_dir: ptftests diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_warm_reboot.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_warm_reboot.yml index 1fae92ec1ed..b96f032ff2b 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_warm_reboot.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/functional_test_warm_reboot.yml @@ -4,24 +4,24 @@ # PFC storm detect and restore functional after warm-reboot - block: - name: Test PFC WD storm function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml with_dict: "{{select_test_ports}}" - name: Test PFC WD restore function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml with_dict: "{{select_test_ports}}" - name: Issue warm reboot on the device - include_tasks: roles/test/tasks/common_tasks/reboot_sonic.yml + include: roles/test/tasks/common_tasks/reboot_sonic.yml vars: reboot_type: warm-reboot - name: Test PFC WD storm function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml + include: 
roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml with_dict: "{{select_test_ports}}" - name: Test PFC WD restore function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml with_dict: "{{select_test_ports}}" @@ -31,16 +31,16 @@ # PFC storm stopped and restored after warm-reboot - block: - name: Test PFC WD storm function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml with_dict: "{{select_test_ports}}" - name: Issue warm reboot on the device - include_tasks: roles/test/tasks/common_tasks/reboot_sonic.yml + include: roles/test/tasks/common_tasks/reboot_sonic.yml vars: reboot_type: warm-reboot - name: Test PFC WD storm function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml vars: start_storm: false start_marker: "NOTICE swss#orchagent: :- setWarmStartState: orchagent warm start state changed to initialized" @@ -48,7 +48,7 @@ with_dict: "{{select_test_ports}}" - name: Test PFC WD restore function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml with_dict: "{{select_test_ports}}" # Test case 3: @@ -58,18 +58,18 @@ # Verify PFC storm detection and restoration functional - block: - name: PFC storming from fanout switch - include_tasks: roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor.yml + include: roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor.yml with_dict: "{{select_test_ports}}" - name: Issue warm reboot on the device - include_tasks: roles/test/tasks/common_tasks/reboot_sonic.yml + include: roles/test/tasks/common_tasks/reboot_sonic.yml vars: reboot_type: warm-reboot - name: Test 
PFC WD storm function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_storm.yml with_dict: "{{select_test_ports}}" - name: Test PFC WD restore function - include_tasks: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml + include: roles/test/tasks/pfc_wd/functional_test/functional_test_restore.yml with_dict: "{{select_test_ports}}" diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml index 394c4fe982f..1ee48c20267 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml @@ -1,5 +1,5 @@ - conn_graph_facts: host={{item.key}} - delegate_to: localhost + connection: local become: no - set_fact: @@ -12,43 +12,34 @@ pfc_fanout_interface: "{{item.value | list | join(',')}}" - name: set pfc storm templates based on fanout platform sku - include_tasks: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml - -- set_fact: + include: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml + +- set_fact: storm_action_template: "{%if storm_action=='start'%}{{pfc_wd_storm_template}}{%else%}{{pfc_wd_storm_stop_template}}{%endif%}" - name: Deploy pfc packet generater file to fanout switch - include_tasks: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml + include: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml - name: copy the test to ptf container copy: src=roles/test/files/ptftests dest=/root delegate_to: "{{ ptf_host }}" - name: Initialize loganalyzer - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - -- block: - - name: Take action on fanout switch - action: apswitch template="{{storm_action_template}}" - args: - host: "{{peer_mgmt}}" - login: "{{peer_login}}" - connection: switch - - - 
name: Let PFC storm happen for a while - pause: - seconds: 5 - - - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - - - name: Check if logs contain message that PFC WD detected deadlock - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_end.yml - - rescue: - - name: Stop PFC storm on fanout switch - action: apswitch template="{{pfc_wd_storm_stop_template}}" - args: - host: "{{peer_mgmt}}" - login: "{{peer_login}}" - connection: switch + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + +- name: Take action on fanout switch + action: apswitch template="{{storm_action_template}}" + args: + host: "{{peer_mgmt}}" + login: "{{peer_login}}" + connection: switch + +- name: Let PFC storm happen for a while + pause: + seconds: 5 + +- name: Check if logs contain message that PFC WD detected deadlock + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + +- name: Check if logs contain message that PFC WD detected deadlock + include: roles/test/files/tools/loganalyzer/loganalyzer_end.yml diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_test.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_test.yml index 839e475f448..fcbfcbad534 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_test.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_all_test.yml @@ -1,4 +1,4 @@ -# pfc_frames_number intends to be large enough so that PFC storm keeps happenning until runs pfc_storm_stop command. +# pfc_frames_number intends to be large enough so that PFC storm keeps happenning until runs pfc_storm_stop command. 
- name: Prepare variables required for PFC test set_fact: pfc_queue_index: 3 @@ -23,8 +23,8 @@ register: peer_ports_result - set_fact: - peer_devices: "{{peer_devices_result.results | map(attribute='ansible_facts.peer_device_item') | list}}" - peer_ports: "{{peer_ports_result.results | map(attribute='ansible_facts.peer_port_item') | list}}" + peer_devices: "{{peer_devices_result.results | map(attribute='ansible_facts.peer_device_item') | list}}" + peer_ports: "{{peer_ports_result.results | map(attribute='ansible_facts.peer_port_item') | list}}" - set_fact: pfc_gen_file: pfc_gen.py @@ -32,7 +32,7 @@ - name: Get the peer device and peer port map combine_list_to_dict: keys={{peer_devices}} values={{peer_ports}} become: no - delegate_to: localhost + connection: local failed_when: False - block: @@ -41,24 +41,24 @@ become: yes - name: Start to copy and send PFC packets from all peers - vars: + vars: test_expect_file: "expect_pfc_wd_detect" test_ignore_file: "ignore_pfc_wd_messages" storm_action: "start" - include_tasks: roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml - with_dict: "{{combined_dict}}" + include: roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml + with_dict: "{{combined_dict}}" - name: Stop PFC packets from all peers - vars: + vars: test_expect_file: "expect_pfc_wd_restore" test_ignore_file: "ignore_pfc_wd_messages" storm_action: "stop" - include_tasks: roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml - with_dict: "{{combined_dict}}" + include: roles/test/tasks/pfc_wd/functional_test/storm_all_action.yml + with_dict: "{{combined_dict}}" always: - name: Clean up config vars: command_to_run: "pfcwd stop" errors_expected: false - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor.yml index 
56b26de8fd4..f6afa7d4a34 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor.yml @@ -4,7 +4,7 @@ pfc_wd_test_port: "{{item.key}}" - conn_graph_facts: host={{ peer_device }} - delegate_to: localhost + connection: local become: no - name: Prepare parameters required for PFC storming @@ -24,14 +24,14 @@ pfc_gen_file: pfc_gen.py - name: Deploy pfc packet generater file to fanout switch - include_tasks: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml + include: roles/test/tasks/pfc_wd/functional_test/deploy_pfc_pktgen.yml - set_fact: peer_login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - name: PFC storming from fanout switch per queue - include_tasks: roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor_perq.yml + include: roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor_perq.yml vars: pfc_queue_index: "{{item}}" with_items: "{{pfc_queue_indices}}" diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor_perq.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor_perq.yml index f809425b5f9..61ed3d00dd1 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor_perq.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/storm_from_neighbor_perq.yml @@ -1,6 +1,6 @@ - block: - name: set pfc storm templates based on fanout platform sku - include_tasks: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml + include: roles/test/tasks/pfc_wd/functional_test/set_pfc_storm_templates.yml - set_fact: storm_defer_time: "{{120 | random()}}" diff --git a/ansible/roles/test/tasks/pfc_wd/functional_test/timer_test.yml b/ansible/roles/test/tasks/pfc_wd/functional_test/timer_test.yml index 35f22044ad1..39e67b86d80 100644 --- a/ansible/roles/test/tasks/pfc_wd/functional_test/timer_test.yml +++ b/ansible/roles/test/tasks/pfc_wd/functional_test/timer_test.yml @@ -6,6 +6,7 
@@ pause: seconds: 1 + - name: Generate PFC storm on fanout switch action: apswitch template="{{pfc_wd_storm_template}}" args: @@ -18,7 +19,7 @@ seconds: 5 - name: Find PFC storm start marker - shell: grep "[P]FC_STORM_START" /var/log/syslog + shell: grep "PFC_STORM_START" /var/log/syslog register: storm_start become: true @@ -27,7 +28,7 @@ register: storm_start_millis - name: Find PFC storm detect message - shell: grep "[d]etected PFC storm" /var/log/syslog + shell: grep "detected PFC storm" /var/log/syslog register: storm_detect become: true @@ -35,12 +36,8 @@ shell: "date -d {{storm_detect.stdout.replace(' ',' ').split(' ')[2]}} +'%s%3N'" register: storm_detect_millis -- name: Wait for PFC storm end marker to appear in logs - pause: - seconds: 1 - - name: Find PFC storm end marker - shell: grep "[P]FC_STORM_END" /var/log/syslog + shell: grep "PFC_STORM_END" /var/log/syslog register: storm_end become: true @@ -49,7 +46,7 @@ register: storm_end_millis - name: Find PFC storm restore message - shell: grep "[s]torm restored" /var/log/syslog + shell: grep "storm restored" /var/log/syslog register: storm_restore become: true @@ -64,8 +61,8 @@ - debug: var: "{{item}}" with_items: - - "{{ real_detect_time }}" - - "{{ real_restore_time }}" + - real_detect_time + - real_restore_time - name: Append detect and restore time to lists set_fact: diff --git a/ansible/roles/test/tasks/pfcwd/config_shape_rate.yml b/ansible/roles/test/tasks/pfcwd/config_shape_rate.yml index 7492b04a027..b445841a68b 100644 --- a/ansible/roles/test/tasks/pfcwd/config_shape_rate.yml +++ b/ansible/roles/test/tasks/pfcwd/config_shape_rate.yml @@ -7,19 +7,19 @@ interface: "{{item.interface}}" shape_rate: "{{item.shape_rate}}" set: "{{item.set}}" - + - conn_graph_facts: host={{ device }} - delegate_to: localhost - + connection: local + - set_fact: device_host: "{{device_info['mgmtip']}}" device_hwsku: "{{device_info['HwSku']}}" - + - name: Set shape rate of interface {{interface}} on {{device_host}} 
action: apswitch template=config_interface_shape_rate.j2 args: host: "{{device_host}}" login: "{{switch_login[hwsku_map[device_hwsku]]}}" connection: switch - + diff --git a/ansible/roles/test/tasks/port_toggle.yml b/ansible/roles/test/tasks/port_toggle.yml index 7bfdec72024..4e6f02b38fd 100644 --- a/ansible/roles/test/tasks/port_toggle.yml +++ b/ansible/roles/test/tasks/port_toggle.yml @@ -11,21 +11,19 @@ - name: Verify interfaces are all down assert: { that: "{{ ansible_interface_link_down_ports | length }} == {{ minigraph_ports | length }}" } -- block: - - name: build shell command string - debug: msg="PORTS={{minigraph_ports.keys() | join(' ')}}; for port in $PORTS; do config interface startup $port; done" +- always: + - name: build shell command string + debug: msg="PORTS={{minigraph_ports.keys() | join(' ')}}; for port in $PORTS; do config interface startup $port; done" - - name: turn on all ports on device - shell: PORTS="{{minigraph_ports.keys() | join(' ')}}"; for port in $PORTS; do config interface startup $port; done - become: yes + - name: turn on all ports on device + shell: PORTS="{{minigraph_ports.keys() | join(' ')}}"; for port in $PORTS; do config interface startup $port; done + become: yes - - name: wait 1 minute for ports to come up - pause: seconds=60 + - name: wait 1 minute for ports to come up + pause: seconds=60 - - name: Get interface facts - interface_facts: up_ports={{minigraph_ports}} + - name: Get interface facts + interface_facts: up_ports={{minigraph_ports}} - - name: Verify interfaces are up correctly - assert: { that: "{{ ansible_interface_link_down_ports | length }} == 0" } - tags: - - always + - name: Verify interfaces are up correctly + assert: { that: "{{ ansible_interface_link_down_ports | length }} == 0" } diff --git a/ansible/roles/test/tasks/process_checker.yml b/ansible/roles/test/tasks/process_checker.yml index 7820f266251..ef6f4426e16 100644 --- a/ansible/roles/test/tasks/process_checker.yml +++ 
b/ansible/roles/test/tasks/process_checker.yml @@ -5,7 +5,7 @@ # running - yes/no (optional default:yes) # # Example: -# - include_tasks: process_checker.yml +# - include: process_checker.yml # vars: # process: {{ item }} # with_items: diff --git a/ansible/roles/test/tasks/ptf_runner.yml b/ansible/roles/test/tasks/ptf_runner.yml index 87e44946d78..67a5f15d45b 100644 --- a/ansible/roles/test/tasks/ptf_runner.yml +++ b/ansible/roles/test/tasks/ptf_runner.yml @@ -12,7 +12,7 @@ # ptf_extra_options - extra options for ptf # # Example: -# - include_tasks: ptf_runner.yml +# - include: ptf_runner.yml # vars: # ptf_test_name: COPP test - {{ item }} # ptf_test_dir: ptftests diff --git a/ansible/roles/test/tasks/ptf_runner_reboot.yml b/ansible/roles/test/tasks/ptf_runner_reboot.yml index f614eb1dffa..166b7e2758b 100644 --- a/ansible/roles/test/tasks/ptf_runner_reboot.yml +++ b/ansible/roles/test/tasks/ptf_runner_reboot.yml @@ -37,11 +37,11 @@ when: item and item != 'None' - name: Update supervisor configuration - include_tasks: "roles/test/tasks/common_tasks/update_supervisor.yml" + include: "roles/test/tasks/common_tasks/update_supervisor.yml" vars: supervisor_host: "{{ ptf_host }}" - - include_tasks: ptf_runner.yml + - include: ptf_runner.yml vars: ptf_test_name: Advanced-reboot test ptf_test_dir: ptftests @@ -72,8 +72,6 @@ - allow_vlan_flooding='{{ allow_vlan_flooding }}' - sniff_time_incr={{ sniff_time_incr }} - setup_fdb_before_test=True - - vnet={{ vnet }} - - vnet_pkts='{{ vnet_pkts }}' always: @@ -148,12 +146,6 @@ dest: '/tmp/' flat: yes - - name: Clear ARP table - command: sonic-clear arp - - - name: Clear FDB table - command: sonic-clear fdb all - - name: Wait for the DUT to be ready for the next test pause: seconds=420 when: (preboot_list|length > 0 and None not in preboot_list) or diff --git a/ansible/roles/test/tasks/qos_get_ports.yml b/ansible/roles/test/tasks/qos_get_ports.yml index cb42a84208c..66bf33e90fe 100644 --- 
a/ansible/roles/test/tasks/qos_get_ports.yml +++ b/ansible/roles/test/tasks/qos_get_ports.yml @@ -80,7 +80,7 @@ dut_switch_ports: "{{dut_switch_ports}}" minigraph_bgp: "{{minigraph_bgp}}" minigraph_neighbors: "{{minigraph_neighbors}}" - delegate_to: localhost + connection: local - debug: var: testing_ports_ip @@ -99,7 +99,7 @@ - name: Generate IPs in VLAN range get_ip_in_range: num="{{dut_switch_ports|length}}" prefix="{{minigraph_vlan_interfaces[0]['addr']}}/{{minigraph_vlan_interfaces[0]['prefixlen']}}" exclude_ips="{{minigraph_vlan_interfaces[0]['addr']}}" become: no - delegate_to: localhost + connection: local failed_when: False when: minigraph_vlans | length > 0 @@ -136,7 +136,7 @@ # Get buffers size # Ingress lossless -- include_tasks: roles/test/tasks/qos_get_max_buff_size.yml +- include: roles/test/tasks/qos_get_max_buff_size.yml vars: target_table: 'BUFFER_PG' target_port_name: "{{dut_switch_ports[src_port_id|int]}}" @@ -158,7 +158,7 @@ # Ingress lossy -- include_tasks: roles/test/tasks/qos_get_max_buff_size.yml +- include: roles/test/tasks/qos_get_max_buff_size.yml vars: target_table: 'BUFFER_PG' target_port_name: "{{dut_switch_ports[src_port_id|int]}}" @@ -180,7 +180,7 @@ # Egress lossless -- include_tasks: roles/test/tasks/qos_get_max_buff_size.yml +- include: roles/test/tasks/qos_get_max_buff_size.yml vars: target_table: 'BUFFER_QUEUE' target_port_name: "{{dut_switch_ports[src_port_id|int]}}" @@ -197,7 +197,7 @@ # Egress lossy -- include_tasks: roles/test/tasks/qos_get_max_buff_size.yml +- include: roles/test/tasks/qos_get_max_buff_size.yml vars: target_table: 'BUFFER_QUEUE' target_port_name: "{{dut_switch_ports[src_port_id|int]}}" diff --git a/ansible/roles/test/tasks/qos_sai.yml b/ansible/roles/test/tasks/qos_sai.yml index eaa10743c2e..3b0258623a0 100644 --- a/ansible/roles/test/tasks/qos_sai.yml +++ b/ansible/roles/test/tasks/qos_sai.yml @@ -9,21 +9,13 @@ - set_fact: disable_test: "{{disable_test | default('true') | bool}}" -- include_tasks: 
add_container_to_inventory.yml - vars: - container_name: "{{ item }}" - with_items: - - "lldp" - - "bgp" - - "syncd" - - block: - name: Getting minigraph facts minigraph_facts: host={{inventory_hostname}} become: no - name: Get ports info. - include_tasks: roles/test/tasks/qos_get_ports.yml + include: roles/test/tasks/qos_get_ports.yml - name: Check if lossless buffer profile is derived fail: msg="Lossless Buffer profile could not be retreived" @@ -56,7 +48,9 @@ - name: Ensure LLDP Daemon stopped become: yes supervisorctl: state=stopped name={{item}} - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python with_items: - lldpd - lldp-syncd @@ -64,7 +58,9 @@ - name: Ensure BGP Daemon stopped become: yes supervisorctl: state=stopped name=bgpd - delegate_to: "{{ ansible_host }}_bgp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i bgp python - name: Add iptables rule to drop BGP SYN Packet from peer so that we do not ACK back. Add at top so existing rules don't have precedence over it. 
shell: "iptables -I INPUT 1 -j DROP -p tcp --destination-port bgp" @@ -78,14 +74,18 @@ - name: Deploy script to DUT/syncd copy: src=roles/test/files/mlnx/packets_aging.py dest=/root/packets_aging.py - delegate_to: "{{ ansible_host }}_syncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python when: minigraph_hwsku is defined and minigraph_hwsku in mellanox_hwskus - name: Disable Mellanox packet aging shell: python /root/packets_aging.py disable register: result failed_when: result.stderr != '' - delegate_to: "{{ ansible_host }}_syncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python when: minigraph_hwsku is defined and minigraph_hwsku in mellanox_hwskus - name: copy ptf tests @@ -101,7 +101,7 @@ delegate_to: "{{ptf_host}}" - name: Init PTF base test parameters - set_fact: + set_fact: ptf_base_params: - router_mac={% if testbed_type not in ['t0', 't0-64', 't0-116'] %}'{{ansible_Ethernet0['macaddress']}}'{% else %}''{% endif %} - server='{{ansible_host}}' @@ -109,7 +109,7 @@ - sonic_asic_type='{{sonic_asic_type}}' # Unpause all paused port - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: release all paused ports test_path: sai_qos_tests.ReleaseAllPorts @@ -124,7 +124,7 @@ - debug: var: arp_entries - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: populate arp on all ports test_path: sai_qos_tests.ARPpopulate @@ -139,23 +139,8 @@ - src_port_ip='{{src_port_ip}}' when: testbed_type in ['t0', 't0-64', 't0-116'] or arp_entries.stdout.find('incomplete') == -1 - - include_tasks: qos_sai_ptf.yml - vars: - test_name: populate arp on all ports - test_path: sai_qos_tests.ARPpopulatePTF - test_params: - - dst_port_id='{{dst_port_id}}' - - dst_port_ip='{{dst_port_ip}}' - - dst_port_2_id='{{dst_port_2_id}}' - - dst_port_2_ip='{{dst_port_2_ip}}' - - dst_port_3_id='{{dst_port_3_id}}' - - dst_port_3_ip='{{dst_port_3_ip}}' - - 
src_port_id='{{src_port_id}}' - - src_port_ip='{{src_port_ip}}' - when: testbed_type in ['ptf32', 'ptf64'] - # XOFF limit - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: xoff limit ptf test dscp = {{qp_sc.xoff_1.dscp}}, ecn = {{qp_sc.xoff_1.ecn}} test_path: sai_qos_tests.PFCtest @@ -173,7 +158,7 @@ - pkts_num_trig_pfc='{{qp_sc.xoff_1.pkts_num_trig_pfc}}' - pkts_num_trig_ingr_drp='{{qp_sc.xoff_1.pkts_num_trig_ingr_drp}}' - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: xoff limit ptf test dscp = {{qp_sc.xoff_2.dscp}}, ecn = {{qp_sc.xoff_2.ecn}} test_path: sai_qos_tests.PFCtest @@ -192,7 +177,7 @@ - pkts_num_trig_ingr_drp='{{qp_sc.xoff_2.pkts_num_trig_ingr_drp}}' # XON limit - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: xon limit ptf test dscp = {{qp.xon_1.dscp}}, ecn = {{qp.xon_1.ecn}} test_path: sai_qos_tests.PFCXonTest @@ -213,7 +198,7 @@ - pkts_num_trig_pfc='{{qp.xon_1.pkts_num_trig_pfc}}' - pkts_num_dismiss_pfc='{{qp.xon_1.pkts_num_dismiss_pfc}}' - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: xon limit ptf test dscp = {{qp.xon_2.dscp}}, ecn = {{qp.xon_2.ecn}} test_path: sai_qos_tests.PFCXonTest @@ -235,7 +220,7 @@ - pkts_num_dismiss_pfc='{{qp.xon_2.pkts_num_dismiss_pfc}}' # Headroom pool size - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: headroom pool size ptf test ecn = {{qp_sc.hdrm_pool_size.ecn}} test_path: sai_qos_tests.HdrmPoolSizeTest @@ -257,7 +242,7 @@ minigraph_hwsku in ['Arista-7060CX-32S-C32', 'Celestica-DX010-C32', 'Arista-7260CX3-D108C8', 'Force10-S6100', 'Arista-7260CX3-Q64'] # Lossy queue - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: Lossy queue, shared buffer dynamic allocation. 
dscp = {{qp.lossy_queue_1.dscp}}, ecn = {{qp.lossy_queue_1.ecn}} test_path: sai_qos_tests.LossyQueueTest @@ -277,7 +262,7 @@ - pkts_num_trig_egr_drp='{{qp.lossy_queue_1.pkts_num_trig_egr_drp}}' # DSCP to queue mapping - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: dscp to queue mapping ptf test test_path: sai_qos_tests.DscpMappingPB @@ -288,7 +273,7 @@ - src_port_ip='{{src_port_ip}}' # WRR test - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: DWRR test_path: sai_qos_tests.WRRtest @@ -316,7 +301,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # PG shared watermark test - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: PG shared watermark test, lossless traffic test_path: sai_qos_tests.PGSharedWatermarkTest @@ -346,7 +331,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # PG shared watermark test - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: PG shared watermark test, lossy traffic test_path: sai_qos_tests.PGSharedWatermarkTest @@ -376,7 +361,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # PG headroom watermark test - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: PG headroom watermark test test_path: sai_qos_tests.PGHeadroomWatermarkTest @@ -401,7 +386,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # Queue shared watermark test - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: Queue shared watermark test, lossless traffic test_path: sai_qos_tests.QSharedWatermarkTest @@ -426,7 +411,7 @@ shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' # Queue shared watermark test - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: Queue shared 
watermark test, lossy traffic test_path: sai_qos_tests.QSharedWatermarkTest @@ -449,7 +434,7 @@ when: not disable_test # DSCP to pg mapping - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: dscp to pg mapping ptf test test_path: sai_qos_tests.DscpToPgMapping @@ -472,7 +457,7 @@ command: redis-cli -n 4 HSET "{{lossless_sched_profile}}" weight {{qp.wrr_chg.lossless_weight}} # WRR test - - include_tasks: qos_sai_ptf.yml + - include: qos_sai_ptf.yml vars: test_name: DWRR runtime weight change test_path: sai_qos_tests.WRRtest @@ -505,7 +490,9 @@ - name: Restore LLDP Daemon become: yes supervisorctl: state=started name={{item}} - delegate_to: "{{ ansible_host }}_lldp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python with_items: - lldpd - lldp-syncd @@ -518,13 +505,14 @@ shell: "ip6tables -D INPUT -j DROP -p tcp --destination-port bgp" become: true - - name: Enable bgpd - become: yes + - name: Restore BGP daemon and docker supervisorctl: state=started name=bgpd - delegate_to: "{{ ansible_host }}_bgp" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i bgp python notify: - Restart Quagga Daemon - + - name: Restore original watermark polling status shell: counterpoll watermark {{watermark_status.stdout}} when: watermark_status is defined and (watermark_status.stdout == "enable" or watermark_status.stdout == "disable") @@ -539,7 +527,9 @@ shell: python /root/packets_aging.py enable register: result failed_when: result.stderr != '' - delegate_to: "{{ ansible_host }}_syncd" + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python when: minigraph_hwsku is defined and minigraph_hwsku in mellanox_hwskus - meta: flush_handlers diff --git a/ansible/roles/test/tasks/reboot.yml b/ansible/roles/test/tasks/reboot.yml index 519c8811a36..6c4f4df43d9 100644 --- a/ansible/roles/test/tasks/reboot.yml +++ b/ansible/roles/test/tasks/reboot.yml @@ 
-1,12 +1,12 @@ - name: reboot - include_tasks: common_tasks/reboot_sonic.yml + include: common_tasks/reboot_sonic.yml - name: sanity check to pass - include_tasks: base_sanity.yml + include: base_sanity.yml - name: Gathering minigraph facts about the device minigraph_facts: host={{ inventory_hostname }} when: minigraph_interfaces is not defined -- include_tasks: interface.yml +- include: interface.yml diff --git a/ansible/roles/test/tasks/repeat_harness.yml b/ansible/roles/test/tasks/repeat_harness.yml index d083ddf2cb9..4d140f92c1a 100644 --- a/ansible/roles/test/tasks/repeat_harness.yml +++ b/ansible/roles/test/tasks/repeat_harness.yml @@ -9,5 +9,5 @@ - debug: msg="Execute repeat_tasks.yml {{ repeat_count }} time(s)" -- include_tasks: repeat_tasks.yml +- include: repeat_tasks.yml with_sequence: end={{repeat_count}} diff --git a/ansible/roles/test/tasks/repeat_tasks.yml b/ansible/roles/test/tasks/repeat_tasks.yml index e168147b2b1..0c9ee862741 100644 --- a/ansible/roles/test/tasks/repeat_tasks.yml +++ b/ansible/roles/test/tasks/repeat_tasks.yml @@ -6,7 +6,7 @@ msg: "===================== Repeating starts ======================" # Add tasks here to be repeated: -# e.g. - include_tasks: reboot.yml +# e.g. 
- include: reboot.yml - debug: diff --git a/ansible/roles/test/tasks/restart_swss.yml b/ansible/roles/test/tasks/restart_swss.yml index 65062801022..cd64bd33081 100644 --- a/ansible/roles/test/tasks/restart_swss.yml +++ b/ansible/roles/test/tasks/restart_swss.yml @@ -9,12 +9,12 @@ seconds: 120 - name: check basic sanity of the device - include_tasks: base_sanity.yml + include: base_sanity.yml vars: recover: false - name: validate all interfaces are up - include_tasks: interface.yml + include: interface.yml vars: recover: false diff --git a/ansible/roles/test/tasks/restart_syncd.yml b/ansible/roles/test/tasks/restart_syncd.yml index 499da37246e..c47d144cb9a 100644 --- a/ansible/roles/test/tasks/restart_syncd.yml +++ b/ansible/roles/test/tasks/restart_syncd.yml @@ -49,12 +49,12 @@ seconds: 120 - name: check basic sanity of the device - include_tasks: base_sanity.yml + include: base_sanity.yml vars: recover: false - name: validate all interfaces are up - include_tasks: interface.yml + include: interface.yml vars: recover: false diff --git a/ansible/roles/test/tasks/resume_fanout_ports.yml b/ansible/roles/test/tasks/resume_fanout_ports.yml index 6ddde62ec47..7042da3ce32 100644 --- a/ansible/roles/test/tasks/resume_fanout_ports.yml +++ b/ansible/roles/test/tasks/resume_fanout_ports.yml @@ -10,7 +10,7 @@ neighbor_interface: "{{neighbors[interface]['peerport']}}" - conn_graph_facts: host={{ peer_device }} - delegate_to: localhost + connection: local - set_fact: peer_host: "{{device_info['mgmtip']}}" @@ -20,8 +20,5 @@ intfs_to_exclude: "{{interface}}" - name: bring up neighbor interface {{neighbor_interface}} on {{peer_host}} - action: apswitch template=neighbor_interface_no_shut_single.j2 - args: - host: "{{peer_host}}" - login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - connection: switch + shell: config interface startup {{ neighbor_interface }} + become: true diff --git a/ansible/roles/test/tasks/run_analyze_and_check.yml 
b/ansible/roles/test/tasks/run_analyze_and_check.yml index 16bcd5650e1..422e9358054 100644 --- a/ansible/roles/test/tasks/run_analyze_and_check.yml +++ b/ansible/roles/test/tasks/run_analyze_and_check.yml @@ -1,5 +1,5 @@ - name: Use loganalyzer to check for the error messages {{ testname }}. - include_tasks: "{{ loganalyzer_analyze }}" + include: "{{ loganalyzer_analyze }}" - name: Get the total number of error messages. shell: grep "TOTAL MATCHES" "{{ test_out_dir }}/{{ summary_file }}" | sed -n "s/TOTAL MATCHES:[[:space:]]*//p" diff --git a/ansible/roles/test/tasks/run_command_with_log_analyzer.yml b/ansible/roles/test/tasks/run_command_with_log_analyzer.yml index 4bdfea2a11b..6f04d1e16d5 100644 --- a/ansible/roles/test/tasks/run_command_with_log_analyzer.yml +++ b/ansible/roles/test/tasks/run_command_with_log_analyzer.yml @@ -19,14 +19,14 @@ - block: - name: Initialize loganalizer. Put start marker to log file. - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_init.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_init.yml - name: Run command {{ command_to_run }} command: "{{ command_to_run }}" become: yes - name: Use loganalyzer to check for the error messages {{ testname }} / {{ command_to_run }}. - include_tasks: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + include: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml - name: Read loganalyzer summary file. shell: cat "{{ test_out_dir }}/{{ summary_file }}" diff --git a/ansible/roles/test/tasks/run_config_test.yml b/ansible/roles/test/tasks/run_config_test.yml index a39b5b8821a..2c4834b4416 100644 --- a/ansible/roles/test/tasks/run_config_test.yml +++ b/ansible/roles/test/tasks/run_config_test.yml @@ -14,9 +14,9 @@ - name: Run command. 
vars: command_to_run: "docker exec -i swss swssconfig {{ docker_testdir }}/{{ config_file }}" - include_tasks: roles/test/tasks/run_command_with_log_analyzer.yml + include: roles/test/tasks/run_command_with_log_analyzer.yml always: - name: Do configuration cleanup after {{ testname }} / {{ config_file }} - include_tasks: roles/test/tasks/run_config_cleanup.yml + include: roles/test/tasks/run_config_cleanup.yml when: run_cleanup == true diff --git a/ansible/roles/test/tasks/run_loganalyzer.yml b/ansible/roles/test/tasks/run_loganalyzer.yml index ecdce817778..125c058b9a7 100644 --- a/ansible/roles/test/tasks/run_loganalyzer.yml +++ b/ansible/roles/test/tasks/run_loganalyzer.yml @@ -16,9 +16,9 @@ result_file: result.loganalysis.{{ testname_unique }}.log - name: Initialize loganalizer. Put start marker to log file. - include_tasks: "{{ loganalyzer_init }}" + include: "{{ loganalyzer_init }}" when: loganalyzer_run_init == true - name: Run analyze-phase and check the loganalyzer output for the errors. 
- include_tasks: "{{ run_analyze_and_check }}" + include: "{{ run_analyze_and_check }}" when: loganalyzer_run_analyze == true diff --git a/ansible/roles/test/tasks/saiserver.yml b/ansible/roles/test/tasks/saiserver.yml index ffa68a21794..745b4f29e61 100644 --- a/ansible/roles/test/tasks/saiserver.yml +++ b/ansible/roles/test/tasks/saiserver.yml @@ -8,7 +8,7 @@ # Stop syncd and deploy saiserver (only for SAI testing purpose) - name: Stop syncd container - include_tasks: ../../sonic-common/tasks/sonicdocker.yml + include: ../../sonic-common/tasks/sonicdocker.yml vars: docker_container: syncd docker_image: "{{ image_id_syncd if sonic_asic_type!='mellanox' else image_id_syncd_mlnx }}" @@ -17,7 +17,7 @@ when: no_deploy_saiserver is not defined - name: Start saiserver docker container - include_tasks: ../../sonic-common/tasks/sonicdocker.yml + include: ../../sonic-common/tasks/sonicdocker.yml vars: docker_container: saiserver docker_image: "{{ image_id_saiserver if sonic_asic_type!='mellanox' else image_id_saiserver_mlnx }}" @@ -40,7 +40,7 @@ - debug: var=out.stdout_lines - name: Stop saiserver docker container - include_tasks: ../../sonic-common/tasks/sonicdocker.yml + include: ../../sonic-common/tasks/sonicdocker.yml vars: docker_container: saiserver docker_image: "{{ image_id_saiserver if sonic_asic_type!='mellanox' else image_id_saiserver_mlnx }}" @@ -49,7 +49,7 @@ when: no_deploy_saiserver is not defined - name: Restart syncd container - include_tasks: ../../sonic-common/tasks/sonicdocker.yml + include: ../../sonic-common/tasks/sonicdocker.yml vars: docker_container: syncd docker_image: "{{ image_id_syncd if sonic_asic_type!='mellanox' else image_id_syncd_mlnx }}" @@ -58,7 +58,7 @@ when: no_deploy_saiserver is not defined - name: Restart orchagent container - include_tasks: ../../sonic-common/tasks/sonicdocker.yml + include: ../../sonic-common/tasks/sonicdocker.yml vars: docker_container: orchagent docker_image: "{{ image_id_orchagent if sonic_asic_type!='mellanox' 
else image_id_orchagent_mlnx }}" diff --git a/ansible/roles/test/tasks/service_acl.yml b/ansible/roles/test/tasks/service_acl.yml index 0fa0e99046e..957b25e4e4e 100644 --- a/ansible/roles/test/tasks/service_acl.yml +++ b/ansible/roles/test/tasks/service_acl.yml @@ -4,7 +4,7 @@ host: "{{ ansible_host }}" version: "v2c" community: "{{ snmp_rocommunity }}" - delegate_to: localhost + connection: local - name: Copy config_service_acls.sh to the DuT (this also ensures we can successfully SSH to the DuT) become: true @@ -36,7 +36,7 @@ host: "{{ ansible_host }}" version: "v2c" community: "{{ snmp_rocommunity }}" - delegate_to: localhost + connection: local register: result failed_when: "'No SNMP response received before timeout' not in result.msg" @@ -51,10 +51,6 @@ search_regex: "OpenSSH" timeout: 90 -- name: Pause for 20s to prevent timeout error which may happen when deleting config_service_acls.sh - pause: - seconds: 20 - - name: Delete config_service_acls.sh from the DuT become: true file: @@ -67,4 +63,4 @@ host: "{{ ansible_host }}" version: "v2c" community: "{{ snmp_rocommunity }}" - delegate_to: localhost + connection: local diff --git a/ansible/roles/test/tasks/shared-fib.yml b/ansible/roles/test/tasks/shared-fib.yml index 553c1d928a4..9a88cda3fb9 100644 --- a/ansible/roles/test/tasks/shared-fib.yml +++ b/ansible/roles/test/tasks/shared-fib.yml @@ -30,7 +30,11 @@ # Generate route file - name: Generate route-port map information template: src=roles/test/templates/fib.j2 - dest=/root/fib_info.txt + dest=/tmp/fib_info.txt + connection: local + +- name: copy the fib_info to ptf container + copy: src=/tmp/fib_info.txt dest=/root delegate_to: "{{ptf_host}}" diff --git a/ansible/roles/test/tasks/simple-fib.yml b/ansible/roles/test/tasks/simple-fib.yml index 2b56a8f1fad..889538da3c3 100644 --- a/ansible/roles/test/tasks/simple-fib.yml +++ b/ansible/roles/test/tasks/simple-fib.yml @@ -2,7 +2,7 @@ # Run FIB test and Perform log analysis. 
#----------------------------------------- # Perform prechecks -- include_tasks: shared-fib.yml +- include: shared-fib.yml - debug : msg="Start FIB Test" @@ -10,7 +10,7 @@ when: mtu is not defined - name: "Start PTF runner" - include_tasks: ptf_runner.yml + include: ptf_runner.yml vars: ptf_test_name: FIB test ptf_test_dir: ptftests diff --git a/ansible/roles/test/tasks/single_lag_lacp_rate_test.yml b/ansible/roles/test/tasks/single_lag_lacp_rate_test.yml new file mode 100644 index 00000000000..7785437dc67 --- /dev/null +++ b/ansible/roles/test/tasks/single_lag_lacp_rate_test.yml @@ -0,0 +1,99 @@ +### Part of lag test palybook lag_2.yml (--tag lag_2) +### This playbook test one single port channel member interfaces sending ACP DU rate + +# Gather information of port channel ports, minimum links and total interface member numbers +- set_fact: + po: "{{ item }}" + po_interfaces: "{{ lag_facts.lags[item]['po_config']['ports'] }}" + po_intf_num: "{{ lag_facts.lags[item]['po_config']['ports']|length }}" + po_min_links: "{{lag_facts.lags[item]['po_config']['runner']['min_ports']}}" + +# pick flap interface name and calculate when it flaps, should portchannel interface flap or not +# Current it is using a static capacity < 75%, Portchannel will flap which match Sonic configuration +# if need to be random, then will make it a var +- set_fact: + po_flap: "{{ (po_intf_num|float - 1)/(po_min_links|float)*100 < 75 }}" + flap_intf: "{{ lag_facts.lags[item]['po_config']['ports'].keys()[0] }}" + +### figure out fanout switches info for the flapping lag member +- set_fact: + peer_device: "{{ fanout_neighbors[flap_intf]['peerdevice'] }}" + neighbor_interface: "{{ fanout_neighbors[flap_intf]['peerport'] }}" + +- conn_graph_facts: host={{ peer_device }} + connection: local + +### Now figure out remote VM and interface info for the falpping lag member and run minlink test +- set_fact: + peer_device: "{{vm_neighbors[flap_intf]['name']}}" + neighbor_interface: 
"{{vm_neighbors[flap_intf]['port']}}" + peer_hwsku: 'Arista-VM' + +- set_fact: + peer_host: "{{ minigraph_devices[peer_device]['mgmt_addr'] }}" + +### Now prepare for the remote VM interfaces that using PTF docker to check teh LACP DU packet rate is correct + +- set_fact: + iface_behind_lag_member: [] +- set_fact: + iface_behind_lag_member: "{{iface_behind_lag_member}}+ ['{{minigraph_port_indices[item.key]}}']" + with_dict: "{{ minigraph_neighbors }}" + when: peer_device == "{{item.value.name}}" +- set_fact: + neighbor_lag_intfs: [] + +- set_fact: + neighbor_lag_intfs: "{{ neighbor_lag_intfs }} + [ '{{ vm_neighbors[item]['port'] }}' ]" + with_items: "{{ po_interfaces }}" + +- block: + # make sure portchannel peer rate is set to fast + - name: make sure all lag members on VM are set to fast + action: apswitch template=neighbor_lag_rate_fast.j2 + args: + host: "{{peer_host}}" + login: "{{switch_login[hwsku_map[peer_hwsku]]}}" + connection: switch + + - set_fact: + lag_rate_current_setting: "fast" + + - pause: + seconds: 5 + + - name: test lacp packet sending rate is 1 seconds + include: lag_lacp_timing_test.yml + vars: + vm_name: "{{ peer_device }}" + lacp_timer: 1 + + # make sure portchannel peer rate is set to slow + - name: make sure all lag members on VM are set to slow + action: apswitch template=neighbor_lag_rate_slow.j2 + args: + host: "{{peer_host}}" + login: "{{switch_login[hwsku_map[peer_hwsku]]}}" + connection: switch + + - set_fact: + lag_rate_current_setting: "slow" + + - pause: + seconds: 5 + + - name: test lacp packet sending rate is 30 seconds + include: lag_lacp_timing_test.yml + vars: + vm_name: "{{ peer_device }}" + lacp_timer: 30 + + always: + - name: Restore lag rate setting on VM in case of failure + action: apswitch template=neighbor_lag_rate_slow.j2 + args: + host: "{{peer_host}}" + login: "{{switch_login[hwsku_map[peer_hwsku]]}}" + timeout: 300 + connection: switch + when: "lag_rate_current_setting is defined and lag_rate_current_setting == 
'fast'" diff --git a/ansible/roles/test/tasks/single_lag_test.yml b/ansible/roles/test/tasks/single_lag_test.yml new file mode 100644 index 00000000000..852eb5734cb --- /dev/null +++ b/ansible/roles/test/tasks/single_lag_test.yml @@ -0,0 +1,48 @@ +### Part of lag test palybook lag_2.yml (--tag lag_2) +### This playbook test one single port channel minimum link feature of one member interface shutdown +### and portchannel member interfaces sending ACP DU rate + +# Gather information of port channel ports, minimum links and total interface member numbers +- set_fact: + po: "{{ item }}" + po_interfaces: "{{ lag_facts.lags[item]['po_config']['ports'] }}" + po_intf_num: "{{ lag_facts.lags[item]['po_config']['ports']|length }}" + po_min_links: "{{lag_facts.lags[item]['po_config']['runner']['min_ports']}}" + +# pick flap interface name and calculate when it flaps, should portchannel interface flap or not +# Current it is using a static capacity < 75%, Portchannel will flap which match Sonic configuration +# if need to be random, then will make it a var +- set_fact: + po_flap: "{{ (po_intf_num|float - 1)/(po_min_links|float)*100 < 75 }}" + flap_intf: "{{ lag_facts.lags[item]['po_config']['ports'].keys()[0] }}" + +### figure out fanout switches info for the flapping lag member and run minlink test +- set_fact: + peer_device: "{{ fanout_neighbors[flap_intf]['peerdevice'] }}" + neighbor_interface: "{{ fanout_neighbors[flap_intf]['peerport'] }}" + +- conn_graph_facts: host={{ peer_device }} + connection: local + +- set_fact: + peer_host: "{{ device_info['mgmtip'] }}" + peer_hwsku: "{{ device_info['HwSku'] }}" + +- name: test fanout interface (physical) flap and lacp keep correct po status follow minimum links requirement + include: lag_minlink.yml + vars: + wait_down_time: 35 + +### Now figure out remote VM and interface info for the flapping lag member and run minlink test +- set_fact: + peer_device: "{{vm_neighbors[flap_intf]['name']}}" + neighbor_interface: 
"{{vm_neighbors[flap_intf]['port']}}" + peer_hwsku: 'Arista-VM' + +- set_fact: + peer_host: "{{ minigraph_devices[peer_device]['mgmt_addr'] }}" + +- name: test vm interface flap (no physical port down, more like remote port lock) that lag interface can change to correct po status follow minimum links requirement + include: lag_minlink.yml + vars: + wait_down_time: 120 diff --git a/ansible/roles/test/tasks/snmp.yml b/ansible/roles/test/tasks/snmp.yml index a84a9ddd58f..c5da4c4f202 100644 --- a/ansible/roles/test/tasks/snmp.yml +++ b/ansible/roles/test/tasks/snmp.yml @@ -1,35 +1,31 @@ # Gather facts with SNMP version 2 - name: Gathering basic snmp facts about the device snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} - delegate_to: localhost + connection: local # Test SNMP is working with sysdesc - name: Validating SNMP was successful and Hostname is what is expected assert: { that: "ansible_sysname == '{{ inventory_hostname }}'" } - block: + - name: include snmp cpu test + include: roles/test/tasks/snmp/cpu.yml + + - name: inlcude snmp physical table test + include: roles/test/tasks/snmp/phys_table.yml + - name: include snmp interfaces test - include_tasks: roles/test/tasks/snmp/interfaces.yml + include: roles/test/tasks/snmp/interfaces.yml - name: include snmp pfc counter test - include_tasks: roles/test/tasks/snmp/pfc_counters.yml + include: roles/test/tasks/snmp/pfc_counters.yml - name: include snmp queues test - include_tasks: roles/test/tasks/snmp/queues.yml + include: roles/test/tasks/snmp/queues.yml + + - name: include snmp PSU test + include: roles/test/tasks/snmp/psu.yml - name: include snmp lldp test - include_tasks: roles/test/tasks/snmp/lldp.yml + include: roles/test/tasks/snmp/lldp.yml when: testcase_name is defined - -- block: - - name: include snmp cpu test - include_tasks: roles/test/tasks/snmp/cpu.yml - - - name: inlcude snmp physical table test - include_tasks: roles/test/tasks/snmp/phys_table.yml - - - name: 
include snmp PSU test - include_tasks: roles/test/tasks/snmp/psu.yml - when: - - testcase_name is defined - - (hostvars[ansible_hostname]['type'] is not defined) or (hostvars[ansible_hostname]['type'] != 'simx') \ No newline at end of file diff --git a/ansible/roles/test/tasks/snmp/cpu.yml b/ansible/roles/test/tasks/snmp/cpu.yml index 7e686b2e684..831cc42061a 100644 --- a/ansible/roles/test/tasks/snmp/cpu.yml +++ b/ansible/roles/test/tasks/snmp/cpu.yml @@ -1,4 +1,59 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: snmp/test_snmp_cpu.py +# Test SNMP CPU Utilization +# +# - Pulls CPU usage via shell commans +# - Polls SNMP for CPU usage +# - Difference should be < 2% (allowing float->int rounding on each result) +# +# +# Requires: Ansible v1.x+ +# +# Usage: +# +# sudo ansible-playbook test.yml -i str --limit 10.3.147.142 --sudo --vault-password-file ~/password.txt --tags snmp_cpu +# +# +# TODO: abstract the snmp OID by SKU + +- block: + - name: Start cpu load generation + shell: cpu_load() { yes > /dev/null & }; cpu_load && cpu_load && cpu_load && cpu_load + become: yes + + - name: Wait for load to reflect in SNMP + pause: seconds=20 + + # Gather facts with SNMP version 2 + - name: Gathering basic snmp facts about the device + snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} is_dell=yes + connection: local + + - name: Pull CPU utilization via shell + # Explanation: Run top command with 2 iterations, 5sec delay. Discard the first iteration, then grap the CPU line from the second, + # subtract 100% - idle, and round down to integer. 
+ shell: top -bn2 -d5 | awk '/^top -/ { p=!p } { if (!p) print }' | awk '/Cpu/ { cpu = 100 - $8 };END { print cpu }' | awk '{printf "%.0f",$1}' + register: shell_cpu_usage + become: yes + + # If no value exists, fail instead of doing string->int conversion and weird math + - name: 'Validate SNMP CPU Utilization exists and is valid' + assert: + that: '(ansible_ChStackUnitCpuUtil5sec is defined) and ( {{ansible_ChStackUnitCpuUtil5sec|isnan}} == False ) ' + + - name: CPU usage from SNMP + debug: var=ansible_ChStackUnitCpuUtil5sec + + - name: CPU usage from TOP + debug: var=shell_cpu_usage.stdout + + - name: Difference between SNMP and TOP + debug: msg="{{ ((ansible_ChStackUnitCpuUtil5sec) - (shell_cpu_usage.stdout|int)) | abs() }}" + + # Compare results + - name: 'Validating SNMP CPU utilization matches shell "top" result' + assert: + that: "{{ ((ansible_ChStackUnitCpuUtil5sec) - (shell_cpu_usage.stdout|int)) | abs() }} <= 5" + + always: + - name: Stop cpu load generation + shell: killall yes + become: yes diff --git a/ansible/roles/test/tasks/snmp/interfaces.yml b/ansible/roles/test/tasks/snmp/interfaces.yml index 74a738081dc..fbb09c07488 100644 --- a/ansible/roles/test/tasks/snmp/interfaces.yml +++ b/ansible/roles/test/tasks/snmp/interfaces.yml @@ -1,4 +1,37 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: snmp/test_snmp_interfaces.py +# Gather facts with SNMP version 2 +- name: Gathering basic snmp facts about the device + snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} + connection: local + +- set_fact: + snmp_intf: [] + mg_intf: [] + +- name: Create snmp interfaces list + set_fact: + snmp_intf: "{{ snmp_intf + [item.value.name] }}" + with_dict: "{{ snmp_interfaces }}" + when: "{{item.value.name is defined}}" + +- name: Create minigraph interfaces list + set_fact: + mg_intf: "{{ mg_intf + [item.value.alias] }}" + with_dict: "{{ minigraph_ports }}" + +- name: Add port channel interfaces 
into minigraph interfaces list + set_fact: + mg_intf: "{{ mg_intf + [item.key] }}" + with_dict: "{{ minigraph_portchannels }}" + +- name: Add management port into minigraph interfaces list + set_fact: + mg_intf: "{{ mg_intf + [minigraph_mgmt_interface.alias] }}" + +- debug: var=minigraph_port_name_to_alias_map +- debug: var=snmp_intf +- debug: var=mg_intf + +- name: Check for missing interfaces in SNMP + fail: msg="Minigraph interface {{ minigraph_port_name_to_alias_map[item] if item in minigraph_port_name_to_alias_map else item }} not in SNMP interfaces" + when: "{{ (item in minigraph_port_name_to_alias_map and minigraph_port_name_to_alias_map[item] not in snmp_intf) or (item not in minigraph_port_name_to_alias_map and item not in snmp_intf) }}" + with_items: "{{ mg_intf }}" diff --git a/ansible/roles/test/tasks/snmp/lldp.yml b/ansible/roles/test/tasks/snmp/lldp.yml index 8184e1a99eb..4d483650d3f 100644 --- a/ansible/roles/test/tasks/snmp/lldp.yml +++ b/ansible/roles/test/tasks/snmp/lldp.yml @@ -1,4 +1,114 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: snmp/test_snmp_lldp.py +# Test checks for ieee802_1ab MIBs: +# - lldpLocalSystemData 1.0.8802.1.1.2.1.3 +# - lldpLocPortTable 1.0.8802.1.1.2.1.3.7 +# - lldpLocManAddrTable 1.0.8802.1.1.2.1.3.8 +# +# - lldpRemTable 1.0.8802.1.1.2.1.4.1 +# - lldpRemManAddrTable 1.0.8802.1.1.2.1.4.2 +# +# For local data check if every OID has value +# For remote values check for availability for +# at least 80% of minigraph neighbors +# (similar to lldp test) + + +# Gather facts with SNMP version 2 +- name: Gathering basic snmp facts about the device + snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} + connection: local + +- name: Print SNMP LLDP information + debug: msg="{{ snmp_lldp }}" + +# Check if lldpLocalSysData is present + +- name: "Verify {{ item }} is defined" + assert: { that: "{{ snmp_lldp[item] is defined }} + and not {{ snmp_lldp[item] | 
search('No Such Object currently exists') }}" } + with_items: + - lldpLocChassisIdSubtype + - lldpLocChassisId + - lldpLocSysName + - lldpLocSysDesc + +- set_fact: + snmp_ports: "{{ snmp_ports|default({}) | combine({ item.key : item.value}) }}" + when: "{{ item.value.name is defined }} + and ({{ item.value['name'].find('Ethernet') != -1 }} or {{ item.value['name'].find('eth') != -1 }})" + with_dict: "{{ snmp_interfaces }}" + +# Check if lldpLocPortTable is present for all ports +- name: Verify lldpLocPortTable is present on port {{ item.value.name }} + assert: { that: "{{ item.value['lldpLocPortNum'] is defined }} + and {{ item.value['lldpLocPortIdSubtype'] is defined }} + and {{ item.value['lldpLocPortId'] is defined }} + and {{ item.value['lldpLocPortDesc'] is defined }}" } + with_dict: "{{ snmp_ports }}" + +# Check if lldpLocManAddrTable is present +- name: "Verify {{ item }} is defined" + assert: { that: "{{ snmp_lldp[item] is defined }} + and not {{ snmp_lldp[item] | search('No Such Object currently exists') }}" } + with_items: + - lldpLocManAddrSubtype + - lldpLocManAddr + - lldpLocManAddrLen + - lldpLocManAddrIfSubtype + - lldpLocManAddrIfId + - lldpLocManAddrOID + +# Check if lldpRemTable is present +- set_fact: + active_intf: [] + +- name: find minigraph lldp neighbor + set_fact: + minigraph_lldp_nei: "{{ minigraph_lldp_nei|default({}) | combine({ item.key : item.value}) }}" + when: "'server' not in item.value['name'] | lower" + with_dict: minigraph_neighbors + +- name: Check if lldpRemTable is present on port {{ item.value.name }} + when: "{{ item.value['lldpRemTimeMark'] is defined }} + and {{ item.value['lldpRemLocalPortNum'] is defined }} + and {{ item.value['lldpRemIndex'] is defined }} + and {{ item.value['lldpRemChassisIdSubtype'] is defined }} + and {{ item.value['lldpRemChassisId'] is defined }} + and {{ item.value['lldpRemPortIdSubtype'] is defined }} + and {{ item.value['lldpRemPortId'] is defined }} + and {{ item.value['lldpRemPortDesc'] is 
defined }} + and {{ item.value['lldpRemSysName'] is defined }} + and {{ item.value['lldpRemSysDesc'] is defined }} + and {{ item.value['lldpRemSysCapSupported'] is defined }} + and {{ item.value['lldpRemSysCapEnabled'] is defined }}" + set_fact: + active_intf: "{{ active_intf + [item] }}" + with_dict: "{{ snmp_interfaces }}" + +- debug: + msg: "Found {{ active_intf | length }} Ifs with lldpRemTable data\n + Minigraph contains {{ minigraph_lldp_nei | length }} neighbors" + +- name: Verify lldpRemTable is available on most interfaces + assert: {that: "{{ minigraph_lldp_nei | length * 0.8 }} <= {{ active_intf | length }}" } + +# Check if lldpRemManAddrTable is present +- set_fact: + active_intf: [] + +- name: Check if lldpRemManAddrTable is present on port {{ item.value.name }} + when: "{{ item.value['lldpRemManAddrSubtype'] is defined }} + and {{ item.value['lldpRemManAddr'] is defined }} + and {{ item.value['lldpRemManAddr'] is defined }} + and {{ item.value['lldpRemManAddrIfSubtype'] is defined }} + and {{ item.value['lldpRemManAddrIfId'] is defined }} + and {{ item.value['lldpRemManAddrOID'] is defined }}" + set_fact: + active_intf: "{{ active_intf + [item] }}" + with_dict: "{{ snmp_interfaces }}" + +- debug: + msg: "Found {{ active_intf | length }} Ifs with lldpRemManAddr data\n + Minigraph contains {{ minigraph_lldp_nei | length }} neighbors" + +- name: Verify lldpRemManAddr is available on most interfaces + assert: {that: "{{ minigraph_lldp_nei | length * 0.8 }} <= {{ active_intf | length }}" } diff --git a/ansible/roles/test/tasks/snmp/pfc_counters.yml b/ansible/roles/test/tasks/snmp/pfc_counters.yml index f1f096d8a09..1170474a822 100644 --- a/ansible/roles/test/tasks/snmp/pfc_counters.yml +++ b/ansible/roles/test/tasks/snmp/pfc_counters.yml @@ -1,4 +1,15 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: snmp/test_snmp_pfc_counters.py +# Gather facts with SNMP version 2 +- name: Gathering basic snmp facts about the 
device + snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} + connection: local + +# Check PFC counters +# Ignore management ports, assuming the names starting with 'eth', eg. eth0 +- fail: + msg: "Port {{ item.key }} does not have PFC counters" + when: "{{ (not item.value.name.startswith('eth')) and + ('cpfcIfRequests' not in item.value.keys() or + 'cpfcIfIndications' not in item.value.keys() or + 'requestsPerPriority' not in item.value.keys() or + 'indicationsPerPriority' not in item.value.keys()) }}" + with_dict: "{{ snmp_interfaces }}" diff --git a/ansible/roles/test/tasks/snmp/phys_table.yml b/ansible/roles/test/tasks/snmp/phys_table.yml index 8efede98c22..e7f947d36a1 100644 --- a/ansible/roles/test/tasks/snmp/phys_table.yml +++ b/ansible/roles/test/tasks/snmp/phys_table.yml @@ -1,11 +1,7 @@ # Gather facts with SNMP version 2 - name: Gathering basic snmp facts about the device snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} - delegate_to: localhost - -- name: Get Transceiver info - shell: redis-cli -n 6 keys *TRANSCEIVER_INFO* |cut -d "|" -f 2 - register: trans_output + connection: local - set_fact: chassis_id: "1" @@ -33,30 +29,27 @@ - debug: msg: "{{ snmp_physical_entities.keys() }}" -- name: Check SFP/SFP+ ports are connected in DUT - fail: msg="No SFP ports are connected in DUT" - when: "{{ trans_output.stdout_lines|length == 0}}" - # OID index is defined as ifindex*1000, for EthernetN the ifindex is N+1, so OID index for EthernetN will be (N + 1)*1000 - name: Check all transceivers present in Entity MIB fail: - msg: '{{ (((item.split("Ethernet")[-1] | int + 1) * 1000) | string) }}' - when: '{{ (((item.split("Ethernet")[-1] | int + 1) * 1000) | string) not in snmp_physical_entities.keys() }}' - with_items: "{{ trans_output.stdout_lines }}" + msg: '{{ (((item.value.name.split("Ethernet")[-1] | int + 1) * 1000) | string) }}' + when: '{{ (((item.value.name.split("Ethernet")[-1] | int + 1) * 1000) 
| string) not in snmp_physical_entities.keys() }}' + with_dict: + minigraph_ports - name: Check all transceiver DOM sensors present in Entity MIB fail: - when: '{{ ((((item[0].split("Ethernet")[-1] | int + 1) * 1000) + item[1]) | string) not in snmp_physical_entities.keys() }}' + when: '{{ ((((item[0].name.split("Ethernet")[-1] | int + 1) * 1000) + item[1]) | string) not in snmp_physical_entities.keys() }}' with_nested: - - "{{ trans_output.stdout_lines }}" + - "{{ minigraph_ports.values() }}" - "{{ sensor_partial_oids.values() }}" - debug: var=snmp_sensors - name: Check all transceiver DOM sensors present in Entity Sensor MIB fail: - when: '{{ ((((item[0].split("Ethernet")[-1] | int + 1) * 1000) + item[1]) | string) not in snmp_sensors.keys() }}' + when: '{{ ((((item[0].name.split("Ethernet")[-1] | int + 1) * 1000) + item[1]) | string) not in snmp_sensors.keys() }}' with_nested: - - "{{ trans_output.stdout_lines }}" + - "{{ minigraph_ports.values() }}" - "{{ sensor_partial_oids.values() }}" diff --git a/ansible/roles/test/tasks/snmp/psu.yml b/ansible/roles/test/tasks/snmp/psu.yml index 72f52e9d804..13cf10db969 100644 --- a/ansible/roles/test/tasks/snmp/psu.yml +++ b/ansible/roles/test/tasks/snmp/psu.yml @@ -1,7 +1,7 @@ # Gather facts with SNMP version 2 - name: Gathering basic snmp facts about the device snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} - delegate_to: localhost + connection: local # We assume that all the PSUs tested should be in good status, otherwise, fix the PSU manually - fail: diff --git a/ansible/roles/test/tasks/snmp/queues.yml b/ansible/roles/test/tasks/snmp/queues.yml index 8a012864696..4d24d6ecf77 100644 --- a/ansible/roles/test/tasks/snmp/queues.yml +++ b/ansible/roles/test/tasks/snmp/queues.yml @@ -1,4 +1,9 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: snmp/test_snmp_queue.py +# Gather facts with SNMP version 2 +- name: Gathering basic snmp facts about the 
device + snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} + connection: local + +- fail: + msg: "Port {{ item.key }} does not have queue counters" + when: "{{ item.value.description | match('^Ethernet') }} and {{ item.value['queues'] is not defined }}" + with_dict: "{{ snmp_interfaces }}" diff --git a/ansible/roles/test/tasks/sonic.yml b/ansible/roles/test/tasks/sonic.yml index 64eddff5402..cbfd6927796 100644 --- a/ansible/roles/test/tasks/sonic.yml +++ b/ansible/roles/test/tasks/sonic.yml @@ -1,5 +1,3 @@ -- include_vars: "vars/docker_registry.yml" - - include_vars: "roles/test/vars/testcases.yml" - name: Gathering minigraph facts about the device @@ -25,11 +23,6 @@ sonic_asic_type: barefoot when: sonic_hwsku in barefoot_hwskus -- name: Set sonic_asic_type fact - set_fact: - sonic_asic_type: marvell - when: sonic_hwsku in marvell_hwskus - ########################################################################### ##### run test using ansible tag when no testbedname specified ###### ########################################################################### @@ -61,15 +54,10 @@ - block: - name: Gathering testbed information test_facts: testbed_name="{{ testbed_name }}" testbed_file="{{ testbed_file }}" - delegate_to: localhost - - - name: Set default dut index - set_fact: - dut_index: 0 - when: dut_index is not defined + connection: local - fail: msg="The DUT you are trying to run test does not belongs to this testbed" - when: testbed_facts['duts'][dut_index] != inventory_hostname + when: testbed_facts['dut'] != inventory_hostname - name: set testbed_type set_fact: @@ -82,13 +70,10 @@ vm: "{{ testbed_facts['vm_base'] }}" when: "testbed_facts['vm_base'] != ''" - - fail: msg="cannot find {{ testcase_name }} in available testcases" - when: testcases[testcase_name] is not defined - - - fail: msg="{{ testbed_type }} is not supported topology of this testbed {{ testbed_name }}" - when: testbed_type not in 
testcases[testcase_name]['topologies'] + - fail: msg="cannot find {{ testcase_name }} in available testcases or is not supported topology of this testbed {{ testbed_name }}" + when: (testcase_name is not defined) or (testbed_type not in testcases[testcase_name]['topologies']) - - include_tasks: test_sonic_by_testname.yml + - include: test_sonic_by_testname.yml when: - testbed_name is defined - testcase_name is defined diff --git a/ansible/roles/test/tasks/syslog.yml b/ansible/roles/test/tasks/syslog.yml index efd1be9644d..fa74c902b7f 100644 --- a/ansible/roles/test/tasks/syslog.yml +++ b/ansible/roles/test/tasks/syslog.yml @@ -17,18 +17,18 @@ - debug: msg="Starting Syslog Tests..." # Fetch the source IP of the ansible server -- name: Get ptf host facts +- name: Get localhost facts setup: - delegate_to: "{{ ptf_host }}" + connection: local register: local_facts -#- debug: var=local_facts.ansible_facts.ansible_mgmt.ipv4.address +#- debug: var=local_facts.ansible_facts.ansible_eth0.ipv4.address # Set variables for the test - name: Set variables for the test set_fact: - local_srcip: "{{ local_facts.ansible_facts.ansible_mgmt.ipv4.address }}" + local_srcip: "{{ local_facts.ansible_facts.ansible_eth0.ipv4.address }}" original_syslog_servers: "{{ syslog_servers }}" syslog_port: "{{ 65535 | random(start=65000) }}" test_message: "Basic Test Message" @@ -49,45 +49,50 @@ # Start local ryslog Server - name: Load imudp module in local syslog - delegate_to: "{{ ptf_host }}" + connection: local + become: true lineinfile: line: "module(load=\"imudp\")" dest: /etc/rsyslog.conf state: present - name: Remove imudp ports - delegate_to: "{{ ptf_host }}" + connection: local + become: true lineinfile: regexp: "input\\(type=\"imudp\" port=\"\\d+\"\\)" dest: /etc/rsyslog.conf state: absent - name: Add imudp port {{ syslog_port }} - delegate_to: "{{ ptf_host }}" + connection: local + become: true lineinfile: line: "input(type=\"imudp\" port=\"{{ syslog_port }}\")" dest: /etc/rsyslog.conf 
state: present - name: Stop Syslog Daemon - delegate_to: "{{ ptf_host }}" + connection: local + become: true shell: killall rsyslogd ignore_errors: true - name: Remove local /var/log/syslog - delegate_to: "{{ ptf_host }}" + become: true + connection: local file: path: /var/log/syslog state: absent - name: Start Syslog Daemon - delegate_to: "{{ ptf_host }}" - shell: service rsyslog restart - ignore_errors: true + connection: local + become: true + service: name=rsyslog state=started - name: Wait a little bit for service to start wait_for: - timeout: 30 + timeout: 2 # SSH to device and generate a syslog - name: Send test syslog @@ -105,13 +110,10 @@ become: true service: name=rsyslog state=restarted -- name: Wait a little bit for service to start - wait_for: - timeout: 30 - # Check Messages - name: Check syslog messages for the test message - delegate_to: "{{ ptf_host }}" + connection: local + become: true shell: grep {{ inventory_hostname }} /var/log/syslog | grep "{{ test_message }}" | grep -v ansible register: grep_result diff --git a/ansible/roles/test/tasks/test_sonic_by_tag.yml b/ansible/roles/test/tasks/test_sonic_by_tag.yml index f64d40003c3..a6fe3d35ec3 100644 --- a/ansible/roles/test/tasks/test_sonic_by_tag.yml +++ b/ansible/roles/test/tasks/test_sonic_by_tag.yml @@ -28,44 +28,40 @@ tags: always - name: Verify interfaces are up - include_tasks: interface.yml + include: interface.yml tags: always - name: BGP facts test - include_tasks: bgp_fact.yml + include: bgp_fact.yml tags: bgp_fact -- name: ECMP test - include: ecmp.yml - tags: ecmp - - name: Neighbor mac change test - include_tasks: neighbour-mac.yml + include: neighbour-mac.yml tags: neighbour - name: Test LLDP - include_tasks: lldp.yml + include: lldp.yml tags: lldp - name: Test NTP - include_tasks: ntp.yml + include: ntp.yml tags: ntp - name: Test SNMP Basic - include_tasks: snmp.yml + include: snmp.yml tags: snmp - name: Test DHCP Relay - include_tasks: dhcp_relay.yml + include: dhcp_relay.yml 
tags: dhcp_relay when: minigraph_devices[inventory_hostname]['type'] == "ToRRouter" - name: Test Control-Plain policing COPP - include_tasks: copp.yml + include: copp.yml tags: copp - name: Fast-reboot test - include_tasks: fast-reboot.yml + include: fast-reboot.yml when: minigraph_portchannel_interfaces | length > 0 and minigraph_vlan_interfaces | length > 0 tags: fast_reboot @@ -76,112 +72,112 @@ tags: sync - name: Test SyncD BGP Flaps - include_tasks: bgp_flap.yml + include: bgp_flap.yml tags: sync - name: Test Syslog Basic - include_tasks: syslog.yml + include: syslog.yml tags: syslog - name: Test SNMP CPU - include_tasks: snmp/cpu.yml + include: snmp/cpu.yml tags: snmp_cpu when: minigraph_hwsku == "Force10-S6000" or minigraph_hwsku == "ACS-S6000" - name: Test SNMP Interfaces - include_tasks: snmp/interfaces.yml + include: snmp/interfaces.yml tags: snmp_interfaces - name: Test Interface Flap from Neighbor - include_tasks: link_flap.yml + include: link_flap.yml tags: link_flap - name: Test kernel ARP behavior - include_tasks: arpall.yml + include: arpall.yml tags: arp - name: Test sensors - include_tasks: sensors_check.yml + include: sensors_check.yml tags: sensors - name: Test reboot - include_tasks: reboot.yml + include: reboot.yml tags: reboot ### When calling the following tests, please add command line of what testbed_type and which PTF docker to test against ### -e "testbed_type=t1-lag ptf_host=10.0.0.200" - name: Fib test - include_tasks: fib.yml + include: fib.yml tags: fib -- name: Hash test - include_tasks: hash.yml - tags: hash - - name: MTU test - include_tasks: mtu.yml + include: mtu.yml tags: mtu - name: Directed Broadcast test - include_tasks: dir_bcast.yml + include: dir_bcast.yml tags: dir_bcast - name: FDB test - include_tasks: fdb.yml + include: fdb.yml tags: fdb ### When calling this decap test, please add command line of what testbed_type, dscp_mode, and which PTF docker to test against #### -e "testbed_type=t1-lag dscp_mode=pipe 
ptf_host=10.0.0.200" - name: Decap test - include_tasks: decap.yml + include: decap.yml tags: decap - name: BGP Speaker test - include_tasks: bgp_speaker.yml + include: bgp_speaker.yml tags: bgp_speaker - name: Test Everflow - include_tasks: everflow.yml + include: everflow.yml tags: everflow - name: Test Everflow on testbed - include_tasks: everflow_testbed.yml + include: everflow_testbed.yml tags: everflow_testbed - name: Test LAG - include_tasks: lagall.yml + include: lagall.yml tags: lag - name: Test LAG using lag graph file - include_tasks: lag_2.yml + include: lag_2.yml tags: lag_2 +- name: Memory check + include: mem_check.yml + tags: mem_check + - name: ACL test - include_tasks: acltb.yml + include: acltb.yml tags: acl #- name: PFC Watchdog test -# include_tasks: pfc_wd.yml +# include: pfc_wd.yml #tags: pfc_wd - name: ECN WRED configure test - include_tasks: ecn_wred.yml + include: ecn_wred.yml tags: ecn_wred ### when calling this test, please add command line of testbed_type ### -e testbed_type=t1 - name: BGP multipath relax - include_tasks: bgp_multipath_relax.yml + include: bgp_multipath_relax.yml tags: bgp_multipath_relax - name: neighbor mac change test without using ptf - include_tasks: neighbour-mac-noptf.yml + include: neighbour-mac-noptf.yml tags: neighbour_mac_noptf - name: CRM test - include_tasks: crm.yml + include: crm.yml tags: crm - name: Asymmetric PFC test - include_tasks: pfc_asym.yml + include: pfc_asym.yml tags: pfc_asym diff --git a/ansible/roles/test/tasks/test_sonic_by_testname.yml b/ansible/roles/test/tasks/test_sonic_by_testname.yml index b686df6b334..140bb06ed74 100644 --- a/ansible/roles/test/tasks/test_sonic_by_testname.yml +++ b/ansible/roles/test/tasks/test_sonic_by_testname.yml @@ -11,12 +11,12 @@ when: allow_recover is not defined - name: do basic sanity check before each test - include_tasks: base_sanity.yml + include: base_sanity.yml vars: recover: "{{ allow_recover }}" - name: validate all interfaces are up - 
include_tasks: interface.yml + include: interface.yml vars: recover: "{{ allow_recover }}" @@ -32,37 +32,17 @@ - name: print system versions debug: var=versions.stdout_lines -- set_fact: - skip_test: false - -- set_fact: - skip_test: true - when: - - hostvars[ansible_hostname]['type'] is defined - - testcases[testcase_name]['vtestbed_compatible'] is defined - - hostvars[ansible_hostname]['type'] == 'kvm' or hostvars[ansible_hostname]['type'] == 'simx' - - not testcases[testcase_name]['vtestbed_compatible'] | bool - -- block: - - name: run test case {{ testcases[testcase_name]['filename'] }} file - include_tasks: "{{ testcases[testcase_name]['filename'] }}" - - - name: do basic sanity check after each test - include_tasks: base_sanity.yml +- name: run test case {{ testcases[testcase_name]['filename'] }} file + include: "{{ testcases[testcase_name]['filename'] }}" - - name: validate all interfaces are up after test - include_tasks: interface.yml +- name: do basic sanity check after each test + include: base_sanity.yml - - debug: - msg: - - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" - - "!!!!!!!!!!!!!!!!!!!! end running test {{ testcase_name }} !!!!!!!!!!!!!!!!!!!!!!" - - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" - when: not skip_test +- name: validate all interfaces are up after test + include: interface.yml - debug: msg: - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" - - "!!!!!!!!!!!!!!!!!!!! test {{ testcase_name }} was skipped !!!!!!!!!!!!!!!!!!!!!!" + - "!!!!!!!!!!!!!!!!!!!! end running test {{ testcase_name }} !!!!!!!!!!!!!!!!!!!!!!" - "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
- when: skip_test diff --git a/ansible/roles/test/tasks/vlan_cleanup.yml b/ansible/roles/test/tasks/vlan_cleanup.yml new file mode 100644 index 00000000000..6011c78e99c --- /dev/null +++ b/ansible/roles/test/tasks/vlan_cleanup.yml @@ -0,0 +1,24 @@ +- name: Restore all IP addresses on the LAGs + shell: ip addr add {{ (item.addr ~ "/" ~ item.mask)|ipaddr() }} dev {{ item.attachto }} + with_items: + - "{{ minigraph_portchannel_interfaces }}" + become: true + +- name: Bring up LAGs + shell: ifconfig {{ item.attachto }} up + with_items: + - "{{ minigraph_portchannel_interfaces }}" + become: true + +- name: Remove configuration for test + file: + state: absent + path: /etc/sonic/vlan_configuration.json + become: true + +- name: Reload configuration + shell: config reload -y + become: true + +- name: wait for config reload + pause: seconds=60 diff --git a/ansible/roles/test/tasks/vlan_configure.yml b/ansible/roles/test/tasks/vlan_configure.yml new file mode 100644 index 00000000000..c8b0a668329 --- /dev/null +++ b/ansible/roles/test/tasks/vlan_configure.yml @@ -0,0 +1,55 @@ +- fail: msg="Please set ptf_host variable" + when: ptf_host is not defined + +- fail: msg="Invalid testbed_type value '{{testbed_type}}'" + when: testbed_type not in [ 't0', 't0-116' ] + +- debug: var=minigraph_portchannels + +- debug: var=minigraph_port_indices + +- debug: var=minigraph_ports + +- name: Generate VLAN ports information + template: src=roles/test/templates/vlan_info.j2 + dest=/tmp/vlan_info.yml + connection: local + +- name: Load VLAN ports info from file + include_vars: '/tmp/vlan_info.yml' + +- debug: var=vlan_ports_list +- debug: var=vlan_intf_list + +- name: Flush all IP addresses on the LAGs + shell: ip addr flush {{ item.attachto }} + with_items: + - "{{ minigraph_portchannel_interfaces }}" + become: true + +- name: Delete all IP addresses on the LAGs in config DB + shell: docker exec -i database redis-cli -n 4 del "PORTCHANNEL_INTERFACE|{{ item.attachto }}|{{ (item.addr ~ '/' ~ 
item.mask)|ipaddr()|upper() }}" + with_items: + - "{{ minigraph_portchannel_interfaces }}" + become: true + +- name: Shutdown LAGs + shell: ifconfig {{ item.attachto }} down + with_items: + - "{{ minigraph_portchannel_interfaces }}" + become: true + +- name: sleep for some time + pause: seconds=10 + +- name: Generate nessesary configuration for test + template: src=roles/test/templates/vlan_configuration.j2 + dest=/etc/sonic/vlan_configuration.json + become: true + +- name: Load configuration + shell: config load -y /etc/sonic/vlan_configuration.json + become: true + +- name: sleep for some time + pause: seconds=10 diff --git a/ansible/roles/test/tasks/vlan_test.yml b/ansible/roles/test/tasks/vlan_test.yml new file mode 100644 index 00000000000..0f4ce39c431 --- /dev/null +++ b/ansible/roles/test/tasks/vlan_test.yml @@ -0,0 +1,47 @@ +- name: Configure route for remote IP + shell: ip route add {{ item[0].permit_vlanid[item[1]].remote_ip }} via {{ item[0].permit_vlanid[item[1]].peer_ip }} + with_nested: + - "{{ vlan_ports_list }}" + - "{{ vlan_ports_list[0].permit_vlanid.keys() }}" + become: true + +- name: Set unique MACs to PTF interfaces + script: roles/test/files/helpers/change_mac.sh + delegate_to: "{{ptf_host}}" + +- name: Copy ARP responder to PTF + copy: src=roles/test/files/helpers/arp_responder.py dest=/opt + delegate_to: "{{ptf_host}}" + +- name: Copy arp responder supervisor configuration to the PTF container + template: src=arp_responder.conf.j2 dest=/etc/supervisor/conf.d/arp_responder.conf + vars: + - arp_responder_args: '' + delegate_to: "{{ ptf_host }}" + +- name: Reread supervisor configuration + shell: /usr/local/bin/supervisorctl reread + delegate_to: "{{ptf_host}}" + +- name: Update supervisor configuration + shell: /usr/local/bin/supervisorctl update + delegate_to: "{{ ptf_host }}" + +- name: Copy tests to the PTF container + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + +- block: + - include: ptf_runner.yml + 
vars: + ptf_test_name: VLAN test + ptf_test_dir: ptftests + ptf_test_path: vlan_test.VlanTest + ptf_platform: remote + ptf_test_params: + - vlan_ports_list = \"{{ vlan_ports_list }}\" + - vlan_intf_list = \"{{ vlan_intf_list }}\" + - router_mac = \"{{ ansible_Ethernet0['macaddress'] }}\" + ptf_extra_options: "--relax --debug info --log-file /tmp/vlan_test.log" + rescue: + - debug: msg="PTF test raise error" diff --git a/ansible/roles/test/tasks/vlantb.yml b/ansible/roles/test/tasks/vlantb.yml index 72f879a604f..e62d84f4581 100644 --- a/ansible/roles/test/tasks/vlantb.yml +++ b/ansible/roles/test/tasks/vlantb.yml @@ -1,5 +1,20 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: vlan/test_vlan.py +#----------------------------------------- +# Apply Vlan configuration +#----------------------------------------- +- name: Vlan test setup on testbed + include: vlan_configure.yml + tags: vlan_configure +#----------------------------------------- +# Run Vlan test +#----------------------------------------- +- name: Vlan test run on testbed + include: vlan_test.yml + tags: vlan_test + +#----------------------------------------- +# Clean up Vlan configuration +#----------------------------------------- +- name: Clean up Vlan test configuration on the testbed + include: vlan_cleanup.yml + tags: vlan_cleanup diff --git a/ansible/roles/test/tasks/vxlan-decap.yml b/ansible/roles/test/tasks/vxlan-decap.yml index 31aa917e520..4a002308858 100644 --- a/ansible/roles/test/tasks/vxlan-decap.yml +++ b/ansible/roles/test/tasks/vxlan-decap.yml @@ -1,4 +1,112 @@ -- name: run test - include_tasks: roles/test/tasks/pytest_runner.yml - vars: - test_node: vxlan/test_vxlan_decap.py +# example + +- block: + - fail: msg="Please set ptf_host variable" + when: ptf_host is not defined + + - name: Remove existing ip from ptf host + script: roles/test/files/helpers/remove_ip.sh + delegate_to: "{{ ptf_host }}" + + - name: Make all mac addresses in ptf unique - 
should be done in vm_set + script: roles/test/files/helpers/change_mac.sh + delegate_to: "{{ ptf_host }}" + + - name: Copy tests to the PTF container + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ ptf_host }}" + + - name: Copy arp responder to the PTF container + copy: src=roles/test/files/helpers/arp_responder.py dest=/opt + delegate_to: "{{ ptf_host }}" + + - name: Copy arp responder supervisor configuration to the PTF container + template: src=arp_responder.conf.j2 dest=/etc/supervisor/conf.d/arp_responder.conf + vars: + - arp_responder_args: '--conf /tmp/vxlan_arpresponder.conf' + delegate_to: "{{ ptf_host }}" + + - name: Reread supervisor configuration + shell: supervisorctl reread + delegate_to: "{{ ptf_host }}" + + - name: Update supervisor configuration + shell: supervisorctl update + delegate_to: "{{ ptf_host }}" + + - name: Restart DUT. Wait 240 seconds after SONiC started ssh + include: reboot.yml + vars: + ready_timeout: 240 + + - name: Render DUT parameters to json file for the test + template: src=vxlan_decap.json.j2 dest=/tmp/vxlan_decap.json + delegate_to: "{{ ptf_host }}" + + - name: Render DUT vxlan configuration. Tunnel + template: src=vxlan_db.tunnel.json.j2 dest=/tmp/vxlan_db.tunnel.json + + - name: Render DUT vxlan configuration. 
Tunnel Maps + template: src=vxlan_db.maps.json.j2 dest=/tmp/vxlan_db.maps.{{ item }}.json + with_items: minigraph_vlans + + - include: ptf_runner.yml + vars: + ptf_test_name: Vxlan decap test - No vxlan configuration + ptf_test_dir: ptftests + ptf_test_path: vxlan-decap.Vxlan + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_qlen: 1000 + ptf_test_params: + - vxlan_enabled=False + - config_file='/tmp/vxlan_decap.json' + - count=10 + + - name: Configure vxlan decap tunnel + shell: sonic-cfggen -j /tmp/vxlan_db.tunnel.json --write-to-db + + - name: Configure vxlan decap tunnel maps + shell: sonic-cfggen -j /tmp/vxlan_db.maps.{{ item }}.json --write-to-db + with_items: minigraph_vlans + + - include: ptf_runner.yml + vars: + ptf_test_name: Vxlan decap test - vxlan configuration applied + ptf_test_dir: ptftests + ptf_test_path: vxlan-decap.Vxlan + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_qlen: 1000 + ptf_test_params: + - vxlan_enabled=True + - config_file='/tmp/vxlan_decap.json' + - count=10 + + - name: Remove vxlan tunnel maps configuration + shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map{{ item }}" + with_items: minigraph_vlans + + - name: Remove vxlan tunnel configuration + shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan" + + - include: ptf_runner.yml + vars: + ptf_test_name: Vxlan decap test - vxlan configuration removed + ptf_test_dir: ptftests + ptf_test_path: vxlan-decap.Vxlan + ptf_platform: remote + ptf_platform_dir: ptftests + ptf_qlen: 1000 + ptf_test_params: + - vxlan_enabled=False + - config_file='/tmp/vxlan_decap.json' + - count=10 + +- always: + - name: Remove vxlan tunnel maps configuration + shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map{{ item }}" + with_items: minigraph_vlans + + - name: Remove vxlan tunnel configuration + shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan" diff --git 
a/ansible/roles/test/tasks/warm-reboot-fib.yml b/ansible/roles/test/tasks/warm-reboot-fib.yml index f441b75a212..8bb93dec474 100644 --- a/ansible/roles/test/tasks/warm-reboot-fib.yml +++ b/ansible/roles/test/tasks/warm-reboot-fib.yml @@ -2,7 +2,7 @@ # Run FIB Warm Reboot test and Perform log analysis. #-------------------------------------------------- # Perform prechecks -- include_tasks: shared-fib.yml +- include: shared-fib.yml - debug : msg="Start FIB_WARM Test" @@ -40,13 +40,13 @@ become: true when: warm_restart_docker is defined -- name: set restart type - set_fact: - reboot_type: "warm-reboot" +- name: set config warm_restart enable system + shell: config warm_restart enable system + become: true when: warm_restart_docker is not defined - name: wait for 10 secs - pause: + pause: seconds: 10 - name: restart {{ warm_restart_docker}} @@ -55,7 +55,8 @@ when: warm_restart_docker is defined - name: reboot dut - include_tasks: common_tasks/reboot_sonic.yml + shell: reboot + become: true when: warm_restart_docker is not defined - name: check async_ptf status diff --git a/ansible/roles/test/tasks/warm-reboot-multi-sad-inboot.yml b/ansible/roles/test/tasks/warm-reboot-multi-sad-inboot.yml index 86244607e00..915e6aa8157 100644 --- a/ansible/roles/test/tasks/warm-reboot-multi-sad-inboot.yml +++ b/ansible/roles/test/tasks/warm-reboot-multi-sad-inboot.yml @@ -8,14 +8,8 @@ set_fact: in_list: ['routing_del:50', 'routing_add:50'] -- name: set default values vnet variables - set_fact: - vnet: False - vnet_pkts: '' - when: (vnet is not defined) or (vnet_pkts is not defined) - - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: warm-reboot inboot_list: "{{ in_list }}" diff --git a/ansible/roles/test/tasks/warm-reboot-multi-sad.yml b/ansible/roles/test/tasks/warm-reboot-multi-sad.yml index a53bd91fed7..4458fa2d747 100644 --- a/ansible/roles/test/tasks/warm-reboot-multi-sad.yml +++ 
b/ansible/roles/test/tasks/warm-reboot-multi-sad.yml @@ -14,14 +14,8 @@ pre_list: "{{ pre_list + ['dut_lag_member_down:2:{{ lag_memb_cnt }}', 'neigh_lag_member_down:3:{{ lag_memb_cnt }}']}}" when: testbed_type in ['t0-64', 't0-116', 't0-64-32'] -- name: set default values vnet variables - set_fact: - vnet: False - vnet_pkts: '' - when: (vnet is not defined) or (vnet_pkts is not defined) - - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: warm-reboot preboot_list: "{{ pre_list }}" diff --git a/ansible/roles/test/tasks/warm-reboot-sad-bgp.yml b/ansible/roles/test/tasks/warm-reboot-sad-bgp.yml index 0d514761604..4992330c647 100644 --- a/ansible/roles/test/tasks/warm-reboot-sad-bgp.yml +++ b/ansible/roles/test/tasks/warm-reboot-sad-bgp.yml @@ -15,7 +15,7 @@ when: (vnet is not defined) or (vnet_pkts is not defined) - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: warm-reboot preboot_list: "{{ pre_list }}" diff --git a/ansible/roles/test/tasks/warm-reboot-sad-lag-member.yml b/ansible/roles/test/tasks/warm-reboot-sad-lag-member.yml index 653a19e41e4..4e56d0b46e9 100644 --- a/ansible/roles/test/tasks/warm-reboot-sad-lag-member.yml +++ b/ansible/roles/test/tasks/warm-reboot-sad-lag-member.yml @@ -21,7 +21,7 @@ when: (vnet is not defined) or (vnet_pkts is not defined) - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: warm-reboot preboot_list: "{{ pre_list }}" diff --git a/ansible/roles/test/tasks/warm-reboot-sad-lag.yml b/ansible/roles/test/tasks/warm-reboot-sad-lag.yml index 24e443d48fb..7633da1b193 100644 --- a/ansible/roles/test/tasks/warm-reboot-sad-lag.yml +++ b/ansible/roles/test/tasks/warm-reboot-sad-lag.yml @@ -15,7 +15,7 @@ when: (vnet is not defined) or (vnet_pkts is not defined) - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: 
reboot_type: warm-reboot preboot_list: "{{ pre_list }}" diff --git a/ansible/roles/test/tasks/warm-reboot-sad-vlan-port.yml b/ansible/roles/test/tasks/warm-reboot-sad-vlan-port.yml index 7e2e773a86c..7d570606fb8 100644 --- a/ansible/roles/test/tasks/warm-reboot-sad-vlan-port.yml +++ b/ansible/roles/test/tasks/warm-reboot-sad-vlan-port.yml @@ -15,7 +15,7 @@ when: (vnet is not defined) or (vnet_pkts is not defined) - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: warm-reboot preboot_list: "{{ pre_list }}" diff --git a/ansible/roles/test/tasks/warm-reboot-sad.yml b/ansible/roles/test/tasks/warm-reboot-sad.yml index 8875cf32429..6f37e34deed 100644 --- a/ansible/roles/test/tasks/warm-reboot-sad.yml +++ b/ansible/roles/test/tasks/warm-reboot-sad.yml @@ -3,14 +3,8 @@ reboot_limit: 1 when: reboot_limit is not defined -- name: set default values vnet variables - set_fact: - vnet: False - vnet_pkts: '' - when: (vnet is not defined) or (vnet_pkts is not defined) - - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: warm-reboot preboot_list: ['neigh_bgp_down', 'dut_bgp_down', 'dut_lag_down', 'neigh_lag_down', 'dut_lag_member_down:1:1', 'neigh_lag_member_down:1:1', 'vlan_port_down'] diff --git a/ansible/roles/test/tasks/warm-reboot.yml b/ansible/roles/test/tasks/warm-reboot.yml index 68cd2bb7086..7959e2fa18a 100644 --- a/ansible/roles/test/tasks/warm-reboot.yml +++ b/ansible/roles/test/tasks/warm-reboot.yml @@ -3,13 +3,7 @@ reboot_limit: 1 when: reboot_limit is not defined -- name: set default values vnet variables - set_fact: - vnet: False - vnet_pkts: '' - when: (vnet is not defined) or (vnet_pkts is not defined) - - name: Warm-reboot test - include_tasks: advanced-reboot.yml + include: advanced-reboot.yml vars: reboot_type: warm-reboot diff --git a/ansible/roles/test/tasks/wr_arp.yml b/ansible/roles/test/tasks/wr_arp.yml index 91e8db0122b..47c7c6aa7f5 
100644 --- a/ansible/roles/test/tasks/wr_arp.yml +++ b/ansible/roles/test/tasks/wr_arp.yml @@ -9,10 +9,6 @@ script: roles/test/files/helpers/change_mac.sh delegate_to: "{{ ptf_host }}" -- name: set ptf_host_ip - set_fact: - ptf_host_ip: "{{ hostvars[ptf_host]['ansible_host'] }}" - - name: Remove old keys file: path: "{{ item }}" @@ -52,12 +48,12 @@ key: "{{ lookup('file', '/tmp/id_rsa.pub') }}" - name: Restart DUT. Wait 240 seconds after SONiC started ssh - include_tasks: reboot.yml + include: reboot.yml vars: ready_timeout: 240 - name: Find appropriate VXLAN sender - shell: ip route show type unicast | sed -e '/proto 186\|proto zebra/!d' -e '/default/d' -ne '/0\//p' | head -n 1 | sed -ne 's/0\/.*$/1/p' + shell: ip route show proto zebra type unicast | sed -e '/default/d' -ne '/0\//p' | head -n 1 | sed -ne 's/0\/.*$/1/p' register: dip_output - name: Check that VXLAN DIP was found @@ -65,7 +61,7 @@ when: dip_output.stdout | length == 0 - name: Check if the DUT has access to our ptf host - command: ip route get {{ ptf_host_ip }} + command: ip route get {{ ptf_host }} register: route_output - name: Find the gateway for management port @@ -77,7 +73,7 @@ when: gw_output.stdout | length == 0 - name: Install explicit route through eth0 (mgmt) interface, when we don't have correct route to ptf - command: ip route add {{ ptf_host_ip }}/32 {{ gw_output.stdout }} + command: ip route add {{ ptf_host }}/32 {{ gw_output.stdout }} become: yes when: "'PortChannel' in route_output.stdout" @@ -106,7 +102,7 @@ delegate_to: "{{ ptf_host }}" - name: Update supervisor configuration - include_tasks: "roles/test/tasks/common_tasks/update_supervisor.yml" + include: "roles/test/tasks/common_tasks/update_supervisor.yml" vars: supervisor_host: "{{ ptf_host }}" @@ -119,7 +115,7 @@ - name: Dump debug info. 
DIP debug: var=dip_output.stdout -- include_tasks: ptf_runner.yml +- include: ptf_runner.yml vars: ptf_test_name: Warm-Reboot Control-Plane assist feature ptf_test_dir: ptftests @@ -128,7 +124,7 @@ ptf_platform_dir: ptftests ptf_qlen: 1000 ptf_test_params: - - ferret_ip='{{ ptf_host_ip }}' + - ferret_ip='{{ ptf_host }}' - dut_ssh='{{ ansible_eth0.ipv4.address }}' - config_file='/tmp/vxlan_decap.json' - how_long=370 diff --git a/ansible/roles/test/templates/acltb.j2 b/ansible/roles/test/templates/acltb.j2 new file mode 100644 index 00000000000..dedaebdc0f3 --- /dev/null +++ b/ansible/roles/test/templates/acltb.j2 @@ -0,0 +1,21 @@ +{% if testbed_type == "t1" or testbed_type == "t1-lag" or testbed_type == "t1-64-lag" or testbed_type == "t1-64-lag-clet" %} +{# tor ports #} +{% for ifname, v in minigraph_neighbors.iteritems() %}{% if "T0" in v.name %}{{ '%d' % minigraph_port_indices[ifname] }},{% endif %}{% endfor %} + +{# spine ports #} +{% for ifname, v in minigraph_neighbors.iteritems() %}{% if "T2" in v.name %}{{ '%d' % minigraph_port_indices[ifname] }},{% endif %}{% endfor %} + +192.168.0.0 +192.168.0.16 +172.16.1.0 +172.16.2.0 +{% elif testbed_type == "t0" %} +{# todo #} +{% for ifname, info in minigraph_vlans.iteritems() %}{% for m in info['members'] %}{{ '%d' % minigraph_port_indices[m] }},{% endfor %}{% endfor %} + +{% for ifname, v in minigraph_neighbors.iteritems() %}{% if "T1" in v.name %}{{ '%d' % minigraph_port_indices[ifname] }},{% endif %}{% endfor %} + +192.168.4.0 +192.168.8.0 + +{% endif %} diff --git a/ansible/roles/test/templates/bgp_speaker_route.j2 b/ansible/roles/test/templates/bgp_speaker_route.j2 index 95ec43273a2..52d993bed50 100644 --- a/ansible/roles/test/templates/bgp_speaker_route.j2 +++ b/ansible/roles/test/templates/bgp_speaker_route.j2 @@ -1,5 +1,5 @@ {% if addr_family == 'ipv6' %} -{{announce_prefix}} {% for portchannel, v in minigraph_portchannels.iteritems() %}[{% for member in v.members %}{{ '%d' % minigraph_port_indices[member]}}{% 
if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} +{{announce_prefix}} [{% for member in minigraph_portchannels[portchannel_name].members %}{{ '%d' % minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %} {% elif addr_family == 'ipv4' %} 0.0.0.0/0 {% for portchannel, v in minigraph_portchannels.iteritems() %}[{% for member in v.members %}{{ '%d' % minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} diff --git a/ansible/roles/test/templates/decap_conf.j2 b/ansible/roles/test/templates/decap_conf.j2 index dc3952a80f2..74f1fdee6fd 100644 --- a/ansible/roles/test/templates/decap_conf.j2 +++ b/ansible/roles/test/templates/decap_conf.j2 @@ -1,5 +1,5 @@ [ -{% if outer_ipv4 %} +{% if outer_ipv4 | bool %} { "TUNNEL_DECAP_TABLE:IPINIP_V4_TUNNEL" : { "tunnel_type":"IPINIP", @@ -11,9 +11,9 @@ "OP": "{{ op }}" } {% endif %} -{% if outer_ipv6 and outer_ipv4 %} , +{% if outer_ipv6 | bool and outer_ipv4 | bool %} , {% endif %} -{% if outer_ipv6 %} +{% if outer_ipv6 | bool %} { "TUNNEL_DECAP_TABLE:IPINIP_V6_TUNNEL" : { "tunnel_type":"IPINIP", diff --git a/ansible/roles/test/templates/exabgp/start.j2 b/ansible/roles/test/templates/exabgp/start.j2 index f9ed32f7dfb..8d82af951c3 100755 --- a/ansible/roles/test/templates/exabgp/start.j2 +++ b/ansible/roles/test/templates/exabgp/start.j2 @@ -3,12 +3,12 @@ ifconfig eth{{ '%d' % (minigraph_port_indices[minigraph_vlans[minigraph_vlan_int ifconfig eth{{ '%d' % (minigraph_port_indices[minigraph_vlans[minigraph_vlan_interfaces[0]['attachto']]['members'][0]])}}:1 {{item.logical_ip_2}} {% set intf = 'eth%d' % (minigraph_port_indices[minigraph_vlans[minigraph_vlan_interfaces[0]['attachto']]['members'][1]]) %} -ifconfig {{intf}} {% if item.addr_family=='ipv6' %} inet6 add {% endif %} {{item.mux_ip_1}} -i=0; until [ $i -eq 10 ] || ping {{minigraph_vlan_interfaces[0]['addr']}} 
-I {{intf}} -c 1 >/dev/null 2>&1; do i=`expr $i + 1`; done & +ifconfig {{intf}} {{item.mux_ip_1}} +i=0; until [$i -eq 10] || ping {{minigraph_vlan_interfaces[0]['addr']}} -I {{intf}} -c 1 >/dev/null 2>&1; do i=`expr $i + 1`; done & {% set intf = 'eth%d' % (minigraph_port_indices[minigraph_vlans[minigraph_vlan_interfaces[0]['attachto']]['members'][2]]) %} -ifconfig {{intf}} {% if item.addr_family=='ipv6' %} inet6 add {% endif %} {{item.mux_ip_2}} -i=0; until [ $i -eq 10 ] || ping {{minigraph_vlan_interfaces[0]['addr']}} -I {{intf}} -c 1 >/dev/null 2>&1; do i=`expr $i + 1`; done & +ifconfig {{intf}} {{item.mux_ip_2}} +i=0; until [$i -eq 10] || ping {{minigraph_vlan_interfaces[0]['addr']}} -I {{intf}} -c 1 >/dev/null 2>&1; do i=`expr $i + 1`; done & ip route flush {{minigraph_lo_interfaces[0]['addr']}}/{{minigraph_lo_interfaces[0]['prefixlen']}} ip route add {{minigraph_lo_interfaces[0]['addr']}}/{{minigraph_lo_interfaces[0]['prefixlen']}} via {{ minigraph_vlan_interfaces[0]['addr']}} diff --git a/ansible/roles/test/templates/fib.j2 b/ansible/roles/test/templates/fib.j2 index fbe348d4aa7..be18fc23814 100644 --- a/ansible/roles/test/templates/fib.j2 +++ b/ansible/roles/test/templates/fib.j2 @@ -1,36 +1,24 @@ {# defualt route#} {% if testbed_type == 't1' %} -0.0.0.0/0 {% for ifname, v in minigraph_neighbors.iteritems() %} -{% if "T2" in v.name %}{{ '[%d]' % minigraph_port_indices[ifname]}}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} - -::/0 {% for ifname, v in minigraph_neighbors.iteritems() %} -{% if "T2" in v.name %}{{ '[%d]' % minigraph_port_indices[ifname]}}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} - +0.0.0.0/0 {% for ifname, v in minigraph_neighbors.iteritems() %}{% if "T2" in v.name %}{{ '[%d]' % minigraph_port_indices[ifname]}}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} {% elif testbed_type == 't0' or testbed_type == 't0-52'or testbed_type == 't0-64' or testbed_type == 't1-lag' or testbed_type == 't0-64-32' %} 0.0.0.0/0 
{% for portchannel, v in minigraph_portchannels.iteritems() %} [{% for member in v.members %}{{ '%d' % minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} - -::/0 {% for portchannel, v in minigraph_portchannels.iteritems() %} -[{% for member in v.members %}{{ '%d' % minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} - {% elif (testbed_type == 't1-64-lag') or (testbed_type == 't1-64-lag-clet') %} 0.0.0.0/0 [0 1] [4 5] [16 17] [20 21] -::/0 [0 1] [4 5] [16 17] [20 21] {% elif testbed_type == 't0-116' %} 0.0.0.0/0 [24 25] [26 27] [28 29] [30 31] -::/0 [24 25] [26 27] [28 29] [30 31] {% endif %} + {#routes to uplink#} {#Limit the number of podsets and subnets to be covered to limit script execution time#} {% for podset in range(0, [props.podset_number, 10]|min) %} {% for tor in range(0, [props.tor_number, 10]|min) %} {% for subnet in range(0, props.tor_subnet_number) %} {% if testbed_type == 't1' %} -192.168.{{ podset }}.{{ tor * 16 + subnet }}/32 {% for ifname, v in minigraph_neighbors.iteritems() %} -{% if "T2" in v.name %}{{ '[%d]' % minigraph_port_indices[ifname]}}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} +192.168.{{ podset }}.{{ tor * 16 + subnet }}/32 {% for ifname, v in minigraph_neighbors.iteritems() %}{% if "T2" in v.name %}{{ '[%d]' % minigraph_port_indices[ifname]}}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} -20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16 + subnet)}}::/64 {% for ifname, v in minigraph_neighbors.iteritems() %} -{% if "T2" in v.name %}{{ '[%d]' % minigraph_port_indices[ifname]}}{% if not loop.last %} {% endif %}{% endif %}{% endfor %} +20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16 + subnet)}}::/64 {% for ifname, v in minigraph_neighbors.iteritems() %}{% if "T2" in v.name %}{{ '[%d]' % minigraph_port_indices[ifname]}}{% if not loop.last %} {% endif %}{% 
endif %}{% endfor %} {% elif testbed_type == 't1-lag' %} 192.168.{{ podset }}.{{ tor * 16 + subnet }}/32 {% for portchannel, v in minigraph_portchannels.iteritems() %} @@ -41,6 +29,7 @@ {% elif (testbed_type == 't1-64-lag') or (testbed_type == 't1-64-lag-clet') %} 192.168.{{ podset }}.{{ tor * 16 + subnet }}/32 [0 1] [4 5] [16 17] [20 21] + 20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16 + subnet)}}::/64 [0 1] [4 5] [16 17] [20 21] {% elif testbed_type == 't0' or testbed_type == 't0-52' or testbed_type == 't0-64' or testbed_type == 't0-64-32' %} {% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + @@ -54,11 +43,9 @@ {% set prefixlen_v4 = (32 - ((props.tor_subnet_size | log(2))) | int) %} {# Skip 192.168.0.0 as it is in Vlan1000 subnet #} {% if octet2 != 168 and octet3 != 0 and octet4 != 0 %} -{{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} {% for portchannel, v in minigraph_portchannels.iteritems() %} -[{% for member in v.members %}{{ '%d' % minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} +{{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} {% for portchannel, v in minigraph_portchannels.iteritems() %}[{% for member in v.members %}{{ '%d' % minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} -{{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/64 {% for portchannel, v in minigraph_portchannels.iteritems() %} -[{% for member in v.members %}{{ '%d' % minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} +{{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/64 {% for portchannel, v in minigraph_portchannels.iteritems() %}[{% for member in v.members %}{{ '%d' % 
minigraph_port_indices[member]}}{% if not loop.last %} {% endif %}{% endfor %}]{% if not loop.last %} {% endif %}{% endfor %} {% endif %} {% elif testbed_type == 't0-116' %} diff --git a/ansible/roles/test/templates/neighbor_interface_no_shut_single.j2 b/ansible/roles/test/templates/neighbor_interface_no_shut_single.j2 index 81e41fab68a..f704f975cb8 100644 --- a/ansible/roles/test/templates/neighbor_interface_no_shut_single.j2 +++ b/ansible/roles/test/templates/neighbor_interface_no_shut_single.j2 @@ -1,4 +1,4 @@ -configure t +configure interface {{ neighbor_interface }} no shutdown exit diff --git a/ansible/roles/test/templates/neighbor_interface_shut_single.j2 b/ansible/roles/test/templates/neighbor_interface_shut_single.j2 index 35f5f08f1ef..3c8a2e8e38e 100644 --- a/ansible/roles/test/templates/neighbor_interface_shut_single.j2 +++ b/ansible/roles/test/templates/neighbor_interface_shut_single.j2 @@ -1,4 +1,4 @@ -configure t +configure interface {{ neighbor_interface }} shutdown exit diff --git a/ansible/roles/test/templates/ptf_nn_agent.conf.dut.j2 b/ansible/roles/test/templates/ptf_nn_agent.conf.dut.j2 index 7e327fde45d..ba0245578b2 100644 --- a/ansible/roles/test/templates/ptf_nn_agent.conf.dut.j2 +++ b/ansible/roles/test/templates/ptf_nn_agent.conf.dut.j2 @@ -1,5 +1,5 @@ [program:ptf_nn_agent] -command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 1@tcp://0.0.0.0:10900 -i 1-{{ nn_target_port }}@{{ nn_target_interface }} --set-nn-rcv-buffer=109430400 --set-iface-rcv-buffer=109430400 --set-nn-snd-buffer=109430400 --set-iface-snd-buffer=109430400 +command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 1@tcp://0.0.0.0:10900 -i 1-{{ nn_target_port }}@{{ nn_target_interface }} --set-nn-rcv-buffer=609430400 --set-iface-rcv-buffer=609430400 --set-nn-snd-buffer=609430400 --set-iface-snd-buffer=609430400 process_name=ptf_nn_agent stdout_logfile=/tmp/ptf_nn_agent.out.log stderr_logfile=/tmp/ptf_nn_agent.err.log diff --git 
a/ansible/roles/test/templates/vlan_configuration.j2 b/ansible/roles/test/templates/vlan_configuration.j2 new file mode 100644 index 00000000000..bbdb2026b5e --- /dev/null +++ b/ansible/roles/test/templates/vlan_configuration.j2 @@ -0,0 +1,29 @@ +{ + "PORTCHANNEL_INTERFACE": {}, + "VLAN": { +{% for vlan_intf in vlan_intf_list %} + "Vlan{{ vlan_intf.vlan_id }}": { + "vlanid": {{ vlan_intf.vlan_id }} + }{{ "," if not loop.last else "" }} +{% endfor %} + }, + "VLAN_INTERFACE": { +{% for vlan_intf in vlan_intf_list %} + "Vlan{{ vlan_intf.vlan_id }}|{{ vlan_intf.ip }}": {}{{ "," if not loop.last else "" }} +{% endfor %} + }, + "VLAN_MEMBER": { +{% for vlan_port in vlan_ports_list %} +{% set outer_loop = loop %} +{% for permit_vlanid in vlan_port['permit_vlanid'].keys() %} + "Vlan{{ permit_vlanid }}|{{ vlan_port['dev'] }}": { +{% if vlan_port['pvid'] == permit_vlanid %} + "tagging_mode": "untagged" +{% else %} + "tagging_mode": "tagged" +{% endif %} + }{{ "," if not outer_loop.last or not loop.last else "" }} +{% endfor %} +{% endfor %} + } +} diff --git a/ansible/roles/test/templates/vlan_info.j2 b/ansible/roles/test/templates/vlan_info.j2 new file mode 100644 index 00000000000..dec14186944 --- /dev/null +++ b/ansible/roles/test/templates/vlan_info.j2 @@ -0,0 +1,35 @@ +--- + +{% set vlan_id_list = [ 100, 200 ] %} +vlan_ports_list: +{% for portchannel in minigraph_portchannels.keys()[:2] %} +{% set members = minigraph_portchannels[portchannel].members %} + - dev: {{ portchannel }} + port_index: {{ minigraph_port_indices[members[0]] }} + pvid: '{{ vlan_id_list[loop.index0%2] }}' + permit_vlanid: +{% for vlan in vlan_id_list %} + '{{ vlan }}': + peer_ip: '192.168.{{ vlan }}.{{ 2 + minigraph_port_indices.keys().index(members[0]) }}' + remote_ip: '{{vlan}}.1.1.{{ 2 + minigraph_port_indices.keys().index(members[0]) }}' +{% endfor %} +{% endfor %} +{% for port in minigraph_ports.keys()[:2] %} + - dev: {{ port }} + port_index: '{{ minigraph_port_indices[port]}}' + pvid: '{{ 
vlan_id_list[loop.index0%2] }}' + permit_vlanid: +{% for vlan in vlan_id_list %} + '{{ vlan }}': + peer_ip: '192.168.{{ vlan }}.{{ 2 + minigraph_port_indices.keys().index(port) }}' + remote_ip: '{{vlan}}.1.1.{{ 2 + minigraph_port_indices.keys().index(port) }}' +{% endfor %} +{% endfor %} + +vlan_intf_list: +{% for vlan in vlan_id_list %} + - vlan_id: '{{ (vlan|int) }}' + ip: '192.168.{{ vlan }}.1/24' +{% endfor %} + +... diff --git a/ansible/roles/test/templates/vxlan_db.json.j2 b/ansible/roles/test/templates/vxlan_db.json.j2 new file mode 100644 index 00000000000..697c3aba3e6 --- /dev/null +++ b/ansible/roles/test/templates/vxlan_db.json.j2 @@ -0,0 +1,15 @@ +{ + "VXLAN_TUNNEL": { + "tunnel{{ item }}": { + "src_ip": "{{ minigraph_lo_interfaces[0]['addr'] }}", + "dst_ip": "8.8.{{ item | replace("Vlan", "") | int // 256 }}.{{ item | replace("Vlan", "") | int % 254 + 1 }}" + } + }, + "VXLAN_TUNNEL_MAP": { + "tunnel{{ item }}|map1": { + "vni": "{{ item | replace("Vlan", "") | int + 336 }}", + "vlan": "{{ item }}" + } + } +} + diff --git a/ansible/roles/test/templates/vxlan_db.maps.json.j2 b/ansible/roles/test/templates/vxlan_db.maps.json.j2 new file mode 100644 index 00000000000..1be0cf7c6ea --- /dev/null +++ b/ansible/roles/test/templates/vxlan_db.maps.json.j2 @@ -0,0 +1,9 @@ +{ + "VXLAN_TUNNEL_MAP": { + "tunnelVxlan|map{{ item }}": { + "vni": "{{ item | replace("Vlan", "") | int + 336 }}", + "vlan": "{{ item }}" + } + } +} + diff --git a/ansible/roles/test/templates/vxlan_db.tunnel.json.j2 b/ansible/roles/test/templates/vxlan_db.tunnel.json.j2 new file mode 100644 index 00000000000..f4671fe6e21 --- /dev/null +++ b/ansible/roles/test/templates/vxlan_db.tunnel.json.j2 @@ -0,0 +1,8 @@ +{ + "VXLAN_TUNNEL": { + "tunnelVxlan": { + "src_ip": "{{ minigraph_lo_interfaces[0]['addr'] }}", + "dst_ip": "8.8.8.8" + } + } +} diff --git a/ansible/roles/test/templates/vxlan_decap.json.j2 b/ansible/roles/test/templates/vxlan_decap.json.j2 new file mode 100644 index 
00000000000..ab68c860c33 --- /dev/null +++ b/ansible/roles/test/templates/vxlan_decap.json.j2 @@ -0,0 +1,9 @@ +{ + "minigraph_port_indices": {{ minigraph_port_indices | to_nice_json }}, + "minigraph_portchannel_interfaces": {{ minigraph_portchannel_interfaces | to_nice_json }}, + "minigraph_portchannels": {{ minigraph_portchannels | to_nice_json }}, + "minigraph_lo_interfaces": {{ minigraph_lo_interfaces | to_nice_json }}, + "minigraph_vlans": {{ minigraph_vlans | to_nice_json }}, + "minigraph_vlan_interfaces": {{ minigraph_vlan_interfaces | to_nice_json }}, + "dut_mac": {{ ansible_Ethernet0['macaddress'] | to_nice_json }} +} diff --git a/ansible/roles/test/vars/testcases.yml b/ansible/roles/test/vars/testcases.yml index e40bd880012..2b5aee110a3 100644 --- a/ansible/roles/test/vars/testcases.yml +++ b/ansible/roles/test/vars/testcases.yml @@ -1,65 +1,56 @@ testcases: acl: filename: acltb.yml - topologies: [t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: testbed_type: arp: filename: arpall.yml - topologies: [ptf32, ptf64, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [ptf32, ptf64, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: - ecmp: - filename: ecmp.yml - topologies: [t1] - - bgp_bounce: - filename: bgp_bounce.yml - topologies: [t1] + bgp_fact: + filename: bgp_fact.yml + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag,t1-slx, t1-64-lag-clet] bgp_gr_helper: filename: bgp_gr_helper.yml - topologies: [t1, t1-lag, t1-64-lag, t1-64-lag-clet] - - bgp_fact: - filename: bgp_fact.yml - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] bgp_multipath_relax: filename: bgp_multipath_relax.yml - topologies: [t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t1,t1-slx, t1-lag, t1-64-lag, 
t1-64-lag-clet] required_vars: ptf_host: testbed_type: bgp_speaker: filename: bgp_speaker.yml - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116,t0-slx,t0-e1031] required_vars: ptf_host: testbed_type: config: filename: config.yml - topologies: [t1-lag, t1-64-lag, t1-64-lag-clet, t0, t0-64, t0-116] + topologies: [t1-lag, t1-64-lag, t1-64-lag-clet, t0, t0-64, t0-116,t0-slx, t0-e1031] continuous_reboot: filename: continuous_reboot.yml - vtestbed_compatible: no - topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116,t0-slx,t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] copp: filename: copp.yml - topologies: [ptf32, ptf64, t1, t1-lag] + topologies: [ptf32, ptf64, t1,t1-slx, t1-lag] required_vars: ptf_host: decap: filename: decap.yml - topologies: [t1, t1-lag, t1-64-lag, t1-64-lag-clet, t0, t0-52, t0-56, t0-64, t0-116] + topologies: [t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, t0, t0-52, t0-56, t0-64, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: testbed_type: @@ -67,56 +58,52 @@ testcases: dhcp_relay: filename: dhcp_relay.yml - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: ecn_wred: filename: ecn_wred.yml - topologies: [t0, t1, t1-lag] + topologies: [t0, t1,t0-slx, t0-e1031, t1,t1-slx, t1-lag] everflow_testbed: filename: everflow_testbed.yml - topologies: [t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: testbed_type: fast-reboot: filename: fast-reboot.yml - vtestbed_compatible: no - topologies: [t0, t0-56, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-56, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: warm-reboot: filename: warm-reboot.yml - 
vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: warm-reboot-sad: filename: warm-reboot-sad.yml - vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: warm-reboot-multi-sad: filename: warm-reboot-multi-sad.yml - vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: warm-reboot-multi-sad-inboot: filename: warm-reboot-multi-sad-inboot.yml - topologies: [t0, t0-64, t0-64-32, t0-116, t0-56] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-56,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: @@ -124,7 +111,7 @@ testcases: warm-reboot-sad-bgp: filename: warm-reboot-sad-bgp.yml vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116, t0-56] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-56,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: @@ -132,7 +119,7 @@ testcases: warm-reboot-sad-lag: filename: warm-reboot-sad-lag.yml vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116, t0-56] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-56,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: @@ -140,7 +127,7 @@ testcases: warm-reboot-sad-lag-member: filename: warm-reboot-sad-lag-member.yml vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116, t0-56] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-56,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: @@ -148,43 +135,35 @@ testcases: warm-reboot-sad-vlan-port: filename: warm-reboot-sad-vlan-port.yml vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116, t0-56] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-56,t0-slx, t0-e1031] required_vars: ptf_host: vm_hosts: fib: filename: simple-fib.yml - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, 
t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet] - required_vars: - ptf_host: - testbed_type: - - hash: - filename: hash.yml - topologies: [t0, t0-16, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: testbed_type: warm-reboot-fib: filename: warm-reboot-fib.yml - vtestbed_compatible: no - topologies: [t0, t0-16, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t0, t0-16, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: testbed_type: fdb: filename: fdb.yml - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: testbed_type: fdb_mac_expire: filename: fdb_mac_expire.yml - topologies: [t0, t0-64, t0-64-32, t0-116, t0-52] + topologies: [t0, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: fdb_aging_time: ptf_host: @@ -192,35 +171,33 @@ testcases: dir_bcast: filename: dir_bcast.yml - topologies: [t0, t0-16, t0-56, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-16, t0-56, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031] required_vars: ptf_host: testbed_type: lag_2: filename: lag_2.yml - topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116,t0-slx, t0-e1031, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: testbed_type: lldp: filename: lldp.yml - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-116, t0-64-32, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-116, t0-64-32, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] link_flap: filename: link_flap.yml - vtestbed_compatible: no - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, 
t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] - continuous_link_flap: - filename: continuous_link_flap.yml - vtestbed_compatible: no - topologies: [t0, t0-16, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + mem_check: + filename: mem_check.yml + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] mtu: filename: mtu.yml - topologies: [t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: testbed_type: @@ -233,122 +210,101 @@ testcases: neighbour_mac_noptf: filename: neighbour-mac-noptf.yml - topologies: [t0, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] ntp: filename: ntp.yml - topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] pfc_wd: filename: pfc_wd.yml - vtestbed_compatible: no - topologies: [t0, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] - - portstat: - filename: portstat.yml - topologies: [t0, t0-16, t0-56, t0-64, t0-116, t1] + topologies: [t0, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] port_toggle: filename: port_toggle.yml - vtestbed_compatible: no - topologies: [t0, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + qos: + 
filename: qos.yml + topologies: [ptf32, ptf64] + qos_sai: filename: qos_sai.yml topologies: [ptf32, ptf64, t1, t1-lag, t0, t0-64, t0-116] reboot: filename: reboot.yml - topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] repeat_harness: filename: repeat_harness.yml - topologies: [t0, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] restart_swss: filename: run_config_cleanup.yml - topologies: [t0, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] restart_swss_service: filename: restart_swss.yml - topologies: [t0, t0-16, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-16, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] restart_syncd: filename: restart_syncd.yml - topologies: [t0, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] sensors: filename: sensors_check.yml - vtestbed_compatible: no - topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] service_acl: filename: service_acl.yml - topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, 
t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] snmp: filename: snmp.yml - topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] syslog: filename: syslog.yml - topologies: [t0, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] + topologies: [t0, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet, ptf32, ptf64] vlan: filename: vlantb.yml - topologies: [t0, t0-16, t0-116] + topologies: [t0, t0-16, t0-116, t0-slx, t0-e1031] required_vars: ptf_host: testbed_type: crm: filename: crm.yml - topologies: [t1, t1-lag, t0, t0-52, t0-56, t0-64, t0-116] + topologies: [ t0-slx, t0-e1031, t1,t1-slx, t1-lag, t0, t0-52, t0-56, t0-64, t0-116] dip_sip: filename: dip_sip.yml - topologies: [t0, t0-16, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag, t1-64-lag, t1-64-lag-clet] + topologies: [t0, t0-16, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031, t1,t1-slx, t1-lag, t1-64-lag, t1-64-lag-clet] required_vars: ptf_host: testbed_type: vxlan_decap: filename: vxlan-decap.yml - vtestbed_compatible: no - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031] required_vars: ptf_host: wr_arp: filename: wr_arp.yml - vtestbed_compatible: no - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116] - required_vars: - ptf_host: - - vnet_vxlan: - filename: vnet_vxlan.yml - vtestbed_compatible: no - topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116] - required_vars: - ptf_host: - - warm_reboot_vnet: - filename: warm-reboot-vnet.yml - topologies: [t0, t0-64, t0-64-32, t0-116] + topologies: [t0, t0-16, t0-52, t0-56, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031] required_vars: ptf_host: pfc_asym: 
filename: pfc_asym.yml - topologies: [t0] + topologies: [t0, t0-slx, t0-e1031] iface_mode: filename: iface_naming_mode.yml - topologies: [t0, t0-16, t0-64, t0-64-32, t0-116, t1, ptf32, ptf64] + topologies: [t0, t0-16, t0-64, t0-64-32, t0-116, t0-slx, t0-e1031,t1, t1-slx,ptf32, ptf64] required_vars: testbed_type: - read_mac: - filename: read_mac_metadata.yml - topologies: [t0, t1, t1-lag] diff --git a/ansible/roles/vm_set/library/kvm_port.py b/ansible/roles/vm_set/library/kvm_port.py index ef53f0933e9..70b9c5b9ab2 100644 --- a/ansible/roles/vm_set/library/kvm_port.py +++ b/ansible/roles/vm_set/library/kvm_port.py @@ -29,9 +29,9 @@ def main(): try: output = subprocess.check_output( - "virsh domiflist %s" % vmname, - env={"LIBVIRT_DEFAULT_URI": "qemu:///system"}, - shell=True).decode('utf-8') + "virsh domiflist %s" % vmname, + env={"LIBVIRT_DEFAULT_URI": "qemu:///system"}, + shell=True) except subprocess.CalledProcessError: module.fail_json(msg="failed to iflist dom %s" % vmname) @@ -39,7 +39,7 @@ def main(): fp_ports = [] for l in output.split('\n'): - fds = re.split('\s+', l.lstrip()) + fds = re.split('\s+', l) if len(fds) != 5: continue if fds[1] == "ethernet": diff --git a/ansible/roles/vm_set/library/vm_topology.py b/ansible/roles/vm_set/library/vm_topology.py index dde570608ec..d8e8af640cc 100644 --- a/ansible/roles/vm_set/library/vm_topology.py +++ b/ansible/roles/vm_set/library/vm_topology.py @@ -5,7 +5,7 @@ import os import os.path import re -import docker +from docker import Client from ansible.module_utils.basic import * import traceback from pprint import pprint @@ -51,10 +51,7 @@ - vm_names: list of VMs represented on a current host - vm_base: which VM consider the first VM in the current vm set - ptf_mgmt_ip_addr: ip address with prefixlen for the injected docker container - - ptf_mgmt_ipv6_addr: ipv6 address with prefixlen for the injected docker container - ptf_mgmt_ip_gw: default gateway for the injected docker container - - ptf_bp_ip_addr: ipv6 
address with prefixlen for the injected docker container - - ptf_bp_ipv6_addr: ipv6 address with prefixlen for the injected docker container - mgmt_bridge: a bridge which is used as mgmt bridge on the host - dut_fp_ports: dut ports - dut_mgmt_port: dut mgmt port @@ -76,10 +73,7 @@ vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" ptf_mgmt_ip_addr: "{{ ptf_ip }}" - ptf_mgmt_ipv6_addr: "{{ ptf_ipv6 }}" ptf_mgmt_ip_gw: "{{ mgmt_gw }}" - ptf_bp_ip_addr: "{{ ptf_ip }}" - ptf_bp_ipv6_addr: "{{ ptf_ip }}" mgmt_bridge: "{{ mgmt_bridge }}" dut_mgmt_port: "{{ dut_mgmt_port }}" dut_fp_ports: "{{ dut_fp_ports }}" @@ -91,47 +85,26 @@ DEFAULT_MTU = 0 NUM_FP_VLANS_PER_FP = 4 VM_SET_NAME_MAX_LEN = 8 # used in interface names. So restricted -MGMT_PORT_NAME = 'mgmt' -BP_PORT_NAME = 'backplane' -CMD_DEBUG_FNAME = "/tmp/vmtopology.cmds.%s.txt" -EXCEPTION_DEBUG_FNAME = "/tmp/vmtopology.exception.%s.txt" +MGMT_BR_NAME = 'mgmt' +CMD_DEBUG_FNAME = '/tmp/vmtopology.cmds.txt' +EXCEPTION_DEBUG_FNAME = '/tmp/vmtopology.exception.txt' OVS_FP_BRIDGE_REGEX = 'br-%s-\d+' OVS_FP_BRIDGE_TEMPLATE = 'br-%s-%d' OVS_FP_TAP_TEMPLATE = '%s-t%d' -OVS_BP_TAP_TEMPLATE = '%s-back' +OVS_BRIDGE_BACK_TEMPLATE = 'br-%s-back' INJECTED_INTERFACES_TEMPLATE = 'inje-%s-%d' PTF_NAME_TEMPLATE = 'ptf_%s' PTF_MGMT_IF_TEMPLATE = 'ptf-%s-m' -PTF_BP_IF_TEMPLATE = 'ptf-%s-b' ROOT_BACK_BR_TEMPLATE = 'br-b-%s' PTF_FP_IFACE_TEMPLATE = 'eth%d' +BACK_ROOT_END_IF_TEMPLATE = 'veth-bb-%s' +BACK_VM_END_IF_TEMPLATE = 'veth-bv-%s' RETRIES = 3 -cmd_debug_fname = None - - -class HostInterfaces(object): - """Data descriptor that supports multi-DUTs interface definition.""" - - def __get__(self, obj, objtype): - return obj._host_interfaces - - def __set__(self, obj, host_interfaces): - """Parse and set host interfaces.""" - if obj._is_multi_duts: - obj._host_interfaces = [] - for intf in host_interfaces: - obj._host_interfaces.append( - tuple(map(int, intf.strip().split(".")))) - else: - obj._host_interfaces = host_interfaces - class 
VMTopology(object): - host_interfaces = HostInterfaces() - def __init__(self, vm_names, fp_mtu, max_fp_num): self.vm_names = vm_names self.fp_mtu = fp_mtu @@ -141,26 +114,22 @@ def __init__(self, vm_names, fp_mtu, max_fp_num): return - def init(self, vm_set_name, topo, vm_base, dut_fp_ports, ptf_exists=True, - is_multi_duts=False): + def init(self, vm_set_name, topo, vm_base, dut_fp_ports, ptf_exists=True): self.vm_set_name = vm_set_name - self.VMs = {} if 'VMs' in topo: + self.VMs = topo['VMs'] self.vm_base = vm_base if vm_base in self.vm_names: self.vm_base_index = self.vm_names.index(vm_base) else: raise Exception('VM_base "%s" should be presented in current vm_names: %s' % (vm_base, str(self.vm_names))) - for k, v in topo['VMs'].items(): - if self.vm_base_index + v['vm_offset'] < len(self.vm_names): - self.VMs[k] = v - - for hostname, attrs in self.VMs.items(): + for hostname, attrs in self.VMs.iteritems(): vmname = self.vm_names[self.vm_base_index + attrs['vm_offset']] if len(attrs['vlans']) > len(self.get_bridges(vmname)): raise Exception("Wrong vlans parameter for hostname %s, vm %s. Too many vlans. 
Maximum is %d" % (hostname, vmname, len(self.get_bridges(vmname)))) - - self._is_multi_duts = is_multi_duts + else: + self.VMs = {} + if 'host_interfaces' in topo: self.host_interfaces = topo['host_interfaces'] else: @@ -175,49 +144,38 @@ def init(self, vm_set_name, topo, vm_base, dut_fp_ports, ptf_exists=True, else: self.pid = None - self.bp_bridge = ROOT_BACK_BR_TEMPLATE % self.vm_set_name - self.update() return def update(self): - errmsg = [] - i = 0 - while i < 3: - try: - self.host_br_to_ifs, self.host_if_to_br = VMTopology.brctl_show() - self.host_ifaces = VMTopology.ifconfig('ifconfig -a') - if self.pid is not None: - self.cntr_ifaces = VMTopology.ifconfig('nsenter -t %s -n ifconfig -a' % self.pid) - else: - self.cntr_ifaces = [] - break - except Exception as error: - errmsg.append(str(error)) - i += 1 - - if i == 3: - raise Exception("update failed for %d times. %s" % (i, "|".join(errmsg))) + self.host_br_to_ifs, self.host_if_to_br = VMTopology.brctl('brctl show') + self.host_ifaces = VMTopology.ifconfig('ifconfig -a') + if self.pid is not None: + self.cntr_ifaces = VMTopology.ifconfig('nsenter -t %s -n ifconfig -a' % self.pid) + else: + self.cntr_ifaces = [] return def extract_vm_vlans(self): vlans = [] - for attr in self.VMs.values(): + for attr in self.VMs.itervalues(): vlans.extend(attr['vlans']) return vlans def create_bridges(self): for vm in self.vm_names: - for fp_num in range(self.max_fp_num): + for fp_num in xrange(self.max_fp_num): fp_br_name = OVS_FP_BRIDGE_TEMPLATE % (vm, fp_num) - self.create_ovs_bridge(fp_br_name, self.fp_mtu) + self.create_bridge(fp_br_name, self.fp_mtu) + bport_br_name = OVS_BRIDGE_BACK_TEMPLATE = 'br-%s-back' % vm + self.create_bridge(bport_br_name, self.fp_mtu) return - def create_ovs_bridge(self, bridge_name, mtu): + def create_bridge(self, bridge_name, mtu): if bridge_name not in self.host_ifaces: VMTopology.cmd('ovs-vsctl add-br %s' % bridge_name) @@ -232,11 +190,13 @@ def destroy_bridges(self): for vm in 
self.vm_names: for ifname in self.host_ifaces: if re.compile(OVS_FP_BRIDGE_REGEX % vm).match(ifname): - self.destroy_ovs_bridge(ifname) + self.destroy_bridge(ifname) + bport_br_name = OVS_BRIDGE_BACK_TEMPLATE = 'br-%s-back' % vm + self.destroy_bridge(bport_br_name) return - def destroy_ovs_bridge(self, bridge_name): + def destroy_bridge(self, bridge_name): if bridge_name in self.host_ifaces: VMTopology.cmd('ifconfig %s down' % bridge_name) VMTopology.cmd('ovs-vsctl del-br %s' % bridge_name) @@ -259,16 +219,9 @@ def add_veth_ports_to_docker(self): return - def add_mgmt_port_to_docker(self, mgmt_bridge, mgmt_ip, mgmt_gw, mgmt_ipv6_addr=None): - self.add_br_if_to_docker(mgmt_bridge, PTF_MGMT_IF_TEMPLATE % self.vm_set_name, MGMT_PORT_NAME) - self.add_ip_to_docker_if(MGMT_PORT_NAME, mgmt_ip, mgmt_ipv6_addr=mgmt_ipv6_addr, mgmt_gw=mgmt_gw) - - return - - def add_bp_port_to_docker(self, mgmt_ip, mgmt_ipv6): - self.add_br_if_to_docker(self.bp_bridge, PTF_BP_IF_TEMPLATE % self.vm_set_name, BP_PORT_NAME) - self.add_ip_to_docker_if(BP_PORT_NAME, mgmt_ip, mgmt_ipv6) - VMTopology.iface_disable_txoff(BP_PORT_NAME, self.pid) + def add_mgmt_port_to_docker(self, mgmt_bridge, mgmt_ip, mgmt_gw): + self.add_br_if_to_docker(mgmt_bridge, PTF_MGMT_IF_TEMPLATE % self.vm_set_name, MGMT_BR_NAME) + self.add_ip_to_docker_if(MGMT_BR_NAME, mgmt_ip, mgmt_gw) return @@ -291,15 +244,12 @@ def add_br_if_to_docker(self, bridge, ext_if, int_if): return - def add_ip_to_docker_if(self, int_if, mgmt_ip_addr, mgmt_ipv6_addr=None, mgmt_gw=None): + def add_ip_to_docker_if(self, int_if, mgmt_ip_addr, mgmt_gw): self.update() if int_if in self.cntr_ifaces: VMTopology.cmd("nsenter -t %s -n ip addr flush dev %s" % (self.pid, int_if)) VMTopology.cmd("nsenter -t %s -n ip addr add %s dev %s" % (self.pid, mgmt_ip_addr, int_if)) - if mgmt_ipv6_addr: - VMTopology.cmd("nsenter -t %s -n ip -6 addr add %s dev %s" % (self.pid, mgmt_ipv6_addr, int_if)) - if mgmt_gw: - VMTopology.cmd("nsenter -t %s -n ip route add default 
via %s dev %s" % (self.pid, mgmt_gw, int_if)) + VMTopology.cmd("nsenter -t %s -n ip route add default via %s dev %s" % (self.pid, mgmt_gw, int_if)) return @@ -382,63 +332,79 @@ def unbind_mgmt_port(self, mgmt_port): return def bind_fp_ports(self, disconnect_vm=False): - for attr in self.VMs.values(): + for attr in self.VMs.itervalues(): for vlan_num, vlan in enumerate(attr['vlans']): - injected_iface = INJECTED_INTERFACES_TEMPLATE % (self.vm_set_name, vlan) - br_name = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) - vm_iface = OVS_FP_TAP_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) - self.bind_ovs_ports(br_name, self.dut_fp_ports[vlan], injected_iface, vm_iface, disconnect_vm) + injected_iface = INJECTED_INTERFACES_TEMPLATE % (self.vm_set_name, vlan) + br_name = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + vm_iface = OVS_FP_TAP_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + self.bind_ovs_ports(br_name, self.dut_fp_ports[vlan], injected_iface, vm_iface, disconnect_vm) return def unbind_fp_ports(self): - for attr in self.VMs.values(): + for attr in self.VMs.itervalues(): for vlan_num, vlan in enumerate(attr['vlans']): - br_name = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) - vm_iface = OVS_FP_TAP_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) - self.unbind_ovs_ports(br_name, vm_iface) + br_name = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + vm_iface = OVS_FP_TAP_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + self.unbind_ovs_ports(br_name, vm_iface) return def bind_vm_backplane(self): + root_back_bridge = ROOT_BACK_BR_TEMPLATE % self.vm_set_name - if self.bp_bridge not in self.host_ifaces: - VMTopology.cmd('brctl addbr %s' % self.bp_bridge) + if 
root_back_bridge not in self.host_ifaces: + VMTopology.cmd('ovs-vsctl add-br %s' % root_back_bridge) - VMTopology.iface_up(self.bp_bridge) + VMTopology.iface_up(root_back_bridge) - self.update() - - for attr in self.VMs.values(): + for attr in self.VMs.itervalues(): vm_name = self.vm_names[self.vm_base_index + attr['vm_offset']] - bp_port_name = OVS_BP_TAP_TEMPLATE % vm_name + br_name = OVS_BRIDGE_BACK_TEMPLATE % vm_name + + back_int_name = BACK_ROOT_END_IF_TEMPLATE % vm_name + vm_int_name = BACK_VM_END_IF_TEMPLATE % vm_name + + if back_int_name not in self.host_ifaces: + VMTopology.cmd("ip link add %s type veth peer name %s" % (back_int_name, vm_int_name)) - if bp_port_name not in self.host_br_to_ifs[self.bp_bridge]: - VMTopology.cmd("brctl addif %s %s" % (self.bp_bridge, bp_port_name)) + if vm_int_name not in VMTopology.get_ovs_br_ports(br_name): + VMTopology.cmd("ovs-vsctl add-port %s %s" % (br_name, vm_int_name)) - VMTopology.iface_up(bp_port_name) + if back_int_name not in VMTopology.get_ovs_br_ports(root_back_bridge): + VMTopology.cmd("ovs-vsctl add-port %s %s" % (root_back_bridge, back_int_name)) + + VMTopology.iface_up(vm_int_name) + VMTopology.iface_up(back_int_name) return def unbind_vm_backplane(self): + root_back_bridge = ROOT_BACK_BR_TEMPLATE % self.vm_set_name + + if root_back_bridge in self.host_ifaces: + VMTopology.iface_down(root_back_bridge) + VMTopology.cmd('ovs-vsctl del-br %s' % root_back_bridge) + + for attr in self.VMs.itervalues(): + vm_name = self.vm_names[self.vm_base_index + attr['vm_offset']] + br_name = OVS_BRIDGE_BACK_TEMPLATE % vm_name - if self.bp_bridge in self.host_ifaces: - VMTopology.iface_down(self.bp_bridge) - VMTopology.cmd('brctl delbr %s' % self.bp_bridge) + back_int_name = BACK_ROOT_END_IF_TEMPLATE % vm_name + vm_int_name = BACK_VM_END_IF_TEMPLATE % vm_name + + self.unbind_ovs_port(br_name, vm_int_name) + + if back_int_name in self.host_ifaces: + VMTopology.iface_down(back_int_name) + VMTopology.cmd("ip link delete dev %s" 
% back_int_name) return def bind_ovs_ports(self, br_name, dut_iface, injected_iface, vm_iface, disconnect_vm=False): """bind dut/injected/vm ports under an ovs bridge""" - br = VMTopology.get_ovs_bridge_by_port(injected_iface) - if br is not None and br != br_name: - VMTopology.cmd('ovs-vsctl del-port %s %s' % (br, injected_iface)) - - br = VMTopology.get_ovs_bridge_by_port(dut_iface) - if br is not None and br != br_name: - VMTopology.cmd('ovs-vsctl del-port %s %s' % (br, dut_iface)) - ports = VMTopology.get_ovs_br_ports(br_name) + if injected_iface not in ports: VMTopology.cmd('ovs-vsctl add-port %s %s' % (br_name, injected_iface)) @@ -494,28 +460,16 @@ def unbind_ovs_port(self, br_name, port): def inject_host_ports(self): """inject dut port into the ptf docker""" self.update() - for i, intf in enumerate(self.host_interfaces): - if self._is_multi_duts: - fp_port = self.dut_fp_ports[intf[0]][intf[1]] - ptf_intf = PTF_FP_IFACE_TEMPLATE % i - else: - fp_port = self.dut_fp_ports[intf] - ptf_intf = PTF_FP_IFACE_TEMPLATE % intf - self.add_dut_if_to_docker(ptf_intf, fp_port) + for vlan in self.host_interfaces: + self.add_dut_if_to_docker(PTF_FP_IFACE_TEMPLATE % vlan, self.dut_fp_ports[vlan]) return def deject_host_ports(self): """deject dut port from the ptf docker""" self.update() - for i, intf in enumerate(self.host_interfaces): - if self._is_multi_duts: - fp_port = self.dut_fp_ports[intf[0]][intf[1]] - ptf_intf = PTF_FP_IFACE_TEMPLATE % i - else: - fp_port = self.dut_fp_ports[intf] - ptf_intf = PTF_FP_IFACE_TEMPLATE % intf - self.remove_dut_if_from_docker(ptf_intf, fp_port) + for vlan in self.host_interfaces: + self.remove_dut_if_from_docker(PTF_FP_IFACE_TEMPLATE % vlan, self.dut_fp_ports[vlan]) @staticmethod def iface_up(iface_name, pid=None): @@ -532,16 +486,9 @@ def iface_updown(iface_name, state, pid): else: return VMTopology.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, state)) - @staticmethod - def iface_disable_txoff(iface_name, pid=None): - if 
pid is None: - return VMTopology.cmd('ethtool -K %s tx off' % (iface_name)) - else: - return VMTopology.cmd('nsenter -t %s -n ethtool -K %s tx off' % (pid, iface_name)) - @staticmethod def cmd(cmdline): - with open(cmd_debug_fname, 'a') as fp: + with open(CMD_DEBUG_FNAME, 'a') as fp: pprint("CMD: %s" % cmdline, fp) cmd = cmdline.split(' ') process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) @@ -551,9 +498,9 @@ def cmd(cmdline): if ret_code != 0: raise Exception("ret_code=%d, error message=%s. cmd=%s" % (ret_code, stderr, cmdline)) - with open(cmd_debug_fname, 'a') as fp: + with open(CMD_DEBUG_FNAME, 'a') as fp: pprint("OUTPUT: %s" % stdout, fp) - return stdout.decode('utf-8') + return stdout @staticmethod def get_ovs_br_ports(bridge): @@ -564,16 +511,6 @@ def get_ovs_br_ports(bridge): ports.add(port) return ports - @staticmethod - def get_ovs_bridge_by_port(port): - try: - out = VMTopology.cmd('ovs-vsctl port-to-br %s' % port) - except: - return None - - bridge = out.rstrip() - return bridge - @staticmethod def get_ovs_port_bindings(bridge, vlan_iface = None): # Vlan interface addition may take few secs to reflect in OVS Command, @@ -592,7 +529,7 @@ def get_ovs_port_bindings(bridge, vlan_iface = None): if vlan_iface is None or vlan_iface in result: return result time.sleep(2*retries+1) - # Flow reaches here when vlan_iface not present in result + # Flow reaches here when vlan_iface not present in result raise Exception("Can't find vlan_iface_id") @staticmethod @@ -613,17 +550,17 @@ def ifconfig(cmdline): @staticmethod def get_pid(ptf_name): - cli = docker.from_env() + cli = Client(base_url='unix://var/run/docker.sock') try: - ctn = cli.containers.get(ptf_name) + result = cli.inspect_container(ptf_name) except: return None - return ctn.attrs['State']['Pid'] + return result['State']['Pid'] @staticmethod - def brctl_show(): - out = VMTopology.cmd("brctl show") + def brctl(cmdline): + out = VMTopology.cmd(cmdline) 
br_to_ifs = {} if_to_br = {} @@ -646,13 +583,7 @@ def brctl_show(): return br_to_ifs, if_to_br - -def check_topo(topo, is_multi_duts=False): - - def _assert(condition, exctype, msg): - if not condition: - raise exctype(msg) - +def check_topo(topo): hostif_exists = False vms_exists = False all_vlans = set() @@ -660,61 +591,44 @@ def _assert(condition, exctype, msg): if 'host_interfaces' in topo: vlans = topo['host_interfaces'] - _assert(isinstance(vlans, list), TypeError, - "topo['host_interfaces'] should be a list") + if not isinstance(vlans, list): + raise Exception("topo['host_interfaces'] should be a list of integers") for vlan in vlans: - if is_multi_duts: - condition = (isinstance(vlan, str) and - re.match(r"\d+.\d+", vlan)) - _assert(condition, ValueError, - "topo['host_interfaces'] should be a " - "list of strings of format '.'") + if not isinstance(vlan, int) or vlan < 0: + raise Exception("topo['host_interfaces'] should be a list of integers") + if vlan in all_vlans: + raise Exception("topo['host_interfaces'] double use of vlan: %d" % vlan) else: - condition = isinstance(vlan, int) and vlan >= 0 - _assert(condition, ValueError, - "topo['host_interfaces'] should be a " - "list of positive integers") - _assert(vlan not in all_vlans, ValueError, - "topo['host_interfaces'] double use of vlan: %s" % vlan) - all_vlans.add(vlan) + all_vlans.add(vlan) hostif_exists = True if 'VMs' in topo: VMs = topo['VMs'] - _assert(isinstance(VMs, dict), TypeError, - "topo['VMs'] should be a dictionary") + if not isinstance(VMs, dict): + raise Exception("topo['VMs'] should be a dictionary") - for hostname, attrs in VMs.items(): - _assert('vlans' in attrs and isinstance(attrs['vlans'], list), - ValueError, - "topo['VMs']['%s'] should contain " - "'vlans' with a list of vlans" % hostname) + for hostname, attrs in VMs.iteritems(): + if 'vlans' not in attrs or not isinstance(attrs['vlans'], list): + raise Exception("topo['VMs']['%s'] should contain 'vlans' with a list of vlans" % 
hostname) - _assert(('vm_offset' in attrs and - isinstance(attrs['vm_offset'], int)), - ValueError, - "topo['VMs']['%s'] should contain " - "'vm_offset' with a number" % hostname) + if 'vm_offset' not in attrs or not isinstance(attrs['vm_offset'], int): + raise Exception("topo['VMs']['%s'] should contain 'vm_offset' with a number" % hostname) for vlan in attrs['vlans']: - _assert(isinstance(vlan, int) and vlan >= 0, - ValueError, - "topo['VMs'][%s]['vlans'] should contain" - " a list with integers" % hostname) - _assert(vlan not in all_vlans, - ValueError, - "topo['VMs'][%s]['vlans'] double use " - "of vlan: %s" % (hostname, vlan)) - all_vlans.add(vlan) + if not isinstance(vlan, int) or vlan < 0: + raise Exception("topo['VMs'][%s]['vlans'] should contain a list with integers" % hostname) + if vlan in all_vlans: + raise Exception("topo['VMs'][%s]['vlans'] double use of vlan: %d" % (hostname, vlan)) + else: + all_vlans.add(vlan) vms_exists = True return hostif_exists, vms_exists - def check_params(module, params, mode): for param in params: if param not in module.params: @@ -722,7 +636,6 @@ def check_params(module, params, mode): return - def main(): module = AnsibleModule( argument_spec=dict( @@ -732,16 +645,12 @@ def main(): vm_names=dict(required=True, type='list'), vm_base=dict(required=False, type='str'), ptf_mgmt_ip_addr=dict(required=False, type='str'), - ptf_mgmt_ipv6_addr=dict(required=False, type='str'), ptf_mgmt_ip_gw=dict(required=False, type='str'), - ptf_bp_ip_addr=dict(required=False, type='str'), - ptf_bp_ipv6_addr=dict(required=False, type='str'), mgmt_bridge=dict(required=False, type='str'), dut_fp_ports=dict(required=False, type='list'), dut_mgmt_port=dict(required=False, type='str'), fp_mtu=dict(required=False, type='int', default=DEFAULT_MTU), max_fp_num=dict(required=False, type='int', default=NUM_FP_VLANS_PER_FP), - is_multi_duts=dict(required=False, type='bool', default=False), ), supports_check_mode=False) @@ -751,15 +660,9 @@ def main(): 
max_fp_num = module.params['max_fp_num'] dut_mgmt_port = None - curtime = datetime.datetime.now().isoformat() - - global cmd_debug_fname - cmd_debug_fname = CMD_DEBUG_FNAME % curtime - exception_debug_fname = EXCEPTION_DEBUG_FNAME % curtime - try: - if os.path.exists(cmd_debug_fname) and os.path.isfile(cmd_debug_fname): - os.remove(cmd_debug_fname) + if os.path.exists(CMD_DEBUG_FNAME) and os.path.isfile(CMD_DEBUG_FNAME): + os.remove(CMD_DEBUG_FNAME) net = VMTopology(vm_names, fp_mtu, max_fp_num) @@ -771,22 +674,18 @@ def main(): check_params(module, ['vm_set_name', 'topo', 'ptf_mgmt_ip_addr', - 'ptf_mgmt_ipv6_addr', 'ptf_mgmt_ip_gw', - 'ptf_bp_ip_addr', - 'ptf_bp_ipv6_addr', 'mgmt_bridge', 'dut_fp_ports'], cmd) vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] dut_fp_ports = module.params['dut_fp_ports'] - is_multi_duts = module.params['is_multi_duts'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) - hostif_exists, vms_exists = check_topo(topo, is_multi_duts) + hostif_exists, vms_exists = check_topo(topo) if vms_exists: check_params(module, ['vm_base'], cmd) @@ -794,27 +693,20 @@ def main(): else: vm_base = None - net.init(vm_set_name, topo, vm_base, dut_fp_ports, - is_multi_duts=is_multi_duts) + net.init(vm_set_name, topo, vm_base, dut_fp_ports) ptf_mgmt_ip_addr = module.params['ptf_mgmt_ip_addr'] - ptf_mgmt_ipv6_addr = module.params['ptf_mgmt_ipv6_addr'] ptf_mgmt_ip_gw = module.params['ptf_mgmt_ip_gw'] mgmt_bridge = module.params['mgmt_bridge'] - net.add_mgmt_port_to_docker(mgmt_bridge, ptf_mgmt_ip_addr, ptf_mgmt_ip_gw, ptf_mgmt_ipv6_addr) - - ptf_bp_ip_addr = module.params['ptf_bp_ip_addr'] - ptf_bp_ipv6_addr = module.params['ptf_bp_ipv6_addr'] - - if module.params['dut_mgmt_port']: - net.bind_mgmt_port(mgmt_bridge, module.params['dut_mgmt_port']) + net.add_mgmt_port_to_docker(mgmt_bridge, ptf_mgmt_ip_addr, 
ptf_mgmt_ip_gw) if vms_exists: net.add_veth_ports_to_docker() + if module.params['dut_mgmt_port']: + net.bind_mgmt_port(mgmt_bridge, module.params['dut_mgmt_port']) net.bind_fp_ports() net.bind_vm_backplane() - net.add_bp_port_to_docker(ptf_bp_ip_addr, ptf_bp_ipv6_addr) if hostif_exists: net.inject_host_ports() @@ -826,12 +718,11 @@ def main(): vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] dut_fp_ports = module.params['dut_fp_ports'] - is_multi_duts = module.params['is_multi_duts'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) - hostif_exists, vms_exists = check_topo(topo, is_multi_duts) + hostif_exists, vms_exists = check_topo(topo) if vms_exists: check_params(module, ['vm_base'], cmd) @@ -839,13 +730,11 @@ def main(): else: vm_base = None - net.init(vm_set_name, topo, vm_base, dut_fp_ports, - is_multi_duts=is_multi_duts) - - if module.params['dut_mgmt_port']: - net.unbind_mgmt_port(module.params['dut_mgmt_port']) + net.init(vm_set_name, topo, vm_base, dut_fp_ports) if vms_exists: + if module.params['dut_mgmt_port']: + net.unbind_mgmt_port(module.params['dut_mgmt_port']) net.unbind_vm_backplane() net.unbind_fp_ports() @@ -855,22 +744,18 @@ def main(): check_params(module, ['vm_set_name', 'topo', 'ptf_mgmt_ip_addr', - 'ptf_mgmt_ipv6_addr', 'ptf_mgmt_ip_gw', - 'ptf_bp_ip_addr', - 'ptf_bp_ipv6_addr', 'mgmt_bridge', 'dut_fp_ports'], cmd) vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] dut_fp_ports = module.params['dut_fp_ports'] - is_multi_duts = module.params['is_multi_duts'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) - hostif_exists, vms_exists = check_topo(topo, is_multi_duts) + hostif_exists, vms_exists = check_topo(topo) if vms_exists: check_params(module, 
['vm_base'], cmd) @@ -878,18 +763,13 @@ def main(): else: vm_base = None - net.init(vm_set_name, topo, vm_base, dut_fp_ports, True, - is_multi_duts) + net.init(vm_set_name, topo, vm_base, dut_fp_ports, True) ptf_mgmt_ip_addr = module.params['ptf_mgmt_ip_addr'] - ptf_mgmt_ipv6_addr = module.params['ptf_mgmt_ipv6_addr'] ptf_mgmt_ip_gw = module.params['ptf_mgmt_ip_gw'] mgmt_bridge = module.params['mgmt_bridge'] - net.add_mgmt_port_to_docker(mgmt_bridge, ptf_mgmt_ip_addr, ptf_mgmt_ip_gw, ptf_mgmt_ipv6_addr) - - ptf_bp_ip_addr = module.params['ptf_bp_ip_addr'] - ptf_bp_ipv6_addr = module.params['ptf_bp_ipv6_addr'] + net.add_mgmt_port_to_docker(mgmt_bridge, ptf_mgmt_ip_addr, ptf_mgmt_ip_gw) if vms_exists: net.unbind_fp_ports() @@ -905,12 +785,11 @@ def main(): vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] dut_fp_ports = module.params['dut_fp_ports'] - is_multi_duts = module.params['is_multi_duts'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) - hostif_exists, vms_exists = check_topo(topo, is_multi_duts) + hostif_exists, vms_exists = check_topo(topo) if vms_exists: check_params(module, ['vm_base'], cmd) @@ -918,8 +797,7 @@ def main(): else: vm_base = None - net.init(vm_set_name, topo, vm_base, dut_fp_ports, - is_multi_duts=is_multi_duts) + net.init(vm_set_name, topo, vm_base, dut_fp_ports) if vms_exists: if cmd == 'connect-vms': @@ -930,7 +808,7 @@ def main(): raise Exception("Got wrong cmd: %s. Ansible bug?" 
% cmd) except Exception as error: - with open(exception_debug_fname, 'w') as fp: + with open(EXCEPTION_DEBUG_FNAME, 'w') as fp: traceback.print_exc(file=fp) module.fail_json(msg=str(error)) diff --git a/ansible/roles/vm_set/tasks/add_topo.yml b/ansible/roles/vm_set/tasks/add_topo.yml index 01871d8ad97..4904de3f8ab 100644 --- a/ansible/roles/vm_set/tasks/add_topo.yml +++ b/ansible/roles/vm_set/tasks/add_topo.yml @@ -1,121 +1,63 @@ # The PTF image built from different branches may be incompatible. The ptf_imagetag variable added here is to # support using different PTF images for different branches. When the ptf_imagetag variable is not specified, -# the PTF image with default "latest" tag will be used. When a specific PTF image version is required, we can -# specify a value for the ptf_imagetag variable somewhere, for example, specify from command line: -# ./testbed-cli.sh add-topo - vault -e ptf_imagetag=201811 +# the PTF image with default "201811" tag will be used in this sonic-mgmt 201811 branch. When a different PTF +# image version is required, we can specify a different value for the ptf_imagetag variable somewhere to +# override the default value, for example, specify from command line: +# ./testbed-cli.sh add-topo - vault -e ptf_imagetag=myversion # By using this practice, we suggest to add different tags for different PTF image versions in docker registry. +# And we suggest to add tag "201811" for PTF image built from the 201811 branch. 
- name: Set default value for ptf_imagetag set_fact: - ptf_imagetag: "latest" + ptf_imagetag: "201811" when: ptf_imagetag is not defined -- name: set "PTF" container type, by default - set_fact: - container_type: "PTF" - -- name: set "API-SERVER" container type if Keysight Api Server is used - set_fact: - container_type: "API-SERVER" - when: ptf_imagename is defined and ptf_imagename == "docker-keysight-api-server" - -- name: Try to login into docker registry - docker_login: - registry_url: "{{ docker_registry_host }}" +- name: Create a docker container ptf_{{ vm_set_name }} + docker: + registry: "{{ docker_registry_host }}" username: "{{ docker_registry_username }}" password: "{{ docker_registry_password }}" + name: ptf_{{ vm_set_name }} + image: "{{ docker_registry_host }}/{{ ptf_imagename }}" + pull: missing + state: reloaded + net: none + detach: True + cap_add: NET_ADMIN + privileged: yes become: yes - when: docker_registry_username is defined and docker_registry_password is defined - -- name: Start Keysight API Server container - block: - - name: default secret_group_vars if not defined - set_fact: - secret_group_vars: - ixia_api_server: - rest_port: 443 - when: > - secret_group_vars is not defined - or secret_group_vars.ixia_api_server is not defined - or secret_group_vars.ixia_api_server.rest_port is not defined - - - name: Pull and start Keysight API Server container - docker_container: - name: apiserver - image: "{{ docker_registry_host }}/{{ ptf_imagename }}:{{ ptf_imagetag }}" - pull: yes - state: started - restart: no - published_ports: "{{ secret_group_vars.ixia_api_server.rest_port }}:443" - detach: True - capabilities: - - net_admin - privileged: yes - become: yes - when: container_type == "API-SERVER" - - -- name: Start PTF container - block: - - name: Create ptf container ptf_{{ vm_set_name }} - docker_container: - name: ptf_{{ vm_set_name }} - image: "{{ docker_registry_host }}/{{ ptf_imagename }}:{{ ptf_imagetag }}" - pull: yes - state: started 
- restart: no - network_mode: none - detach: True - capabilities: - - net_admin - privileged: yes - become: yes - - name: Enable ipv6 for docker container ptf_{{ vm_set_name }} - command: docker exec -i ptf_{{ vm_set_name }} sysctl -w net.ipv6.conf.all.disable_ipv6=0 - become: yes - - - name: Set front panel/mgmt port for dut - include_tasks: set_dut_port.yml - - - name: Setup vlan port for vlan tunnel - vlan_port: - external_port: "{{ external_port }}" - vlan_ids: "{{ device_vlan_list }}" - is_multi_duts: "{{ dut_name is defined and dut_name.split(',')|length > 1 }}" - cmd: "create" - become: yes - when: external_port is defined - - - include_tasks: add_ceos_list.yml - when: vm_type is defined and vm_type == "ceos" - - - name: Bind topology {{ topo }} to VMs. base vm = {{ VM_base }} - vm_topology: - cmd: "bind" - vm_set_name: "{{ vm_set_name }}" - topo: "{{ topology }}" - vm_names: "{{ VM_hosts }}" - vm_base: "{{ VM_base }}" - ptf_mgmt_ip_addr: "{{ ptf_ip }}" - ptf_mgmt_ipv6_addr: "{{ ptf_ipv6 }}" - ptf_mgmt_ip_gw: "{{ mgmt_gw }}" - ptf_bp_ip_addr: "{{ ptf_bp_ip }}" - ptf_bp_ipv6_addr: "{{ ptf_bp_ipv6 }}" - mgmt_bridge: "{{ mgmt_bridge }}" - dut_fp_ports: "{{ dut_fp_ports }}" - dut_mgmt_port: "{{ dut_mgmt_port }}" - fp_mtu: "{{ fp_mtu_size }}" - max_fp_num: "{{ max_fp_num }}" - is_multi_duts: "{{ dut_name is defined and dut_name.split(',')|length > 1 }}" - become: yes +- name: Enable ipv6 for docker container ptf_{{ vm_set_name }} + command: docker exec -i ptf_{{ vm_set_name }} sysctl -w net.ipv6.conf.all.disable_ipv6=0 + become: yes - - name: Send arp ping packet to gw for flusing the ARP table - command: docker exec -i ptf_{{ vm_set_name }} python -c "from scapy.all import *; arping('{{ mgmt_gw }}')" - become: yes +- name: Set front panel/mgmt port for dut + include: set_dut_port.yml - - name: Start ptf_tgen service - include_tasks: start_ptf_tgen.yml - when: topo == 'fullmesh' +- name: Setup vlan port for vlan tunnel + vlan_port: + external_port: "{{ 
external_port }}" + vlan_ids: "{{ device_vlan_list }}" + cmd: "create" + become: yes + when: external_port is defined + +- name: Bind topology {{ topo }} to VMs. base vm = {{ VM_base }} + vm_topology: + cmd: "bind" + vm_set_name: "{{ vm_set_name }}" + topo: "{{ topology }}" + vm_names: "{{ VM_hosts }}" + vm_base: "{{ VM_base }}" + ptf_mgmt_ip_addr: "{{ ptf_ip }}" + ptf_mgmt_ip_gw: "{{ mgmt_gw }}" + mgmt_bridge: "{{ mgmt_bridge }}" + dut_fp_ports: "{{ dut_fp_ports }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" + fp_mtu: "{{ fp_mtu_size }}" + max_fp_num: "{{ max_fp_num }}" + become: yes - when: container_type == "PTF" +- name: Send arp ping packet to gw for flusing the ARP table + command: docker exec -i ptf_{{ vm_set_name }} python -c "from scapy.all import *; arping('{{ mgmt_gw }}')" + become: yes diff --git a/ansible/roles/vm_set/tasks/connect_vms.yml b/ansible/roles/vm_set/tasks/connect_vms.yml index c99fd4262cc..00c62bc9448 100644 --- a/ansible/roles/vm_set/tasks/connect_vms.yml +++ b/ansible/roles/vm_set/tasks/connect_vms.yml @@ -1,5 +1,5 @@ - name: Set front panel/mgmt port for dut - include_tasks: set_dut_port.yml + include: set_dut_port.yml - name: Connect VMs to {{ topo }}. base vm = {{ VM_base }} vm_topology: diff --git a/ansible/roles/vm_set/tasks/disconnect_vms.yml b/ansible/roles/vm_set/tasks/disconnect_vms.yml index 28743968b91..4574eaa1ec4 100644 --- a/ansible/roles/vm_set/tasks/disconnect_vms.yml +++ b/ansible/roles/vm_set/tasks/disconnect_vms.yml @@ -1,5 +1,5 @@ - name: Set front panel/mgmt port for dut - include_tasks: set_dut_port.yml + include: set_dut_port.yml - name: Disconnect VMs to {{ topo }}. 
base vm = {{ VM_base }} vm_topology: diff --git a/ansible/roles/vm_set/tasks/docker.yml b/ansible/roles/vm_set/tasks/docker.yml index 53fc7eca336..80cd14bdbc6 100644 --- a/ansible/roles/vm_set/tasks/docker.yml +++ b/ansible/roles/vm_set/tasks/docker.yml @@ -1,8 +1,3 @@ -- name: Add docker official GPG key - apt_key: url=https://download.docker.com/linux/ubuntu/gpg state=present - become: yes - environment: "{{ proxy_env | default({}) }}" - - name: Add docker repository for 16.04 apt_repository: repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable @@ -24,34 +19,21 @@ become: yes when: host_distribution_version.stdout == "18.04" -- name: Add docker repository for 20.04 - apt_repository: - repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable - state: present - become: yes - when: host_distribution_version.stdout == "20.04" - -- name: Install docker-ce - apt: pkg=docker-ce update_cache=yes - become: yes - environment: "{{ proxy_env | default({}) }}" - -- name: Get default pip_executable - set_fact: - pip_executable: pip - when: pip_executable is not defined and host_distribution_version.stdout != "20.04" - -- name: Get default pip_executable - set_fact: - pip_executable: pip3 - when: pip_executable is not defined and host_distribution_version.stdout == "20.04" - -- name: remove old python packages - pip: name=docker-py state=absent executable={{ pip_executable }} - become: yes - environment: "{{ proxy_env | default({}) }}" - -- name: Install python packages - pip: name=docker version=4.1.0 state=forcereinstall executable={{ pip_executable }} - become: yes - environment: "{{ proxy_env | default({}) }}" +###### removed by Vincent Meng ######### +#- name: Add docker official GPG key +# apt_key: +# url: https://download.docker.com/linux/ubuntu/gpg +# state: present +# become: yes +# environment: "{{ proxy_env | default({}) }}" + +#- name: Install docker-ce +# apt: pkg=docker-ce update_cache=yes +# become: yes +# environment: 
"{{ proxy_env | default({}) }}" + +#- name: Install python packages +# pip: name=docker-py state=present version=1.7.2 +# become: yes +# environment: "{{ proxy_env | default({}) }}" +###### end by Vincent Meng ####### diff --git a/ansible/roles/vm_set/tasks/internal_mgmt_network.yml b/ansible/roles/vm_set/tasks/internal_mgmt_network.yml index e2f3790915a..76ae0facfef 100644 --- a/ansible/roles/vm_set/tasks/internal_mgmt_network.yml +++ b/ansible/roles/vm_set/tasks/internal_mgmt_network.yml @@ -1,26 +1,8 @@ -- name: Setup internal management bridge - copy: - src: 60-kvm-testbed.yaml - dest: /etc/netplan/60-kvm-testbed.yaml +- name: create management bridge + shell: brctl add {{ mgmt_bridge }} become: yes - register: br1 + ignore_errors: yes -- name: apply bridge change - command: "netplan apply" - become: yes - when: br1.changed - -- name: Enable traffic forward - iptables: - chain: FORWARD - policy: ACCEPT - become: yes - -- name: Enable NAT - iptables: - table: nat - chain: POSTROUTING - source: 10.250.0.0/24 - out_interface: eth0 - jump: MASQUERADE +- name: bring up external port + shell: /sbin/ifconfig {{ mgmt_bridge }} up become: yes diff --git a/ansible/roles/vm_set/tasks/kickstart_vm.yml b/ansible/roles/vm_set/tasks/kickstart_vm.yml index 2fdd6a460d7..34542c8aa6c 100644 --- a/ansible/roles/vm_set/tasks/kickstart_vm.yml +++ b/ansible/roles/vm_set/tasks/kickstart_vm.yml @@ -22,10 +22,9 @@ new_password={{ eos_password }} new_root_password={{ eos_root_password }} register: kickstart_output - until: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code == 0' + until: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code != -1' retries: 5 delay: 10 - ignore_errors: true - name: Destroy vm {{ vm_name }} if it hangs virt: name={{ vm_name }} @@ -33,7 +32,6 @@ uri=qemu:///system when: kickstart_output.kickstart_code != 0 become: yes - ignore_errors: true - name: Start vm again {{ vm_name }} virt: name={{ vm_name }} @@ -41,7 +39,6 
@@ uri=qemu:///system when: kickstart_output.kickstart_code != 0 become: yes - ignore_errors: true - name: Wait until vm {{ vm_name }} is loaded kickstart: telnet_port={{ serial_port }} @@ -54,15 +51,13 @@ new_password={{ eos_password }} new_root_password={{ eos_root_password }} register: kickstart_output_final - until: '"kickstart_code" in kickstart_output_final and kickstart_output_final.kickstart_code == 0' + until: '"kickstart_code" in kickstart_output_final and kickstart_output_final.kickstart_code != -1' retries: 5 delay: 10 - ignore_errors: true when: kickstart_output.kickstart_code != 0 - - name: Kickstart gives error again vm {{ vm_name }} - set_fact: - kickstart_failed_vms: "{{ kickstart_failed_vms + [vm_name] }}" + - name: Fail if kickstart gives error again vm {{ vm_name }} + fail: msg="Two attempts to start vm weren't successful" when: '"kickstart_code" in kickstart_output_final and kickstart_output_final.kickstart_code != 0' - name: Set VM to autostart diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml index 6d65c734d5c..a3750b5b08c 100644 --- a/ansible/roles/vm_set/tasks/main.yml +++ b/ansible/roles/vm_set/tasks/main.yml @@ -1,4 +1,4 @@ -# This role creates a set of VM with veos or Ubuntu for Kubernetes master +# This role creates a set of VM with veos # Input parameters for the role: # - action: 'start', 'stop' or 'renumber' for creating, removeing, or renumbering vm set respectively # - id: sequence number for vm set on the host. 
@@ -59,6 +59,7 @@ become: yes with_items: - ifupdown + - python - qemu - openvswitch-switch - net-tools @@ -66,49 +67,22 @@ - util-linux - iproute2 - vlan + - libvirt-bin + - python-libvirt + - python-pip - apt-transport-https - ca-certificates - curl - software-properties-common - - libvirt-clients - -- name: - apt: - pkg: - - python - - libvirt-bin - - python-libvirt - - python-pip - become: yes - when: host_distribution_version.stdout == "18.04" - -- name: - apt: - pkg: - - python3-libvirt - - python3-pip - - libvirt-daemon-system - - qemu-system-x86 - become: yes - when: host_distribution_version.stdout == "20.04" -- include_tasks: docker.yml +- include: docker.yml - name: Ensure {{ ansible_user }} in docker,sudo group user: name: "{{ ansible_user }}" - append: yes groups: docker,sudo become: yes -- name: Ensure {{ ansible_user }} in libvirt group - user: - name: "{{ ansible_user }}" - append: yes - groups: libvirt - become: yes - when: host_distribution_version.stdout == "20.04" - - name: Install br_netfilter kernel module become: yes modprobe: name=br_netfilter state=present @@ -116,7 +90,7 @@ - name: Set sysctl bridge parameters for testbed sysctl: name: "{{ item }}" - value: "0" + value: 0 sysctl_set: yes become: yes with_items: @@ -127,23 +101,23 @@ - name: Set sysctl RCVBUF max parameter for testbed sysctl: name: "net.core.rmem_max" - value: "509430500" + value: 509430500 sysctl_set: yes become: yes - name: Set sysctl RCVBUF default parameter for testbed sysctl: name: "net.core.rmem_default" - value: "31457280" + value: 31457280 sysctl_set: yes become: yes - name: Setup external front port - include_tasks: external_port.yml + include: external_port.yml when: external_port is defined - name: Setup internal management network - include_tasks: internal_mgmt_network.yml + include: internal_mgmt_network.yml when: internal_mgmt_network is defined and internal_mgmt_network == True - block: @@ -153,25 +127,21 @@ split: ":" - set_fact: home_path: "{{ 
getent_passwd[ansible_user][4] }}" - when: home_path is not defined - debug: msg="{{ home_path }}" -- name: Require veos VMs by default - set_fact: - veos_vm_required: true +- name: Ensure {{ root_path }} exists + file: path={{ root_path }} state=directory -- name: veos VMs not needed when setting up Kubernetes master - set_fact: - veos_vm_required: false - when: - - k8s is defined +- name: Install cleanup script + template: src=cleanup.sh.j2 + dest={{ root_path }}/cleanup.sh -- name: VMs not needed in case of Keysight API Server - set_fact: - veos_vm_required: false - when: - - ptf_imagename is defined - - ptf_imagename == "docker-keysight-api-server" +- name: Copy vm_resumer.py to the {{ root_path }} + become: true + copy: + src: roles/vm_set/files/vm_resumer.py + dest: "{{ root_path }}" + mode: 0755 - name: Retrieve a list of the defined VMs virt: command=list_vms @@ -186,98 +156,44 @@ register: vm_list_running become: true -- name: Retrieve a list of the paused VMs - virt: command=list_vms - uri=qemu:///system - state=pause - register: vm_list_paused - become: true - -- block: - - name: Ensure {{ root_path }} exists - file: path={{ root_path }} state=directory - - - name: Install cleanup script - template: src=cleanup.sh.j2 - dest={{ root_path }}/cleanup.sh - - - name: Copy vm_resumer.py to the {{ root_path }} - become: true - copy: - src: roles/vm_set/files/vm_resumer.py - dest: "{{ root_path }}" - mode: 0755 - - - name: Find current server group - set_fact: current_server={{ group_names | extract_by_prefix('server_') }} +- name: Find current server group + set_fact: current_server={{ group_names | extract_by_prefix('server_') }} - - name: Extract VM names from the inventory - set_fact: VM_hosts={{ groups[current_server] | filter_by_prefix('VM') | sort}} +- name: Extract VM names from the inventory + set_fact: VM_hosts={{ groups[current_server] | filter_by_prefix('VM') | sort}} - - name: Limit VM - set_fact: VM_hosts="{{ VM_hosts | first_n_elements(VM_num) }}" 
- when: VM_num is defined and VM_num|int > 0 +- name: Stop VMs + include: stop.yml + when: action == 'stop' - - name: Generate vm list of target VMs - set_fact: VM_targets={{ VM_hosts | filter_vm_targets(topology['VMs'], VM_base) | sort }} - when: topology['VMs'] is defined - - - name: Stop VMs - include_tasks: stop.yml - when: action == 'stop' - - - name: Start VMs - include_tasks: start.yml - when: action == 'start' - - - name: Connect VMs - include_tasks: connect_vms.yml - when: action == 'connect_vms' - - - name: Disconnect VMs - include_tasks: disconnect_vms.yml - when: action == 'disconnect_vms' - - - name: Renumber topology - include_tasks: renumber_topo.yml - when: action == 'renumber_topo' - - when: veos_vm_required is defined and veos_vm_required == True +- name: Start VMs + include: start.yml + when: action == 'start' - name: Add topology - include_tasks: add_topo.yml + include: add_topo.yml when: action == 'add_topo' - name: Remove topology - include_tasks: remove_topo.yml + include: remove_topo.yml when: action == 'remove_topo' -- name: Stop Kubernetes VMs - include_tasks: stop_k8s.yml - when: action == 'stop_k8s' - -- name: Start Kubernetes VMs - include_tasks: start_k8s.yml - when: action == 'start_k8s' +- name: Renumber topology + include: renumber_topo.yml + when: action == 'renumber_topo' -- block: - - name: Start SONiC VM - include_tasks: start_sonic_vm.yml - when: action == 'start_sonic_vm' and hostvars[dut_name]['type'] == 'kvm' +- name: Connect VMs + include: connect_vms.yml + when: action == 'connect_vms' - - name: Stop SONiC VM - include_tasks: stop_sonic_vm.yml - when: action == 'stop_sonic_vm' and hostvars[dut_name]['type'] == 'kvm' +- name: Disconnect VMs + include: disconnect_vms.yml + when: action == 'disconnect_vms' - - name: Start SID - include_tasks: start_sid.yml - when: action == 'start_sid' and hostvars[dut_name]['type'] == 'simx' +# - name: Start SONiC VM +# include: start_sonic_vm.yml +# when: action == 'start_sonic_vm' and 
hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' - - name: Stop SID - include_tasks: stop_sid.yml - when: action == 'stop_sid' and hostvars[dut_name]['type'] == 'simx' - when: - - dut_name is defined - - dut_name.split(',')|length == 1 - - hostvars[dut_name] is defined - - hostvars[dut_name].type is defined +# - name: Stop SONiC VM +# include: stop_sonic_vm.yml +# when: action == 'stop_sonic_vm' and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' diff --git a/ansible/roles/vm_set/tasks/remove_topo.yml b/ansible/roles/vm_set/tasks/remove_topo.yml index 9c081b7b649..9201357d588 100644 --- a/ansible/roles/vm_set/tasks/remove_topo.yml +++ b/ansible/roles/vm_set/tasks/remove_topo.yml @@ -1,55 +1,29 @@ -- name: set "PTF" container type, by default - set_fact: - container_type: "PTF" - -- name: set "API-SERVER" container type if Keysight Api Server is used - set_fact: - container_type: "API-SERVER" - when: ptf_imagename is defined and ptf_imagename == "docker-keysight-api-server" - -- block: - - name: Set front panel/mgmt port for dut - include_tasks: set_dut_port.yml - - - name: Unbind topology {{ topo }} to VMs. 
base vm = {{ VM_base }} - vm_topology: - cmd: "unbind" - vm_set_name: "{{ vm_set_name }}" - topo: "{{ topology }}" - vm_names: "{{ VM_hosts }}" - vm_base: "{{ VM_base }}" - dut_fp_ports: "{{ dut_fp_ports }}" - dut_mgmt_port: "{{ dut_mgmt_port }}" - max_fp_num: "{{ max_fp_num }}" - is_multi_duts: "{{ dut_name is defined and dut_name.split(',')|length > 1 }}" - become: yes - - - include_tasks: remove_ceos_list.yml - when: vm_type is defined and vm_type == "ceos" - - - name: Remove vlan port for vlan tunnel - vlan_port: - external_port: "{{ external_port }}" - vlan_ids: "{{ device_vlan_list }}" - is_multi_duts: "{{ dut_name is defined and dut_name.split(',')|length > 1 }}" - cmd: "remove" - become: yes - when: external_port is defined - - - name: Remove ptf docker container ptf_{{ vm_set_name }} - docker_container: - name: "ptf_{{ vm_set_name }}" - state: absent - become: yes - - when: container_type == "PTF" - - -- block: - - name: Remove Keysight API Server container - docker_container: - name: apiserver - state: absent - become: yes - when: container_type == "API-SERVER" - +- name: Set front panel/mgmt port for dut + include: set_dut_port.yml + +- name: Unbind topology {{ topo }} to VMs. 
base vm = {{ VM_base }} + vm_topology: + cmd: "unbind" + vm_set_name: "{{ vm_set_name }}" + topo: "{{ topology }}" + vm_names: "{{ VM_hosts }}" + vm_base: "{{ VM_base }}" + dut_fp_ports: "{{ dut_fp_ports }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" + max_fp_num: "{{ max_fp_num }}" + become: yes + +- name: Remove vlan port for vlan tunnel + vlan_port: + external_port: "{{ external_port }}" + vlan_ids: "{{ device_vlan_list }}" + cmd: "remove" + become: yes + when: external_port is defined + +- name: Remove ptf docker container ptf_{{ vm_set_name }} + docker: + name: ptf_{{ vm_set_name }} + image: "{{ docker_registry_host }}/{{ ptf_imagename }}" + state: absent + become: yes diff --git a/ansible/roles/vm_set/tasks/renumber_topo.yml b/ansible/roles/vm_set/tasks/renumber_topo.yml index 018ee0143f6..1f98a251ae3 100644 --- a/ansible/roles/vm_set/tasks/renumber_topo.yml +++ b/ansible/roles/vm_set/tasks/renumber_topo.yml @@ -1,25 +1,26 @@ - name: Remove ptf docker container ptf_{{ vm_set_name }} - docker_container: + docker: name: ptf_{{ vm_set_name }} + image: "{{ docker_registry_host }}/{{ ptf_imagename }}" state: absent become: yes -- name: Create ptf container ptf_{{ vm_set_name }} - docker_container: +- name: Create a docker container ptf_{{ vm_set_name }} + docker: + registry: "{{ docker_registry_host }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" name: ptf_{{ vm_set_name }} - image: "{{ docker_registry_host }}/{{ ptf_imagename }}:{{ ptf_imagetag }}" - pull: yes - state: started - restart: yes - network_mode: none + image: "{{ docker_registry_host }}/{{ ptf_imagename }}" + pull: always + state: reloaded + net: none detach: True - capabilities: - - net_admin - privileged: yes + cap_add: NET_ADMIN become: yes - name: Set front panel/mgmt port for dut - include_tasks: set_dut_port.yml + include: set_dut_port.yml - name: Renumber topology {{ topo }} to VMs. 
base vm = {{ VM_base }} vm_topology: @@ -29,7 +30,6 @@ vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" ptf_mgmt_ip_addr: "{{ ptf_ip }}" - ptf_mgmt_ipv6_addr: "{{ ptf_ipv6 }}" ptf_mgmt_ip_gw: "{{ mgmt_gw }}" mgmt_bridge: "{{ mgmt_bridge }}" dut_fp_ports: "{{ dut_fp_ports }}" diff --git a/ansible/roles/vm_set/tasks/set_dut_port.yml b/ansible/roles/vm_set/tasks/set_dut_port.yml index b8ed60f385f..df42e09b930 100644 --- a/ansible/roles/vm_set/tasks/set_dut_port.yml +++ b/ansible/roles/vm_set/tasks/set_dut_port.yml @@ -2,7 +2,6 @@ vlan_port: external_port: "{{ external_port }}" vlan_ids: "{{ device_vlan_list }}" - is_multi_duts: "{{ dut_name is defined and dut_name.split(',')|length > 1 }}" cmd: "list" become: yes when: external_port is defined @@ -15,11 +14,5 @@ - name: Get front panel and mgmt port for kvm vm kvm_port: vmname: "{{ dut_name }}" - when: external_port is not defined and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' - become: yes - -- name: Get front panel and mgmt port for SID - mellanox_simx_port: - vmname: "{{ dut_name }}" - when: external_port is not defined and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'simx' + when: external_port is not defined become: yes diff --git a/ansible/roles/vm_set/tasks/start.yml b/ansible/roles/vm_set/tasks/start.yml index 46ef53932bc..6c458eb8c2a 100644 --- a/ansible/roles/vm_set/tasks/start.yml +++ b/ansible/roles/vm_set/tasks/start.yml @@ -60,7 +60,7 @@ when: interval is not defined - name: Start VMs - include_tasks: start_vm.yml + include: start_vm.yml vars: vm_name: "{{ item }}" hostname: "{{ vm_name }}" @@ -70,14 +70,12 @@ disk_image: "{{ home_path }}/{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk" cdrom_image: "{{ home_path }}/{{ root_path }}/images/{{ cd_image_filename }}" mgmt_tap: "{{ vm_name }}-m" - backplane_tap: "{{ vm_name }}-back" + port1_bridge: "br-{{ vm_name }}-back" + port1_tap: "{{ vm_name }}-back" with_items: "{{ VM_hosts }}" -- set_fact: - 
kickstart_failed_vms: [] - - name: Kickstart VMs - include_tasks: kickstart_vm.yml + include: kickstart_vm.yml vars: vm_name: "{{ item }}" hostname: "{{ vm_name }}" @@ -87,13 +85,6 @@ disk_image: "{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk" cdrom_image: "{{ root_path }}/images/{{ cd_image_filename }}" mgmt_tap: "{{ vm_name }}-m" - backplane_tap: "{{ vm_name }}-back" + port1_bridge: "br-{{ vm_name }}-back" + port1_tap: "{{ vm_name }}-back" with_items: "{{ VM_hosts }}" - -- block: - - name: Log all kickstart failed VMs - debug: msg="{{ kickstart_failed_vms }}" - - - name: Fail if kickstart any VM failed - fail: msg="Please run start-vms again with -e 'respin_vms=["VMXXX"]' to retry the failed VMs" - when: kickstart_failed_vms | length > 0 diff --git a/ansible/roles/vm_set/tasks/start_sonic_vm.yml b/ansible/roles/vm_set/tasks/start_sonic_vm.yml index 3ac78e0a201..ebc70a10e89 100644 --- a/ansible/roles/vm_set/tasks/start_sonic_vm.yml +++ b/ansible/roles/vm_set/tasks/start_sonic_vm.yml @@ -1,22 +1,23 @@ -- set_fact: - sonic_vm_storage_location: "{{ home_path }}/sonic-vm" - when: sonic_vm_storage_location is not defined +- name: Print mess + debug: msg="{{ hostvars }}" - name: Create directory for vm images and vm disks file: path={{ item }} state=directory mode=0755 with_items: - - "{{ sonic_vm_storage_location }}/images" - - "{{ sonic_vm_storage_location }}/disks" + - "sonic-vm/images" + - "sonic-vm/disks" + +- name: Print mess + debug: msg="{{ hostvars }}" - set_fact: - src_disk_image: "{{ sonic_vm_storage_location }}/images/sonic-vs.img" - disk_image: "{{ sonic_vm_storage_location }}/disks/sonic_{{ dut_name }}.img" + src_disk_image: "{{ home_path }}/sonic-vm/images/sonic-vs.img" + disk_image: "{{ home_path }}/sonic-vm/disks/sonic_{{ dut_name }}.img" mgmt_ip_address: " {{ hostvars[dut_name]['ansible_host'] }}" - mgmt_gw: "{{ vm_mgmt_gw | default(mgmt_gw) }}" - serial_port: "{{ hostvars[dut_name]['serial_port'] }}" + serial_port: 9000 - name: Device debug output - 
debug: msg="hostname = {{ dut_name }} serial port = {{ serial_port }} ip = {{ mgmt_ip_address }}/{{ mgmt_prefixlen }} mgmt_gw = {{ mgmt_gw }}" + debug: msg="hostname = {{ dut_name }} serial port = {{ serial_port }} ip = {{ mgmt_ip_address }}" - name: Check destination file existance stat: path={{ disk_image }} @@ -26,11 +27,7 @@ copy: src={{ src_disk_image }} dest={{ disk_image }} remote_src=True when: not file_stat.stat.exists -- name: Get DUT port alias - port_alias: hwsku={{ hostvars[dut_name].hwsku }} - delegate_to: localhost - -- name: Define SONiC vm {{ dut_name }} +- name: Define vm {{ dut_name }} virt: name={{ dut_name }} command=define xml="{{ lookup('template', 'templates/sonic.xml.j2') }}" @@ -38,13 +35,12 @@ when: dut_name not in vm_list_defined.list_vms become: yes -- name: Start SONiC vm {{ dut_name }} +- name: Start vm {{ dut_name }} virt: name={{ dut_name }} state=running uri=qemu:///system when: dut_name not in vm_list_running.list_vms become: yes - register: sonic_vm_start - name: Wait until vm {{ dut_name }} is loaded sonic_kickstart: telnet_port={{ serial_port }} diff --git a/ansible/roles/vm_set/tasks/start_vm.yml b/ansible/roles/vm_set/tasks/start_vm.yml index 2c85a13b17c..09bd4e35913 100644 --- a/ansible/roles/vm_set/tasks/start_vm.yml +++ b/ansible/roles/vm_set/tasks/start_vm.yml @@ -40,7 +40,6 @@ uri=qemu:///system when: vm_name in respin_vms become: yes - ignore_errors: true - name: Start vm {{ vm_name }} virt: name={{ vm_name }} diff --git a/ansible/roles/vm_set/tasks/stop.yml b/ansible/roles/vm_set/tasks/stop.yml index 515ad485bf2..3bdd402247a 100644 --- a/ansible/roles/vm_set/tasks/stop.yml +++ b/ansible/roles/vm_set/tasks/stop.yml @@ -7,7 +7,7 @@ when: topology['VMs'] is defined and VM_base is defined - name: Remove VMs. 
- include_tasks: stop_vm.yml + include: stop_vm.yml vars: vm_name: "{{ item }}" disk_image: "{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk" diff --git a/ansible/roles/vm_set/tasks/stop_sonic_vm.yml b/ansible/roles/vm_set/tasks/stop_sonic_vm.yml index 7cc85f4b1bf..f08a27711e7 100644 --- a/ansible/roles/vm_set/tasks/stop_sonic_vm.yml +++ b/ansible/roles/vm_set/tasks/stop_sonic_vm.yml @@ -5,7 +5,7 @@ virt: name={{ dut_name }} state=destroyed uri=qemu:///system - when: dut_name in vm_list_running.list_vms or dut_name in vm_list_paused.list_vms + when: dut_name in vm_list_running.list_vms become: yes - name: Undefine vm {{ dut_name }} diff --git a/ansible/roles/vm_set/templates/arista.xml.j2 b/ansible/roles/vm_set/templates/arista.xml.j2 index ab2e8f623d3..5fd1d357d53 100644 --- a/ansible/roles/vm_set/templates/arista.xml.j2 +++ b/ansible/roles/vm_set/templates/arista.xml.j2 @@ -45,9 +45,11 @@ {% endfor %} - + - + + + diff --git a/ansible/roles/vm_set/templates/sonic.xml.j2 b/ansible/roles/vm_set/templates/sonic.xml.j2 index 1bd3d3c4a2c..4838566d7c5 100644 --- a/ansible/roles/vm_set/templates/sonic.xml.j2 +++ b/ansible/roles/vm_set/templates/sonic.xml.j2 @@ -1,13 +1,13 @@ {{ dut_name }} - 3072000 - 3072000 - 4 + 2048000 + 2048000 + 1 /machine - hvm + hvm @@ -31,15 +31,104 @@ - -{% for i in range(port_alias|length) %} - -{% endfor %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ansible/roles/vm_set/vars/main.yml b/ansible/roles/vm_set/vars/main.yml index b94aaff812b..344784ecec7 100644 --- a/ansible/roles/vm_set/vars/main.yml +++ b/ansible/roles/vm_set/vars/main.yml @@ -1,5 +1,19 @@ +login: "admin" +password: "" +new_login: admin +new_password: 123456 +new_root_password: 123456 + +sonic_login: "admin" +sonic_passwords: + - "YourPaSsWoRd" + - "password" +sonic_new_password: "password" + tor_memory: 1572864 spine_memory: 
2097152 +port1_bridge: br-port1-{{ id }} + fp_mtu_size: 9216 diff --git a/ansible/shell_plugins/docker.py b/ansible/shell_plugins/docker.py new file mode 100644 index 00000000000..ca9f2d6aebf --- /dev/null +++ b/ansible/shell_plugins/docker.py @@ -0,0 +1,94 @@ +from __future__ import (absolute_import, division) +__metaclass__ = type + +import os +import re +import pipes +import ansible.constants as C +import time +import random +import shlex +import getopt +from ansible.compat.six import text_type +from ansible.plugins.shell.sh import ShellModule as sh +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound + +class ShellModule(sh): + + def __init__(self, *args, **kwargs): + super(ShellModule, self).__init__(*args, **kwargs) + self.dtemps = [] + + def join_path(self, *args): + ## HACK! HACK! HACK! + ## We observe the interactions between ShellModule and ActionModule, and + ## find the temporary directories Ansible created on remote machine. So we + ## collect them and copied to docker container in build_module_command + if len(args) >= 2 and (args[0].startswith('/home/') or args[0].startswith('/root/')) and args[1] == '': + self.dtemps.append(args[0]) + + return super(ShellModule, self).join_path(*args) + + def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): + # assert(self.container_name) + argv = shlex.split(shebang.replace("#!", "")) + assert(argv[0] == 'docker') + assert(argv[1] == 'exec') + opts, args = getopt.getopt(argv[2:], 'i') + self.container_name = args[0] + + # Inject environment variable before python in the shebang string + assert(args[1].endswith('python')) + args[1] = 'env {0} {1}'.format(env_string, args[1]) + argv_env = argv[0:2] + [o for opt in opts for o in opt] + args + shebang_env = ' '.join(argv_env) + + ## Note: Docker cp behavior + ## DEST_PATH exists and is a directory + ## SRC_PATH does end with /. 
+ ## the content of the source directory is copied into this directory + ## Ref: https://docs.docker.com/engine/reference/commandline/cp/ + pre = ''.join('docker exec {1} mkdir -p {0}; docker cp {0}/. {1}:{0}; ' + .format(dtemp, self.container_name) for dtemp in self.dtemps) + + if rm_tmp: + post = ''.join('docker exec {1} rm -rf {0}; ' + .format(dtemp, self.container_name) for dtemp in self.dtemps) + else: + post = '' + + return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path, rm_tmp) + '; ' + post + + def checksum(self, path, python_interp): + """ + Return the command to calculate the checksum for the file in ansible controlled machine + Arguments: + path: + the file path + python_interp: + the path for the python interpreter + Example: + path: + /zebra.conf + python_interp: + docker exec -i debian python + cmd: + rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x"$rc" != "xflag" ] && echo "${rc} "/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf) + returns: + docker exec -i debian sh -c "rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x\"\$rc\" != \"xflag\" ] && echo \"\${rc} \"/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)" + """ + ## Super class implements this function by sh commands and python scripts + ## If python_interp is modified to 'docker CONTAINER python', it will only influence the python + ## script part in super class. 
Instead we should influence both + simple_interp = 'python' + assert(python_interp.startswith('docker exec ')) + assert(python_interp.endswith(' ' + simple_interp)) + + docker_prefix = re.sub(simple_interp, '', python_interp) + cmd = super(ShellModule, self).checksum(path, simple_interp) + ## Escape the cmd: + ## " --> \" + cmd_escaped = cmd.replace('"', '\\"') + ## $ --> \$ + cmd_escaped = cmd_escaped.replace('$', '\\$') + return '%s sh -c "%s"' % (docker_prefix, cmd_escaped) diff --git a/ansible/swap_syncd.yml b/ansible/swap_syncd.yml index e62587a6df8..00e713458c0 100644 --- a/ansible/swap_syncd.yml +++ b/ansible/swap_syncd.yml @@ -68,7 +68,7 @@ # Note: no_log requires passlib python library - name: Pull syncd-rpc docker from registry - shell: docker pull {{docker_registry_host}}/{{docker_rpc_image_name}}:{{sonic_image_version}} + shell: docker login -u {{docker_registry_username}} -p {{docker_registry_password}} {{docker_registry_host}}; docker pull {{docker_registry_host}}/{{docker_rpc_image_name}}:{{sonic_image_version}} no_log: true - name: Tag pulled images as syncd diff --git a/ansible/templates/minigraph_device.j2 b/ansible/templates/minigraph_device.j2 index 8524e72de2b..cfaad980078 100644 --- a/ansible/templates/minigraph_device.j2 +++ b/ansible/templates/minigraph_device.j2 @@ -26,8 +26,8 @@ true 0 - {{ hwsku }} + {{ hwsku }} - + diff --git a/ansible/templates/minigraph_dpg.j2 b/ansible/templates/minigraph_dpg.j2 index 92d6ea3dd62..83fb41893b3 100644 --- a/ansible/templates/minigraph_dpg.j2 +++ b/ansible/templates/minigraph_dpg.j2 @@ -32,9 +32,9 @@ V6HostIP eth0 - {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64 + FC00:2::32/64 - {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64 + FC00:2::32/64 @@ -56,21 +56,19 @@ {% if 'tor' in vm_topo_config['dut_type'] | lower %} -{% for vlan, vlan_param in vlan_configs.items() %} +{% set vlan_intf_str=';'.join(vlan_intfs) %} - {{ vlan }} -{% set 
vlan_intf_str=';'.join(vlan_param['intfs']) %} - {{ vlan_intf_str }} + Vlan1000 + {{ vlan_intf_str }} False 0.0.0.0/0 {% set dhcp_servers_str=';'.join(dhcp_servers) %} {{ dhcp_servers_str }} - {{ vlan_param['id'] }} - {{ vlan_param['tag'] }} - {{ vlan_param['prefix'] | ipaddr('network') }}/{{ vlan_param['prefix'] | ipaddr('prefix') }} + 1000 + 1000 + 192.168.0.0/21 -{% endfor %} {% endif %} @@ -95,22 +93,11 @@ {% endfor %} {% if 'tor' in vm_topo_config['dut_type'] | lower %} -{% for vlan, vlan_param in vlan_configs.items() %} - - - {{ vlan }} - {{ vlan_param['prefix'] }} - -{% endfor %} -{% for vlan, vlan_param in vlan_configs.items() %} -{% if 'prefix_v6' in vlan_param %} - {{ vlan }} - {{ vlan_param['prefix_v6'] }} + Vlan1000 + 192.168.0.1/21 -{% endif %} -{% endfor %} {% endif %} @@ -135,7 +122,7 @@ ssh-only SSH -{% if enable_data_plane_acl|default('true')|bool %} +{%- if enable_data_plane_acl|default('true')|bool %} {%- for index in range(vms_number) %} @@ -143,7 +130,7 @@ PortChannel{{ ((index+1) |string).zfill(4) }}{% if not loop.last %};{% endif %} {% endif %} {% endfor %} -{% for index in range(vms_number) %} +{%- for index in range(vms_number) %} {% if 'port-channel' not in vm_topo_config['vm'][vms[index]]['ip_intf']|lower %} {{ port_alias[vm_topo_config['vm'][vms[index]]['interface_indexes'][0]] }}{% if not loop.last %};{% endif %} {% endif %} @@ -152,7 +139,7 @@ PortChannel{{ ((index+1) |string).zfill(4) }}{% if not loop.last %};{% endif %} DataAcl DataPlane -{% endif %} +{% endif -%} diff --git a/ansible/test_sonic.yml b/ansible/test_sonic.yml index 21b76171718..923525c862e 100644 --- a/ansible/test_sonic.yml +++ b/ansible/test_sonic.yml @@ -14,6 +14,8 @@ - hosts: sonic + vars_files: + - vars/docker_registry.yml roles: - { role: test, scope: 'sonic' } force_handlers: true diff --git a/ansible/testbed-new.yaml b/ansible/testbed-new.yaml index 90314860142..960b6d54dc1 100644 --- a/ansible/testbed-new.yaml +++ b/ansible/testbed-new.yaml @@ -227,7 +227,7 @@ 
veos_groups: servers: children: [server_1, server_2] # source: sonic-mgmt/veos vars: - topologies: ['t1', 't1-lag', 't1-64-lag', 't1-64-lag-clet', 't0', 't0-56', 't0-52', 'ptf32', 'ptf64', 't0-64', 't0-64-32', 't0-116'] # source: sonic-mgmt/veos + topologies: ['t1','t1-slx', 't1-lag', 't1-64-lag', 't0', 't0-56', 't0-52', 'ptf32', 'ptf64', 't0-64', 't0-64-32', 't0-116'] # source: sonic-mgmt/veos server_1: children: [vm_host_1, vms_1] # source: sonic-mgmt/veos vars: @@ -259,8 +259,8 @@ veos: root_path: /home/azure/veos-vm # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml vm_images_url: https://acsbe.blob.core.windows.net/vmimages # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml cd_image_filename: Aboot-veos-serial-8.0.0.iso # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml - hdd_image_filename: vEOS-lab-4.20.15M.vmdk # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml - skip_image_downloading: false # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml + hdd_image_filename: vEOS-lab-4.15.10M.vmdk # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml + skip_image_downloading: true # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml vm_console_base: 7000 # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml memory: 2097152 # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml max_fp_num: 4 # source: sonic-mgmt/ansible/group_vars/vm_host/main.yml @@ -745,3 +745,7 @@ topology: # source: sonic-mgmt/ansible/files/sonic # docker_registry dictionary does not cross reference with other files docker_registry: docker_registry_host: sonicdev-microsoft.azurecr.io:443 + docker_registry_username: 1dafc8d7-d19c-4f58-8653-e8d904f30dab + docker_registry_password: sonic + + diff --git a/ansible/testbed.csv b/ansible/testbed.csv index 45a324fa2fb..529156d6aa9 100644 --- a/ansible/testbed.csv +++ b/ansible/testbed.csv @@ -1,4 +1,8 @@ # conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment 
+cel_slx_t0,ptf2,t0-slx,docker-ptf,ptf2,10.251.0.110/24,,server_2,VM0300,cel-seastone-01,Tests ptf +cel_slx2_t0,ptf2,t0-slx,docker-ptf,ptf2,10.251.0.110/24,,server_2,VM0300,cel-seastone2-01,Tests ptf +cel_slx_t1,ptf2,t1-slx,docker-ptf,ptf2,10.251.0.110/24,,server_3,VM0400,cel-seastone-01,Tests ptf +cel_e1031_t0,ptf1,t0-e1031,docker-ptf,ptf1,10.250.0.110/24,,server_1,VM0100,cel-e1031-01,Tests ptf ptf1-m,ptf1,ptf32,docker-ptf-sai-mlnx,ptf-unknown,10.255.0.188/24,,server_1,,str-msn2700-01,Test ptf Mellanox ptf2-b,ptf2,ptf64,docker-ptf-sai-brcm,ptf-unknown,10.255.0.189/24,,server_1,,lab-s6100-01,Test ptf Broadcom vms-sn2700-t1,vms1-1,t1,docker-ptf-sai-mlnx,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms diff --git a/ansible/testbed_add_vm_topology.yml b/ansible/testbed_add_vm_topology.yml index 7893ec5d451..23ee028e9ad 100644 --- a/ansible/testbed_add_vm_topology.yml +++ b/ansible/testbed_add_vm_topology.yml @@ -6,7 +6,7 @@ # - configuration property # - configuration # -# topology key contains a dictionary of hostnames with 'vm_offset' and 'vlans' keys in it. +# topology key contains a dictionary of hostnames with 'vm_offset' and 'vlans' keys in it. # 'vm_offset' is used to map current hostname vm_set VM to server VM (like ARISTA01T0 -> VM0300). # This offset is used on VM_base # 'vlans' is a list of vlan offsets which helps us to calculate vlan numbers which will be connected to Eth1/1..Eth1/8 interfaces. 
@@ -24,10 +24,8 @@ # -e dut_name=str-msn2700-01 - the name of target dut # -e VM_base=VM0300 - the VM name which is used to as base to calculate VM name for this set # -e ptf_ip=10.255.0.255/23 - the ip address and prefix of ptf container mgmt interface -# -e ptf_ipv6=fec0::ffff:afa:1/64 - the ipv6 address and prefix of ptf container mgmt interface # -e topo=t0 - the name of removed topo # -e ptf_imagename=docker-ptf - name of a docker-image which will be used for the ptf docker container -# -e vm_type=veos|ceos - hosts: servers:&vm_host gather_facts: no @@ -54,10 +52,6 @@ fail: msg="Define ptf ip variable with -e ptf_ip=something" when: ptf_ip is not defined - - name: Check that variable ptf_ipv6 is defined - fail: msg="Define ptf ipv6 variable with -e ptf_ipv6=something" - when: ptf_ipv6 is not defined - - name: Check that variable topo is defined fail: msg="Define topo variable with -e topo=something" when: topo is not defined @@ -74,52 +68,40 @@ include_vars: "vars/topo_{{ topo }}.yml" - name: Read dut minigraph - conn_graph_facts: - host: "{{ dut_name }}" - delegate_to: localhost - when: dut_name.split(',')|length == 1 - - - name: Read duts minigraph - conn_graph_facts: - hosts: "{{ dut_name.split(',') }}" - delegate_to: localhost - when: dut_name.split(',')|length > 1 + conn_graph_facts: host={{ dut_name }} + connection: local roles: - { role: vm_set, action: 'start_sonic_vm' } - - { role: vm_set, action: 'start_sid' } - { role: vm_set, action: 'add_topo' } - hosts: servers:&eos gather_facts: no pre_tasks: - - block: - - name: Check that variable topo is defined - fail: msg="Define topo variable with -e topo=something" - when: topo is not defined + - name: Check that variable topo is defined + fail: msg="Define topo variable with -e topo=something" + when: topo is not defined - - name: Check if it is a known topology - fail: msg="Unknown topology {{ topo }}" - when: topo not in topologies + - name: Check if it is a known topology + fail: msg="Unknown topology 
{{ topo }}" + when: topo not in topologies - - name: Check that variable VM_base is defined - fail: msg="Define VM_base variable with -e VM_base=something" - when: VM_base is not defined + - name: Check that variable VM_base is defined + fail: msg="Define VM_base variable with -e VM_base=something" + when: VM_base is not defined - - name: Load topo variables - include_vars: "vars/topo_{{ topo }}.yml" + - name: Load topo variables + include_vars: "vars/topo_{{ topo }}.yml" - - name: Find current server group - set_fact: current_server={{ group_names | extract_by_prefix('server_') }} + - name: Find current server group + set_fact: current_server={{ group_names | extract_by_prefix('server_') }} - - name: Extract VM names from the inventory - set_fact: VM_hosts={{ groups[current_server] | filter_by_prefix('VM') | sort }} + - name: Extract VM names from the inventory + set_fact: VM_hosts={{ groups[current_server] | filter_by_prefix('VM') }} - - name: Generate vm list of target VMs - set_fact: VM_targets={{ VM_hosts | filter_vm_targets(topology['VMs'], VM_base) }} - when: topology['VMs'] is defined - run_once: True - delegate_to: localhost + - name: Generate vm list of target VMs + set_fact: VM_targets={{ VM_hosts | filter_vm_targets(topology['VMs'], VM_base) }} + when: topology['VMs'] is defined roles: - { role: eos, when: topology.VMs is defined and inventory_hostname in VM_targets } # role eos will be executed in any case, and when will evaluate with every task diff --git a/ansible/testbed_connect_vms.yml b/ansible/testbed_connect_vms.yml index b5aae29394d..0bd464407d3 100644 --- a/ansible/testbed_connect_vms.yml +++ b/ansible/testbed_connect_vms.yml @@ -43,7 +43,7 @@ - name: Read dut minigraph conn_graph_facts: host={{ dut_name }} - delegate_to: localhost + connection: local roles: - { role: vm_set, action: 'connect_vms' } diff --git a/ansible/testbed_disconnect_vms.yml b/ansible/testbed_disconnect_vms.yml index a75593a4ea8..1b32bb17311 100644 --- 
a/ansible/testbed_disconnect_vms.yml +++ b/ansible/testbed_disconnect_vms.yml @@ -43,7 +43,7 @@ - name: Read dut minigraph conn_graph_facts: host={{ dut_name }} - delegate_to: localhost + connection: local roles: - { role: vm_set, action: 'disconnect_vms' } diff --git a/ansible/testbed_refresh_dut.yml b/ansible/testbed_refresh_dut.yml index 1ad3638e43a..2ae764bfb22 100644 --- a/ansible/testbed_refresh_dut.yml +++ b/ansible/testbed_refresh_dut.yml @@ -6,7 +6,7 @@ # - configuration property # - configuration # -# topology key contains a dictionary of hostnames with 'vm_offset' and 'vlans' keys in it. +# topology key contains a dictionary of hostnames with 'vm_offset' and 'vlans' keys in it. # 'vm_offset' is used to map current hostname vm_set VM to server VM (like ARISTA01T0 -> VM0300). # This offset is used on VM_base # 'vlans' is a list of vlan offsets which helps us to calculate vlan numbers which will be connected to Eth1/1..Eth1/8 interfaces. @@ -24,7 +24,6 @@ # -e dut_name=str-msn2700-01 - the name of target dut # -e VM_base=VM0300 - the VM name which is used to as base to calculate VM name for this set # -e ptf_ip=10.255.0.255/23 - the ip address and prefix of ptf container mgmt interface -# -e ptf_ipv6=fec0::ffff:afa:1/64 - the ipv6 address and prefix of ptf container mgmt interface # -e topo=t0 - the name of removed topo # -e ptf_imagename=docker-ptf - name of a docker-image which will be used for the ptf docker container @@ -53,10 +52,6 @@ fail: msg="Define ptf ip variable with -e ptf_ip=something" when: ptf_ip is not defined - - name: Check that variable ptf_ipv6 is defined - fail: msg="Define ptf ipv6 variable with -e ptf_ipv6=something" - when: ptf_ipv6 is not defined - - name: Check that variable topo is defined fail: msg="Define topo variable with -e topo=something" when: topo is not defined @@ -74,7 +69,7 @@ - name: Read dut minigraph conn_graph_facts: host={{ dut_name }} - delegate_to: localhost + connection: local roles: - { role: vm_set, action: 
'stop_sonic_vm' } diff --git a/ansible/testbed_remove_vm_topology.yml b/ansible/testbed_remove_vm_topology.yml index 4f9f9c67f2a..a05108164ad 100644 --- a/ansible/testbed_remove_vm_topology.yml +++ b/ansible/testbed_remove_vm_topology.yml @@ -51,19 +51,10 @@ include_vars: "vars/topo_{{ topo }}.yml" - name: Read dut minigraph - conn_graph_facts: - host: "{{ dut_name }}" - delegate_to: localhost - when: dut_name.split(',')|length == 1 - - - name: Read duts minigraph - conn_graph_facts: - hosts: "{{ dut_name.split(',') }}" - delegate_to: localhost - when: dut_name.split(',')|length > 1 + conn_graph_facts: host={{ dut_name }} + connection: local roles: - { role: vm_set, action: 'remove_topo' } - - { role: vm_set, action: 'stop_sid' } - { role: vm_set, action: 'stop_sonic_vm' } diff --git a/ansible/testbed_renumber_vm_topology.yml b/ansible/testbed_renumber_vm_topology.yml index 5179a855f59..5fe11f05370 100644 --- a/ansible/testbed_renumber_vm_topology.yml +++ b/ansible/testbed_renumber_vm_topology.yml @@ -38,10 +38,6 @@ fail: msg="Define ptf ip variable with -e ptf_ip=something" when: ptf_ip is not defined - - name: Check that variable ptf_ipv6 is defined - fail: msg="Define ptf ipv6 variable with -e ptf_ipv6=something" - when: ptf_ipv6 is not defined - - name: Check that variable topo is defined fail: msg="Define topo variable with -e topo=something" when: topo is not defined or topo not in topologies @@ -55,7 +51,7 @@ - name: Read dut minigraph conn_graph_facts: host={{ dut_name }} - delegate_to: localhost + connection: local roles: - { role: vm_set, action: 'renumber_topo' } diff --git a/ansible/vars/azure_storage.yml b/ansible/vars/azure_storage.yml index 08a19101834..6bdae04494c 100644 --- a/ansible/vars/azure_storage.yml +++ b/ansible/vars/azure_storage.yml @@ -1,5 +1,4 @@ ## saskey are treated as credential in Credscan vmimage_saskey: use_own_value cdimage_saskey: use_own_value -ceosimage_saskey: use_own_value -k8s_vmimage_saskey: use_own_value + diff --git 
a/ansible/vars/configlet/t1-64-lag-clet/apply_clet.sh b/ansible/vars/configlet/t1-64-lag-clet/apply_clet.sh index bb00c8cd3e8..fa0d4a8481f 100755 --- a/ansible/vars/configlet/t1-64-lag-clet/apply_clet.sh +++ b/ansible/vars/configlet/t1-64-lag-clet/apply_clet.sh @@ -2,5 +2,5 @@ # Sleep to let all BGP sessions go up & running before adding a T0 sleep 1m -configlet -j /etc/sonic/clet-to_clear.json -d -configlet -j /etc/sonic/clet-add.json -u +/usr/bin/configlet -j /etc/sonic/clet-to_clear.json -d +/usr/bin/configlet -j /etc/sonic/clet-add.json -u diff --git a/ansible/vars/docker_registry.yml b/ansible/vars/docker_registry.yml index 238cf566850..2e00c6f4c32 100644 --- a/ansible/vars/docker_registry.yml +++ b/ansible/vars/docker_registry.yml @@ -1 +1,4 @@ -docker_registry_host: sonicdev-microsoft.azurecr.io:443 +docker_registry_host: localhost:5000 + +docker_registry_username: clsnet +docker_registry_password: sonic diff --git a/ansible/vars/run_config_test_vars.yml b/ansible/vars/run_config_test_vars.yml new file mode 100644 index 00000000000..686892092d4 --- /dev/null +++ b/ansible/vars/run_config_test_vars.yml @@ -0,0 +1,13 @@ +--- + +testname_unique: "{{ testname }}.{{ unique_timestamp }}" + +test_out_dir: "{{ out_dir }}/{{ testname_unique }}" +loganalyzer_init: roles/test/files/tools/loganalyzer/loganalyzer_init.yml +loganalyzer_analyze: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + +match_file: loganalyzer_common_match.txt +ignore_file: loganalyzer_common_ignore.txt + +summary_file: summary.loganalysis.{{ testname_unique }}.log +result_file: result.loganalysis.{{ testname_unique }}.log diff --git a/ansible/vars/run_loganalyzer_vars.yml b/ansible/vars/run_loganalyzer_vars.yml new file mode 100644 index 00000000000..e3df78a571e --- /dev/null +++ b/ansible/vars/run_loganalyzer_vars.yml @@ -0,0 +1,15 @@ +--- + +testname_unique: "{{ testname }}.{{ unique_timestamp }}" + +test_out_dir: "{{ out_dir }}/{{ testname_unique }}" +loganalyzer_init: 
roles/test/files/tools/loganalyzer/loganalyzer_init.yml +loganalyzer_analyze: roles/test/files/tools/loganalyzer/loganalyzer_analyze.yml + +match_file: loganalyzer_common_match.txt +ignore_file: loganalyzer_common_ignore.txt + +summary_file: summary.loganalysis.{{ testname_unique }}.log +result_file: result.loganalysis.{{ testname_unique }}.log + +run_analyze_and_check: "roles/test/tasks/run_analyze_and_check.yml" diff --git a/ansible/vars/run_ping_test_vars.yml b/ansible/vars/run_ping_test_vars.yml new file mode 100644 index 00000000000..a4dd27773c4 --- /dev/null +++ b/ansible/vars/run_ping_test_vars.yml @@ -0,0 +1,7 @@ +--- + +testname_unique: "{{ testname }}.{{ unique_timestamp }}" + +test_out_dir: "{{ out_dir }}/{{ testname_unique }}" +summary_file: "summary.loganalysis.{{ testname_unique }}.log" +result_file: "result.loganalysis.{{ testname_unique }}.log" diff --git a/ansible/vars/run_ptf_test_vars.yml b/ansible/vars/run_ptf_test_vars.yml new file mode 100644 index 00000000000..a4dd27773c4 --- /dev/null +++ b/ansible/vars/run_ptf_test_vars.yml @@ -0,0 +1,7 @@ +--- + +testname_unique: "{{ testname }}.{{ unique_timestamp }}" + +test_out_dir: "{{ out_dir }}/{{ testname_unique }}" +summary_file: "summary.loganalysis.{{ testname_unique }}.log" +result_file: "result.loganalysis.{{ testname_unique }}.log" diff --git a/ansible/vars/topo_t0-116.yml b/ansible/vars/topo_t0-116.yml index fdc2edf33f3..9630f489925 100644 --- a/ansible/vars/topo_t0-116.yml +++ b/ansible/vars/topo_t0-116.yml @@ -133,40 +133,23 @@ topology: - 30 - 31 vm_offset: 3 - DUT: - vlan_configs: - default_vlan_config: one_vlan_a - one_vlan_a: - Vlan1000: - id: 1000 - intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 
94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] - prefix: 192.168.0.1/21 - prefix_v6: fc02:1000::1/64 - tag: 1000 - two_vlan_a: - Vlan100: - id: 100 - intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63] - prefix: 192.168.100.1/21 - prefix_v6: fc02:100::1/64 - tag: 100 - Vlan200: - id: 200 - intfs: [64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] - prefix: 192.168.200.1/21 - prefix_v6: fc02:200::1/64 - tag: 200 configuration_properties: common: dut_asn: 4200065100 dut_type: ToRRouter swrole: leaf + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 spine_asn: 65534 leaf_asn_start: 4200064600 - tor_asn_start: 4200065500 - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + tor_asn_start: 4200065100 + failure_rate: 0 + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 configuration: ARISTA01T1: diff --git a/ansible/vars/topo_t0-16.yml b/ansible/vars/topo_t0-16.yml index c267b29a14e..8c177d7c3c3 100644 --- a/ansible/vars/topo_t0-16.yml +++ b/ansible/vars/topo_t0-16.yml @@ -106,29 +106,6 @@ topology: vlans: - 53 vm_offset: 5 - DUT: - vlan_configs: - default_vlan_config: one_vlan_a - one_vlan_a: - Vlan1000: - id: 1000 - intfs: [32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47] - prefix: 192.168.0.1/21 - prefix_v6: fc02:1000::1/64 - tag: 1000 - two_vlan_a: - Vlan100: - id: 100 - intfs: [32, 33, 34, 35, 36, 37, 38, 39] - prefix: 192.168.100.1/21 - prefix_v6: fc02:100::1/64 - tag: 100 - Vlan200: - id: 200 - intfs: [40, 41, 42, 43, 44, 45, 46, 47] - prefix: 192.168.200.1/21 - prefix_v6: 
fc02:200::1/64 - tag: 200 configuration_properties: common: diff --git a/ansible/vars/topo_t0-52.yml b/ansible/vars/topo_t0-52.yml index 623b4376618..ccf12b89fcb 100644 --- a/ansible/vars/topo_t0-52.yml +++ b/ansible/vars/topo_t0-52.yml @@ -65,29 +65,6 @@ topology: vlans: - 51 vm_offset: 3 - DUT: - vlan_configs: - default_vlan_config: one_vlan_a - one_vlan_a: - Vlan1000: - id: 1000 - intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47] - prefix: 192.168.0.1/21 - prefix_v6: fc02:1000::1/64 - tag: 1000 - two_vlan_a: - Vlan100: - id: 100 - intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] - prefix: 192.168.100.1/21 - prefix_v6: fc02:100::1/64 - tag: 100 - Vlan200: - id: 200 - intfs: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47] - prefix: 192.168.200.1/21 - prefix_v6: fc02:200::1/64 - tag: 200 configuration_properties: common: diff --git a/ansible/vars/topo_t0-56.yml b/ansible/vars/topo_t0-56.yml index 2db3902baaf..d3a7ea2c51e 100644 --- a/ansible/vars/topo_t0-56.yml +++ b/ansible/vars/topo_t0-56.yml @@ -106,29 +106,6 @@ topology: vlans: - 43 vm_offset: 7 - DUT: - vlan_configs: - default_vlan_config: one_vlan_a - one_vlan_a: - Vlan1000: - id: 1000 - intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55] - prefix: 192.168.0.1/21 - prefix_v6: fc02:1000::1/64 - tag: 1000 - two_vlan_a: - Vlan100: - id: 100 - intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] - prefix: 192.168.100.1/21 - prefix_v6: fc02:100::1/64 - tag: 100 - Vlan200: - id: 200 - intfs: [27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55] - prefix: 
192.168.200.1/21 - prefix_v6: fc02:200::1/64 - tag: 200 configuration_properties: common: diff --git a/ansible/vars/topo_t0-64-32.yml b/ansible/vars/topo_t0-64-32.yml index 6b88de1234e..f84d7173e6e 100644 --- a/ansible/vars/topo_t0-64-32.yml +++ b/ansible/vars/topo_t0-64-32.yml @@ -45,29 +45,6 @@ topology: - 20 - 21 vm_offset: 3 - DUT: - vlan_configs: - default_vlan_config: one_vlan_a - one_vlan_a: - Vlan1000: - id: 1000 - intfs: [2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - prefix: 192.168.0.1/21 - prefix_v6: fc02:1000::1/64 - tag: 1000 - two_vlan_a: - Vlan100: - id: 100 - intfs: [2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - prefix: 192.168.100.1/21 - prefix_v6: fc02:100::1/64 - tag: 100 - Vlan200: - id: 200 - intfs: [18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - prefix: 192.168.200.1/21 - prefix_v6: fc02:200::1/64 - tag: 200 configuration_properties: common: @@ -83,8 +60,8 @@ configuration_properties: leaf_asn_start: 64802 tor_asn_start: 64601 failure_rate: 0 - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 configuration: ARISTA01T1: @@ -182,3 +159,4 @@ configuration: bp_interface: ipv4: 10.10.246.4/24 ipv6: fc0a::4/64 + diff --git a/ansible/vars/topo_t0-64.yml b/ansible/vars/topo_t0-64.yml index 774e29bca41..ea2f6ba5038 100644 --- a/ansible/vars/topo_t0-64.yml +++ b/ansible/vars/topo_t0-64.yml @@ -98,29 +98,6 @@ topology: - 20 - 21 vm_offset: 3 - DUT: - vlan_configs: - default_vlan_config: one_vlan_a - one_vlan_a: - Vlan1000: - id: 1000 - intfs: [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 36, 37, 38, 39, 40, 41, 42, 48, 52, 53, 54, 55, 56, 57, 58] - prefix: 192.168.0.1/21 - prefix_v6: fc02:1000::1/64 - tag: 1000 - two_vlan_a: - Vlan100: - id: 100 - intfs: [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 22, 23, 24, 25, 26, 27, 28, 29] - prefix: 192.168.100.1/21 - prefix_v6: fc02:100::1/64 - tag: 100 - Vlan200: - id: 200 - intfs: [30, 31, 32, 36, 37, 
38, 39, 40, 41, 42, 48, 52, 53, 54, 55, 56, 57, 58] - prefix: 192.168.200.1/21 - prefix_v6: fc02:200::1/64 - tag: 200 configuration_properties: common: @@ -136,8 +113,8 @@ configuration_properties: leaf_asn_start: 64802 tor_asn_start: 64601 failure_rate: 0 - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 configuration: ARISTA01T1: diff --git a/ansible/vars/topo_t0-e1031.yml b/ansible/vars/topo_t0-e1031.yml new file mode 100644 index 00000000000..acf0bc38de2 --- /dev/null +++ b/ansible/vars/topo_t0-e1031.yml @@ -0,0 +1,179 @@ +topology: + host_interfaces: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + - 21 + - 22 + - 23 + - 24 + - 25 + - 26 + - 27 + disabled_host_interfaces: + - 0 + - 1 + - 2 + - 4 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 19 + - 20 + - 21 + - 22 + - 23 + - 24 + - 25 + - 26 + - 27 + VMs: + ARISTA01T1: + vlans: + - 28 + vm_offset: 0 + ARISTA02T1: + vlans: + - 29 + vm_offset: 1 + ARISTA03T1: + vlans: + - 30 + vm_offset: 2 + ARISTA04T1: + vlans: + - 31 + vm_offset: 3 + +configuration_properties: + common: + dut_asn: 65100 + dut_type: ToRRouter + swrole: leaf + podset_number: 64 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 65534 + leaf_asn_start: 64600 + tor_asn_start: 65100 + failure_rate: 0 + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 + +configuration: + ARISTA01T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.56 + - FC00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interface: + ipv4: 10.10.246.29/24 + ipv6: fc0a::3a/64 + + ARISTA02T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.58 + - FC00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 
2064:100::1e/128 + Ethernet1: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interface: + ipv4: 10.10.246.30/24 + ipv6: fc0a::3d/64 + + ARISTA03T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.60 + - FC00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interface: + ipv4: 10.10.246.31/24 + ipv6: fc0a::3e/64 + + ARISTA04T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.62 + - FC00::7D + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100::20/128 + Ethernet1: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + bp_interface: + ipv4: 10.10.246.32/24 + ipv6: fc0a::41/64 diff --git a/ansible/vars/topo_t0-slx.yml b/ansible/vars/topo_t0-slx.yml new file mode 100644 index 00000000000..ba5649b5442 --- /dev/null +++ b/ansible/vars/topo_t0-slx.yml @@ -0,0 +1,154 @@ +topology: + host_interfaces: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + - 21 + - 22 + - 23 + - 24 + - 25 + - 26 + disabled_host_interfaces: + - 27 + VMs: + ARISTA01T1: + vlans: + - 27 + vm_offset: 0 + ARISTA02T1: + vlans: + - 28 + vm_offset: 1 + ARISTA03T1: + vlans: + - 29 + vm_offset: 2 + ARISTA04T1: + vlans: + - 30 + vm_offset: 3 + +configuration_properties: + common: + dut_asn: 65100 + dut_type: ToRRouter + swrole: leaf + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 65534 + leaf_asn_start: 64600 + tor_asn_start: 65100 + failure_rate: 0 + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 + +configuration: + ARISTA01T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.56 + - FC00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + lacp: 1 + Port-Channel1: + 
ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interface: + ipv4: 10.10.246.29/24 + ipv6: fc0a::3a/64 + + ARISTA02T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.58 + - FC00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 2064:100::1e/128 + Ethernet1: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interface: + ipv4: 10.10.246.30/24 + ipv6: fc0a::3d/64 + + ARISTA03T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.60 + - FC00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interface: + ipv4: 10.10.246.31/24 + ipv6: fc0a::3e/64 + + ARISTA04T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.62 + - FC00::7D + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100::20/128 + Ethernet1: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + bp_interface: + ipv4: 10.10.246.32/24 + ipv6: fc0a::41/64 diff --git a/ansible/vars/topo_t0.yml b/ansible/vars/topo_t0.yml index 3f5a1d74cea..219406f0280 100644 --- a/ansible/vars/topo_t0.yml +++ b/ansible/vars/topo_t0.yml @@ -30,6 +30,28 @@ topology: - 27 disabled_host_interfaces: - 0 + - 1 + - 2 + - 4 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + - 21 + - 22 + - 23 + - 24 - 25 - 26 - 27 @@ -50,37 +72,23 @@ topology: vlans: - 31 vm_offset: 3 - DUT: - vlan_configs: - default_vlan_config: one_vlan_a - one_vlan_a: - Vlan1000: - id: 1000 - intfs: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - prefix: 192.168.0.1/21 - prefix_v6: fc02:1000::1/64 - tag: 1000 - two_vlan_a: - Vlan100: - id: 100 - intfs: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] - prefix: 192.168.100.1/21 - prefix_v6: fc02:100::1/64 - tag: 100 - Vlan200: - id: 200 - intfs: [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] - 
prefix: 192.168.200.1/21 - prefix_v6: fc02:200::1/64 - tag: 200 configuration_properties: common: dut_asn: 65100 dut_type: ToRRouter swrole: leaf - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 65534 + leaf_asn_start: 64600 + tor_asn_start: 65100 + failure_rate: 0 + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 configuration: ARISTA01T1: @@ -103,7 +111,7 @@ configuration: ipv6: fc00::72/126 bp_interface: ipv4: 10.10.246.29/24 - ipv6: fc0a::1d/64 + ipv6: fc0a::3a/64 ARISTA02T1: properties: @@ -125,7 +133,7 @@ configuration: ipv6: fc00::76/126 bp_interface: ipv4: 10.10.246.30/24 - ipv6: fc0a::1e/64 + ipv6: fc0a::3d/64 ARISTA03T1: properties: @@ -147,7 +155,7 @@ configuration: ipv6: fc00::7a/126 bp_interface: ipv4: 10.10.246.31/24 - ipv6: fc0a::1f/64 + ipv6: fc0a::3e/64 ARISTA04T1: properties: @@ -169,4 +177,4 @@ configuration: ipv6: fc00::7e/126 bp_interface: ipv4: 10.10.246.32/24 - ipv6: fc0a::20/64 + ipv6: fc0a::41/64 diff --git a/ansible/vars/topo_t1-64-lag.yml b/ansible/vars/topo_t1-64-lag.yml index c6dd92f1fde..b64994d4843 100644 --- a/ansible/vars/topo_t1-64-lag.yml +++ b/ansible/vars/topo_t1-64-lag.yml @@ -105,12 +105,19 @@ configuration_properties: common: dut_asn: 65100 dut_type: LeafRouter - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 spine: swrole: spine + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + leaf_asn_start: 62001 + tor_asn_start: 65501 + failure_rate: 0 tor: swrole: tor + tor_subnet_number: 5 configuration: ARISTA01T2: diff --git a/ansible/vars/topo_t1-64.yml b/ansible/vars/topo_t1-64.yml index 61885d4ef95..4be821f844b 100644 --- a/ansible/vars/topo_t1-64.yml +++ b/ansible/vars/topo_t1-64.yml @@ -261,12 +261,19 @@ configuration_properties: common: dut_asn: 65100 dut_type: LeafRouter - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 spine: 
swrole: spine + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + leaf_asn_start: 62001 + tor_asn_start: 65501 + failure_rate: 0 tor: swrole: tor + tor_subnet_number: 5 configuration: ARISTA01T2: diff --git a/ansible/vars/topo_t1-lag.yml b/ansible/vars/topo_t1-lag.yml index bddf2c4d6e1..b9c54ff961b 100644 --- a/ansible/vars/topo_t1-lag.yml +++ b/ansible/vars/topo_t1-lag.yml @@ -109,12 +109,19 @@ configuration_properties: common: dut_asn: 65100 dut_type: LeafRouter - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 spine: swrole: spine + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + leaf_asn_start: 62001 + tor_asn_start: 65501 + failure_rate: 0 tor: swrole: tor + tor_subnet_number: 5 configuration: ARISTA01T2: diff --git a/ansible/vars/topo_t1-slx.yml b/ansible/vars/topo_t1-slx.yml new file mode 100644 index 00000000000..c9f44a6a8cc --- /dev/null +++ b/ansible/vars/topo_t1-slx.yml @@ -0,0 +1,824 @@ +topology: + disabled_host_interfaces: + - 31 + VMs: + ARISTA01T2: + vlans: + - 0 + vm_offset: 0 + ARISTA02T2: + vlans: + - 1 + vm_offset: 1 + ARISTA03T2: + vlans: + - 2 + vm_offset: 2 + ARISTA04T2: + vlans: + - 3 + vm_offset: 3 + ARISTA05T2: + vlans: + - 4 + vm_offset: 4 + ARISTA06T2: + vlans: + - 5 + vm_offset: 5 + ARISTA07T2: + vlans: + - 6 + vm_offset: 6 + ARISTA08T2: + vlans: + - 7 + vm_offset: 7 + ARISTA09T2: + vlans: + - 8 + vm_offset: 8 + ARISTA10T2: + vlans: + - 9 + vm_offset: 9 + ARISTA11T2: + vlans: + - 10 + vm_offset: 10 + ARISTA12T2: + vlans: + - 11 + vm_offset: 11 + ARISTA13T2: + vlans: + - 12 + vm_offset: 12 + ARISTA14T2: + vlans: + - 13 + vm_offset: 13 + ARISTA15T2: + vlans: + - 14 + vm_offset: 14 + ARISTA16T2: + vlans: + - 15 + vm_offset: 15 + ARISTA01T0: + vlans: + - 16 + vm_offset: 16 + ARISTA02T0: + vlans: + - 17 + vm_offset: 17 + ARISTA03T0: + vlans: + - 18 + vm_offset: 18 + ARISTA04T0: + vlans: + - 19 + vm_offset: 19 + ARISTA05T0: + vlans: + - 20 + vm_offset: 20 + ARISTA06T0: + vlans: + - 
21 + vm_offset: 21 + ARISTA07T0: + vlans: + - 22 + vm_offset: 22 + ARISTA08T0: + vlans: + - 23 + vm_offset: 23 + ARISTA09T0: + vlans: + - 24 + vm_offset: 24 + ARISTA10T0: + vlans: + - 25 + vm_offset: 25 + ARISTA11T0: + vlans: + - 26 + vm_offset: 26 + ARISTA12T0: + vlans: + - 27 + vm_offset: 27 + ARISTA13T0: + vlans: + - 28 + vm_offset: 28 + ARISTA14T0: + vlans: + - 29 + vm_offset: 29 + ARISTA15T0: + vlans: + - 30 + vm_offset: 30 + +configuration_properties: + common: + dut_asn: 65100 + dut_type: LeafRouter + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 + spine: + swrole: spine + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + leaf_asn_start: 62001 + tor_asn_start: 65501 + failure_rate: 0 + tor: + swrole: tor + tor_subnet_number: 5 + +configuration: + ARISTA01T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.0 + - FC00::1 + interfaces: + Loopback0: + ipv4: 100.1.0.1/32 + ipv6: 2064:100::1/128 + Ethernet1: + ipv4: 10.0.0.1/31 + ipv6: fc00::2/126 + bp_interface: + ipv4: 10.10.246.1/24 + ipv6: fc0a::2/64 + + ARISTA02T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.2 + - FC00::5 + interfaces: + Loopback0: + ipv4: 100.1.0.2/32 + ipv6: 2064:100::2/128 + Ethernet1: + ipv4: 10.0.0.3/31 + ipv6: fc00::6/126 + bp_interface: + ipv4: 10.10.246.2/24 + ipv6: fc0a::5/64 + + ARISTA03T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.4 + - FC00::9 + interfaces: + Loopback0: + ipv4: 100.1.0.3/32 + ipv6: 2064:100::3/128 + Ethernet1: + ipv4: 10.0.0.5/31 + ipv6: fc00::a/126 + bp_interface: + ipv4: 10.10.246.3/24 + ipv6: fc0a::6/64 + + ARISTA04T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.6 + - FC00::D + interfaces: + Loopback0: + ipv4: 100.1.0.4/32 + ipv6: 2064:100::4/128 + Ethernet1: + ipv4: 10.0.0.7/31 + ipv6: fc00::e/126 + bp_interface: + ipv4: 10.10.246.4/24 + ipv6: fc0a::9/64 + + ARISTA05T2: + properties: + - common + - 
spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.8 + - FC00::11 + interfaces: + Loopback0: + ipv4: 100.1.0.5/32 + ipv6: 2064:100::5/128 + Ethernet1: + ipv4: 10.0.0.9/31 + ipv6: fc00::12/126 + bp_interface: + ipv4: 10.10.246.5/24 + ipv6: fc0a::a/64 + + ARISTA06T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.10 + - FC00::15 + interfaces: + Loopback0: + ipv4: 100.1.0.6/32 + ipv6: 2064:100::6/128 + Ethernet1: + ipv4: 10.0.0.11/31 + ipv6: fc00::16/126 + bp_interface: + ipv4: 10.10.246.6/24 + ipv6: fc0a::d/64 + + ARISTA07T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.12 + - FC00::19 + interfaces: + Loopback0: + ipv4: 100.1.0.7/32 + ipv6: 2064:100::7/128 + Ethernet1: + ipv4: 10.0.0.13/31 + ipv6: fc00::1a/126 + bp_interface: + ipv4: 10.10.246.7/24 + ipv6: fc0a::e/64 + + ARISTA08T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.14 + - FC00::1D + interfaces: + Loopback0: + ipv4: 100.1.0.8/32 + ipv6: 2064:100::8/128 + Ethernet1: + ipv4: 10.0.0.15/31 + ipv6: fc00::1e/126 + bp_interface: + ipv4: 10.10.246.8/24 + ipv6: fc0a::11/64 + + ARISTA09T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.16 + - FC00::21 + interfaces: + Loopback0: + ipv4: 100.1.0.9/32 + ipv6: 2064:100::9/128 + Ethernet1: + ipv4: 10.0.0.17/31 + ipv6: fc00::22/126 + bp_interface: + ipv4: 10.10.246.9/24 + ipv6: fc0a::12/64 + + ARISTA10T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.18 + - FC00::25 + interfaces: + Loopback0: + ipv4: 100.1.0.10/32 + ipv6: 2064:100::a/128 + Ethernet1: + ipv4: 10.0.0.19/31 + ipv6: fc00::26/126 + bp_interface: + ipv4: 10.10.246.10/24 + ipv6: fc0a::15/64 + + ARISTA11T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.20 + - FC00::29 + interfaces: + Loopback0: + ipv4: 100.1.0.11/32 + ipv6: 2064:100::b/128 + Ethernet1: + ipv4: 10.0.0.21/31 + ipv6: 
fc00::2a/126 + bp_interface: + ipv4: 10.10.246.11/24 + ipv6: fc0a::16/64 + + ARISTA12T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.22 + - FC00::2D + interfaces: + Loopback0: + ipv4: 100.1.0.12/32 + ipv6: 2064:100::c/128 + Ethernet1: + ipv4: 10.0.0.23/31 + ipv6: fc00::2e/126 + bp_interface: + ipv4: 10.10.246.12/24 + ipv6: fc0a::19/64 + + ARISTA13T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.24 + - FC00::31 + interfaces: + Loopback0: + ipv4: 100.1.0.13/32 + ipv6: 2064:100::d/128 + Ethernet1: + ipv4: 10.0.0.25/31 + ipv6: fc00::32/126 + bp_interface: + ipv4: 10.10.246.13/24 + ipv6: fc0a::1a/64 + + ARISTA14T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.26 + - FC00::35 + interfaces: + Loopback0: + ipv4: 100.1.0.14/32 + ipv6: 2064:100::e/128 + Ethernet1: + ipv4: 10.0.0.27/31 + ipv6: fc00::36/126 + bp_interface: + ipv4: 10.10.246.14/24 + ipv6: fc0a::1d/64 + + ARISTA15T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.28 + - FC00::39 + interfaces: + Loopback0: + ipv4: 100.1.0.15/32 + ipv6: 2064:100::f/128 + Ethernet1: + ipv4: 10.0.0.29/31 + ipv6: fc00::3a/126 + bp_interface: + ipv4: 10.10.246.15/24 + ipv6: fc0a::1e/64 + + ARISTA16T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.30 + - FC00::3D + interfaces: + Loopback0: + ipv4: 100.1.0.16/32 + ipv6: 2064:100::10/128 + Ethernet1: + ipv4: 10.0.0.31/31 + ipv6: fc00::3e/126 + bp_interface: + ipv4: 10.10.246.16/24 + ipv6: fc0a::21/64 + + ARISTA01T0: + properties: + - common + - tor + tornum: 1 + bgp: + asn: 64001 + peers: + 65100: + - 10.0.0.32 + - FC00::41 + vips: + ipv4: + prefixes: + - 200.0.1.0/26 + asn: 64700 + interfaces: + Loopback0: + ipv4: 100.1.0.17/32 + ipv6: 2064:100::11/128 + Ethernet1: + ipv4: 10.0.0.33/31 + ipv6: fc00::42/126 + bp_interface: + ipv4: 10.10.246.17/24 + ipv6: fc0a::22/64 + + ARISTA02T0: + properties: + 
- common + - tor + tornum: 2 + bgp: + asn: 64002 + peers: + 65100: + - 10.0.0.34 + - FC00::45 + interfaces: + Loopback0: + ipv4: 100.1.0.18/32 + ipv6: 2064:100::12/128 + Ethernet1: + ipv4: 10.0.0.35/31 + ipv6: fc00::46/126 + bp_interface: + ipv4: 10.10.246.18/24 + ipv6: fc0a::25/64 + + ARISTA03T0: + properties: + - common + - tor + tornum: 3 + bgp: + asn: 64003 + peers: + 65100: + - 10.0.0.36 + - FC00::49 + vips: + ipv4: + prefixes: + - 200.0.1.0/26 + asn: 64700 + interfaces: + Loopback0: + ipv4: 100.1.0.19/32 + ipv6: 2064:100::13/128 + Ethernet1: + ipv4: 10.0.0.37/31 + ipv6: fc00::4a/126 + bp_interface: + ipv4: 10.10.246.19/24 + ipv6: fc0a::26/64 + + ARISTA04T0: + properties: + - common + - tor + tornum: 4 + bgp: + asn: 64004 + peers: + 65100: + - 10.0.0.38 + - FC00::4D + interfaces: + Loopback0: + ipv4: 100.1.0.20/32 + ipv6: 2064:100::14/128 + Ethernet1: + ipv4: 10.0.0.39/31 + ipv6: fc00::4e/126 + bp_interface: + ipv4: 10.10.246.20/24 + ipv6: fc0a::29/64 + + ARISTA05T0: + properties: + - common + - tor + tornum: 5 + bgp: + asn: 64005 + peers: + 65100: + - 10.0.0.40 + - FC00::51 + interfaces: + Loopback0: + ipv4: 100.1.0.21/32 + ipv6: 2064:100::15/128 + Ethernet1: + ipv4: 10.0.0.41/31 + ipv6: fc00::52/126 + bp_interface: + ipv4: 10.10.246.21/24 + ipv6: fc0a::2a/64 + + ARISTA06T0: + properties: + - common + - tor + tornum: 6 + bgp: + asn: 64006 + peers: + 65100: + - 10.0.0.42 + - FC00::55 + interfaces: + Loopback0: + ipv4: 100.1.0.22/32 + ipv6: 2064:100::16/128 + Ethernet1: + ipv4: 10.0.0.43/31 + ipv6: fc00::56/126 + bp_interface: + ipv4: 10.10.246.22/24 + ipv6: fc0a::2d/64 + + ARISTA07T0: + properties: + - common + - tor + tornum: 7 + bgp: + asn: 64007 + peers: + 65100: + - 10.0.0.44 + - FC00::59 + interfaces: + Loopback0: + ipv4: 100.1.0.23/32 + ipv6: 2064:100::17/128 + Ethernet1: + ipv4: 10.0.0.45/31 + ipv6: fc00::5a/126 + bp_interface: + ipv4: 10.10.246.23/24 + ipv6: fc0a::2e/64 + + ARISTA08T0: + properties: + - common + - tor + tornum: 8 + bgp: + asn: 64008 + 
peers: + 65100: + - 10.0.0.46 + - FC00::5D + interfaces: + Loopback0: + ipv4: 100.1.0.24/32 + ipv6: 2064:100::18/128 + Ethernet1: + ipv4: 10.0.0.47/31 + ipv6: fc00::5e/126 + bp_interface: + ipv4: 10.10.246.24/24 + ipv6: fc0a::31/64 + + ARISTA09T0: + properties: + - common + - tor + tornum: 9 + bgp: + asn: 64009 + peers: + 65100: + - 10.0.0.48 + - FC00::61 + interfaces: + Loopback0: + ipv4: 100.1.0.25/32 + ipv6: 2064:100::19/128 + Ethernet1: + ipv4: 10.0.0.49/31 + ipv6: fc00::62/126 + bp_interface: + ipv4: 10.10.246.25/24 + ipv6: fc0a::32/64 + + ARISTA10T0: + properties: + - common + - tor + tornum: 10 + bgp: + asn: 64010 + peers: + 65100: + - 10.0.0.50 + - FC00::65 + interfaces: + Loopback0: + ipv4: 100.1.0.26/32 + ipv6: 2064:100::1a/128 + Ethernet1: + ipv4: 10.0.0.51/31 + ipv6: fc00::66/126 + bp_interface: + ipv4: 10.10.246.26/24 + ipv6: fc0a::35/64 + + ARISTA11T0: + properties: + - common + - tor + tornum: 11 + bgp: + asn: 64011 + peers: + 65100: + - 10.0.0.52 + - FC00::69 + interfaces: + Loopback0: + ipv4: 100.1.0.27/32 + ipv6: 2064:100::1b/128 + Ethernet1: + ipv4: 10.0.0.53/31 + ipv6: fc00::6a/126 + bp_interface: + ipv4: 10.10.246.27/24 + ipv6: fc0a::36/64 + + ARISTA12T0: + properties: + - common + - tor + tornum: 12 + bgp: + asn: 64012 + peers: + 65100: + - 10.0.0.54 + - FC00::6D + interfaces: + Loopback0: + ipv4: 100.1.0.28/32 + ipv6: 2064:100::1c/128 + Ethernet1: + ipv4: 10.0.0.55/31 + ipv6: fc00::6e/126 + bp_interface: + ipv4: 10.10.246.28/24 + ipv6: fc0a::39/64 + + ARISTA13T0: + properties: + - common + - tor + tornum: 13 + bgp: + asn: 64013 + peers: + 65100: + - 10.0.0.56 + - FC00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interface: + ipv4: 10.10.246.29/24 + ipv6: fc0a::3a/64 + + ARISTA14T0: + properties: + - common + - tor + tornum: 14 + bgp: + asn: 64014 + peers: + 65100: + - 10.0.0.58 + - FC00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 
2064:100::1e/128 + Ethernet1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interface: + ipv4: 10.10.246.30/24 + ipv6: fc0a::3d/64 + + ARISTA15T0: + properties: + - common + - tor + tornum: 15 + bgp: + asn: 64015 + peers: + 65100: + - 10.0.0.60 + - FC00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interface: + ipv4: 10.10.246.31/24 + ipv6: fc0a::3e/64 + diff --git a/ansible/vars/topo_t1.yml b/ansible/vars/topo_t1.yml index 6c4bb639fa8..1dcf210e2e6 100644 --- a/ansible/vars/topo_t1.yml +++ b/ansible/vars/topo_t1.yml @@ -133,12 +133,19 @@ configuration_properties: common: dut_asn: 65100 dut_type: LeafRouter - nhipv4: 10.10.246.254 - nhipv6: FC0A::FF + nhipv4: 10.10.246.100 + nhipv6: FC0A::C9 spine: swrole: spine + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + leaf_asn_start: 62001 + tor_asn_start: 65501 + failure_rate: 0 tor: swrole: tor + tor_subnet_number: 5 configuration: ARISTA01T2: diff --git a/ansible/veos b/ansible/veos index ffd7c518c8b..a4f01b3ec00 100644 --- a/ansible/veos +++ b/ansible/veos @@ -1,204 +1,132 @@ -all: - children: - vm_host: - children: - vm_host_1: - vm_host_2: - eos: - children: - vms_1: - vms_2: - servers: - vars: - topologies: - - t1 - - t1-lag - - t1-64-lag - - t1-64-lag-clet - - t0 - - t0-16 - - t0-56 - - t0-52 - - ptf32 - - ptf64 - - t0-64 - - t0-64-32 - - t0-116 - children: - server_1: - server_2: - -vm_host_1: - hosts: - STR-ACS-SERV-01: - ansible_host: 10.251.0.245 - -vm_host_2: - hosts: - STR-ACS-SERV-02: - ansible_host: 10.251.0.192 - -vms_1: - hosts: - VM0100: - ansible_host: 10.250.0.2 - VM0101: - ansible_host: 10.250.0.3 - VM0102: - ansible_host: 10.250.0.4 - VM0103: - ansible_host: 10.250.0.5 - VM0104: - ansible_host: 10.250.0.6 - VM0105: - ansible_host: 10.250.0.7 - VM0106: - ansible_host: 10.250.0.8 - VM0107: - ansible_host: 10.250.0.9 - VM0108: - ansible_host: 10.250.0.10 - VM0109: - ansible_host: 10.250.0.11 - 
VM0110: - ansible_host: 10.250.0.12 - VM0111: - ansible_host: 10.250.0.13 - VM0112: - ansible_host: 10.250.0.14 - VM0113: - ansible_host: 10.250.0.15 - VM0114: - ansible_host: 10.250.0.16 - VM0115: - ansible_host: 10.250.0.17 - VM0116: - ansible_host: 10.250.0.18 - VM0117: - ansible_host: 10.250.0.19 - VM0118: - ansible_host: 10.250.0.20 - VM0119: - ansible_host: 10.250.0.21 - VM0120: - ansible_host: 10.250.0.22 - VM0121: - ansible_host: 10.250.0.23 - VM0122: - ansible_host: 10.250.0.24 - VM0123: - ansible_host: 10.250.0.25 - VM0124: - ansible_host: 10.250.0.26 - VM0125: - ansible_host: 10.250.0.27 - VM0126: - ansible_host: 10.250.0.28 - VM0127: - ansible_host: 10.250.0.29 - VM0128: - ansible_host: 10.250.0.30 - VM0129: - ansible_host: 10.250.0.31 - VM0130: - ansible_host: 10.250.0.32 - VM0131: - ansible_host: 10.250.0.33 - VM0132: - ansible_host: 10.250.0.34 - VM0133: - ansible_host: 10.250.0.35 - VM0134: - ansible_host: 10.250.0.36 - VM0135: - ansible_host: 10.250.0.37 - VM0136: - ansible_host: 10.250.0.38 - VM0137: - ansible_host: 10.250.0.39 - VM0138: - ansible_host: 10.250.0.40 - VM0139: - ansible_host: 10.250.0.41 - VM0140: - ansible_host: 10.250.0.42 - VM0141: - ansible_host: 10.250.0.43 - VM0142: - ansible_host: 10.250.0.44 - VM0143: - ansible_host: 10.250.0.45 - VM0144: - ansible_host: 10.250.0.46 - VM0145: - ansible_host: 10.250.0.47 - VM0146: - ansible_host: 10.250.0.48 - VM0147: - ansible_host: 10.250.0.49 - VM0148: - ansible_host: 10.250.0.50 - VM0149: - ansible_host: 10.250.0.51 - VM0150: - ansible_host: 10.250.0.52 - VM0151: - ansible_host: 10.250.0.53 - VM0152: - ansible_host: 10.250.0.54 - VM0153: - ansible_host: 10.250.0.55 - VM0154: - ansible_host: 10.250.0.56 - VM0155: - ansible_host: 10.250.0.57 - VM0156: - ansible_host: 10.250.0.58 - VM0157: - ansible_host: 10.250.0.59 - VM0158: - ansible_host: 10.250.0.60 - VM0159: - ansible_host: 10.250.0.61 - VM0160: - ansible_host: 10.250.0.62 - VM0161: - ansible_host: 10.250.0.63 - VM0162: - ansible_host: 
10.250.0.64 - VM0163: - ansible_host: 10.250.0.65 - VM0164: - ansible_host: 10.250.0.66 - VM0165: - ansible_host: 10.250.0.67 - VM0166: - ansible_host: 10.250.0.68 - VM0167: - ansible_host: 10.250.0.69 - -vms_2: - hosts: - VM0200: - ansible_host: 10.250.0.51 - VM0201: - ansible_host: 10.250.0.52 - VM0202: - ansible_host: 10.250.0.53 - VM0203: - ansible_host: 10.250.0.54 - -# The groups below are helper to limit running playbooks to specific server(s) only -server_1: - vars: - host_var_file: host_vars/STR-ACS-SERV-01.yml - children: - vm_host_1: - vms_1: - -server_2: - vars: - host_var_file: host_vars/STR-ACS-SERV-02.yml - children: - vm_host_2: - vms_2: +[vm_host_1] +STR-ACS-SERV-01 ansible_host=10.250.0.1 + +[vm_host_2] +STR-ACS-SERV-02 ansible_host=10.251.0.1 + +[vm_host_3] +STR-ACS-SERV-03 ansible_host=10.251.0.1 + +[vm_host:children] +vm_host_1 +vm_host_2 +vm_host_3 + +[vms_1] +VM0100 ansible_host=10.250.0.2 +VM0101 ansible_host=10.250.0.3 +VM0102 ansible_host=10.250.0.4 +VM0103 ansible_host=10.250.0.5 + +[vms_2] +VM0200 ansible_host=10.250.0.10 +VM0201 ansible_host=10.250.0.11 +VM0202 ansible_host=10.250.0.12 +VM0203 ansible_host=10.250.0.13 +VM0204 ansible_host=10.250.0.14 +VM0205 ansible_host=10.250.0.15 +VM0206 ansible_host=10.250.0.16 +VM0207 ansible_host=10.250.0.17 +VM0208 ansible_host=10.250.0.18 +VM0209 ansible_host=10.250.0.19 +VM0210 ansible_host=10.250.0.20 +VM0211 ansible_host=10.250.0.21 +VM0212 ansible_host=10.250.0.22 +VM0213 ansible_host=10.250.0.23 +VM0214 ansible_host=10.250.0.24 +VM0215 ansible_host=10.250.0.25 +VM0216 ansible_host=10.250.0.26 +VM0217 ansible_host=10.250.0.27 +VM0218 ansible_host=10.250.0.28 +VM0219 ansible_host=10.250.0.29 +VM0220 ansible_host=10.250.0.30 +VM0221 ansible_host=10.250.0.31 +VM0222 ansible_host=10.250.0.32 +VM0223 ansible_host=10.250.0.33 +VM0224 ansible_host=10.250.0.34 +VM0225 ansible_host=10.250.0.35 +VM0226 ansible_host=10.250.0.36 +VM0227 ansible_host=10.250.0.37 +VM0228 ansible_host=10.250.0.38 +VM0229 
ansible_host=10.250.0.39 +VM0230 ansible_host=10.250.0.40 +VM0231 ansible_host=10.250.0.41 + + +[vms_3] +VM0300 ansible_host=10.251.0.2 +VM0301 ansible_host=10.251.0.3 +VM0302 ansible_host=10.251.0.4 +VM0303 ansible_host=10.251.0.5 + + +[vms_4] +VM0400 ansible_host=10.251.0.10 +VM0401 ansible_host=10.251.0.11 +VM0402 ansible_host=10.251.0.12 +VM0403 ansible_host=10.251.0.13 +VM0404 ansible_host=10.251.0.14 +VM0405 ansible_host=10.251.0.15 +VM0406 ansible_host=10.251.0.16 +VM0407 ansible_host=10.251.0.17 +VM0408 ansible_host=10.251.0.18 +VM0409 ansible_host=10.251.0.19 +VM0410 ansible_host=10.251.0.20 +VM0411 ansible_host=10.251.0.21 +VM0412 ansible_host=10.251.0.22 +VM0413 ansible_host=10.251.0.23 +VM0414 ansible_host=10.251.0.24 +VM0415 ansible_host=10.251.0.25 +VM0416 ansible_host=10.251.0.26 +VM0417 ansible_host=10.251.0.27 +VM0418 ansible_host=10.251.0.28 +VM0419 ansible_host=10.251.0.29 +VM0420 ansible_host=10.251.0.30 +VM0421 ansible_host=10.251.0.31 +VM0422 ansible_host=10.251.0.32 +VM0423 ansible_host=10.251.0.33 +VM0424 ansible_host=10.251.0.34 +VM0425 ansible_host=10.251.0.35 +VM0426 ansible_host=10.251.0.36 +VM0427 ansible_host=10.251.0.37 +VM0428 ansible_host=10.251.0.38 +VM0429 ansible_host=10.251.0.39 +VM0430 ansible_host=10.251.0.40 +VM0431 ansible_host=10.251.0.41 + +[eos:children] +vms_1 +vms_2 +vms_3 +vms_4 + +## The groups below are helpers to limit running playbooks to server_1, server_2 or server_3 only +## vms_2 is used for t1 topologies; enable it when needed +[server_1:children] +vm_host_1 +vms_1 + +[server_1:vars] +host_var_file=host_vars/STR-ACS-SERV-01.yml + +[server_2:children] +vm_host_2 +vms_3 + +[server_2:vars] +host_var_file=host_vars/STR-ACS-SERV-02.yml + +[server_3:children] +vm_host_3 +vms_4 + +[server_3:vars] +host_var_file=host_vars/STR-ACS-SERV-03.yml + +[servers:children] +server_1 +server_2 +server_3 + +[servers:vars] +topologies=['t1', 't1-lag', 't1-64-lag', 't1-slx', 't0', 't0-16', 't0-56', 't0-52', 't0-slx', 't0-e1031', 'ptf32', 'ptf64', 't0-64', 
't0-64-32', 't0-116'] diff --git a/ansible/veos.vtb b/ansible/veos.vtb new file mode 100644 index 00000000000..4f7bdc1c2b8 --- /dev/null +++ b/ansible/veos.vtb @@ -0,0 +1,32 @@ +[vm_host_1] +STR-ACS-VSERV-01 ansible_host=172.17.0.1 ansible_user=use_own_value + +[vm_host:children] +vm_host_1 + +[vms_1] +VM0100 ansible_host=10.250.0.51 +VM0101 ansible_host=10.250.0.52 +VM0102 ansible_host=10.250.0.53 +VM0103 ansible_host=10.250.0.54 + + +[eos:children] +vms_1 + +## The group below is a helper to limit running playbooks to server_1 only +[server_1:children] +vm_host_1 +vms_1 + +[server_1:vars] +host_var_file=host_vars/STR-ACS-VSERV-01.yml + +[servers:children] +server_1 + +[servers:vars] +topologies=['t1','t1-slx','t1-lag', 't1-64-lag', 't0', 't0-16', 't0-56', 't0-52', 'ptf32', 'ptf64', 't0-64', 't0-64-32', 't0-116'] + +[sonic] +vlab-01 ansible_host=10.250.0.101 type=kvm diff --git a/ansible/vtestbed.csv b/ansible/vtestbed.csv index a4f578c8eed..7a6f4ff385e 100644 --- a/ansible/vtestbed.csv +++ b/ansible/vtestbed.csv @@ -1,5 +1,2 @@ -# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment -vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-01],Tests virtual switch vm -vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-02],Tests virtual switch vm -vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,fec0::ffff:afa:6/64,server_1,VM0104,[vlab-03],Tests virtual switch vm -vms-kvm-t0-2,vms6-3,t0,docker-ptf,ptf-03,10.250.0.108/24,fec0::ffff:afa:8/64,server_1,VM0104,[vlab-04],Tests virtual switch vm +# conf-name,group-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,comment +vms-kvm-t0,vms6-1,t0,docker-ptf-brcm,10.250.0.102/24,server_1,VM0100,vlab-01,Tests virtual switch vm diff --git a/index.html b/index.html new file mode 100644 index 00000000000..2cbc927f9a7 --- /dev/null +++ b/index.html @@ -0,0 +1,2 @@ + + 
百度一下,你就知道

关于百度 About Baidu

©2017 Baidu 使用百度前必读  意见反馈 京ICP证030173号 

diff --git a/tests/conftest.py b/tests/conftest.py old mode 100644 new mode 100755 index 71316804185..1e8b452c09d --- a/tests/conftest.py +++ b/tests/conftest.py @@ -461,4 +461,4 @@ def disable_container_autorestart(duthost, request): for name, state in container_autorestart_states.items(): if state == "enabled": cmds_enable.append(cmd_enable.format(name)) - duthost.shell_cmds(cmds=cmds_enable) \ No newline at end of file + duthost.shell_cmds(cmds=cmds_enable) diff --git a/tests/platform_tests/api/watchdog.yml b/tests/platform_tests/api/watchdog.yml index 60faba34bc7..fb05d9ad350 100644 --- a/tests/platform_tests/api/watchdog.yml +++ b/tests/platform_tests/api/watchdog.yml @@ -51,3 +51,9 @@ x86_64-mlnx_msn2700-r0: greater_timeout: 100 too_big_timeout: 66000 + +x86_64-cel_seastone-r0: + default: + valid_timeout: 10 + greater_timeout: 20 + too_big_timeout: 16779 diff --git a/tests/run_tests.sh b/tests/run_tests.sh index 8907bf5c399..b26554746cb 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -141,7 +141,9 @@ function setup_test_options() PRET_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/pretest.xml --log-file=${LOG_PATH}/pretest.log" POST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/posttest.xml --log-file=${LOG_PATH}/posttest.log" - TEST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/tr.xml --log-file=${LOG_PATH}/test.log" + # TEST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/tr.xml --log-file=${LOG_PATH}/test.log" + TEST_LOGGING_OPTIONS="--html=${LOG_PATH}/result.html --log-file=${LOG_PATH}/test.log" + fi UTIL_TOPOLOGY_OPTIONS="--topology util" if [[ -z ${TOPOLOGY} ]]; then @@ -194,7 +196,7 @@ function run_debug_tests() function prepare_dut() { echo "=== Preparing DUT for subsequent tests ===" - pytest ${PYTEST_UTIL_OPTS} ${PRET_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m pretest + pytest ${PYTEST_UTIL_OPTS} ${PRET_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m pretest -v # Give some delay for the newly announced routes to 
propagate. sleep 120 @@ -203,13 +205,13 @@ function prepare_dut() function cleanup_dut() { echo "=== Cleaning up DUT after tests ===" - pytest ${PYTEST_UTIL_OPTS} ${POST_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m posttest + pytest ${PYTEST_UTIL_OPTS} ${POST_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m posttest -v } function run_group_tests() { echo "=== Running tests in groups ===" - pytest ${TEST_CASES} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} + pytest ${TEST_CASES} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -v } function run_individual_tests() @@ -225,10 +227,11 @@ function run_individual_tests() if [[ ${test_dir} != "." ]]; then mkdir -p ${LOG_PATH}/${test_dir} fi - TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --junitxml=${LOG_PATH}/${test_dir}/${test_name}.xml" + # TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --junitxml=${LOG_PATH}/${test_dir}/${test_name}.xml" + TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --html=${LOG_PATH}/result.html" fi - pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} + pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -v ret_code=$? # If test passed, no need to keep its log. @@ -330,4 +333,5 @@ if [[ x"${TEST_METHOD}" != x"debug" && x"${BYPASS_UTIL}" == x"False" ]]; then cleanup_dut fi +# run_debug_tests exit ${RC}