Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Software for Open Networking in the Cloud - SONiC
# Management

# cls test
# Description
Tools for managing, configuring and monitoring SONiC

Expand Down
64 changes: 35 additions & 29 deletions ansible/TestbedProcessing.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,14 @@

Requirement:
python version: 2.X
python package: PyYAML 3.12 (or later)

PyYaml Install Instructions:
[1] Download PyYAML from https://pyyaml.org/wiki/PyYAML
[1] Download PyYAML from https://pyyaml.org/wiki/PyYAML
[2] Unpack the archive
[3] Install the package by executing (python setup.py install)
[4] Test if installation was successful (python setup.py test)
[4] Test if installation was successful (python setup.py test)

Usage:
put TestbedProcessing.py and testbed.yaml under sonic-mgmt/ansible
python TestbedProcessing.py
Expand All @@ -25,12 +25,12 @@
Arguments:
-i : the testbed.yaml file to parse
-basedir : the basedir for the project
-backupdir : the backup directory for the files
-backupdir : the backup directory for the files

Script Procedure
[1] Backup the files we will be copying
[1] Backup the files we will be copying
[2] Load testbed.yaml into dictionaries for easy processing
[3] Generate the files via methods defined below
[3] Generate the files via methods defined below
"""

# ARGUMENTS TO PARSE
Expand Down Expand Up @@ -86,7 +86,7 @@

"""
represent_none(self, _)
modifies yaml to replace null values with blanks
modifies yaml to replace null values with blanks
SOURCE: https://stackoverflow.com/questions/37200150/can-i-dump-blank-instead-of-null-in-yaml-pyyaml/37201633#37201633
"""
def represent_none(self, _):
Expand All @@ -98,7 +98,7 @@ def represent_none(self, _):
generateDictionary(data, result, category)
@:parameter data - the dictionary to iterate through
@:parameter result - the resulting dictionary
Generates the dictionaries that are used when creating csv, yml, or text files
Generates the dictionaries that are used when creating csv, yml, or text files
"""
def generateDictionary(data, result, category):
for key, value in data[category].items():
Expand All @@ -108,7 +108,7 @@ def generateDictionary(data, result, category):
"""
makeMain(data, outfile)
@:parameter data - the dictionary to look through
@:parameter outfile - the file to write to
@:parameter outfile - the file to write to
makeMain generates the vm_host/main.yml file
it pulls two sets of information; dictionary data and proxy data
"""
Expand All @@ -122,7 +122,9 @@ def makeMain(data, outfile):
"skip_image_downloading": veos.get("skip_image_downloading"),
"vm_console_base": veos.get("vm_console_base"),
"memory": veos.get("memory"),
"max_fp_num": veos.get("max_fp_num")
"max_fp_num": veos.get("max_fp_num"),
"ptf_bp_ip": veos.get("ptf_bp_ip"),
"ptf_bp_ipv6": veos.get("ptf_bp_ipv6")
}
proxy = {
"proxy_env": {
Expand All @@ -141,21 +143,21 @@ def makeMain(data, outfile):
@:parameter data - the dictionary to look for (in this case: veos)
@:parameter outfile - the file to write to
generates /group_vars/vm_host/creds.yml
pulls ansible_user, ansible_password, ansible_become_pass from vm_host_ansible into a dictionary
"""
def makeVMHostCreds(data, outfile):
    """
    makeVMHostCreds(data, outfile)
    @:parameter data - the dictionary to look for (in this case: veos)
    @:parameter outfile - the file to write to
    generates /group_vars/vm_host/creds.yml
    pulls ansible_user, ansible_password, ansible_become_pass from vm_host_ansible into a dictionary
    """
    veos = data
    # NOTE(review): the pasted diff carried both the removed "ansible_sudo_password"
    # entry and its "ansible_become_pass" replacement; only the merged key is kept here.
    result = {
        "ansible_user": veos.get("vm_host_ansible").get("ansible_user"),
        "ansible_password": veos.get("vm_host_ansible").get("ansible_password"),
        "ansible_become_pass": veos.get("vm_host_ansible").get("ansible_become_pass")
    }
    with open(outfile, "w") as toWrite:
        toWrite.write("---\n")  # YAML document-start marker
        yaml.dump(result, stream=toWrite, default_flow_style=False)

"""
makeSonicLabDevices(data, outfile)
makeSonicLabDevices(data, outfile)
@:parameter data - the dictionary to look through (devices dictionary)
@:parameter outfile - the file to write to
generates files/sonic_lab_devices.csv by pulling hostname, managementIP, hwsku, and type
Expand Down Expand Up @@ -190,14 +192,14 @@ def makeSonicLabDevices(data, outfile):


"""
makeTestbed(data, outfile)
makeTestbed(data, outfile)
@:parameter data - the dictionary to look through (devices dictionary)
@:parameter outfile - the file to write to
generates /testbed.csv by pulling confName, groupName, topo, ptf_image_name, ptf, ptf_ip, ptf_ipv6, server, vm_base, dut, and comment
error handling: checks if attribute values are None type or string "None"
"""
def makeTestbed(data, outfile):
csv_columns = "# conf-name,group-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,comment"
csv_columns = "# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment"
topology = data
csv_file = outfile

Expand All @@ -210,9 +212,11 @@ def makeTestbed(data, outfile):
topo = groupDetails.get("topo")
ptf_image_name = groupDetails.get("ptf_image_name")
ptf_ip = groupDetails.get("ptf_ip")
ptf_ipv6 = groupDetails.get("ptf_ipv6")
server = groupDetails.get("server")
vm_base = groupDetails.get("vm_base")
dut = groupDetails.get("dut")
ptf = groupDetails.get("ptf")
comment = groupDetails.get("comment")

# catch empty types
Expand All @@ -224,16 +228,20 @@ def makeTestbed(data, outfile):
ptf_image_name = ""
if not ptf_ip:
ptf_ip = ""
if not ptf_ipv6:
ptf_ipv6 = ""
if not server:
server = ""
if not vm_base:
vm_base = ""
if not dut:
dut = ""
if not ptf:
ptf = ""
if not comment:
comment = ""

row = confName + "," + groupName + "," + topo + "," + ptf_image_name + "," + ptf_ip + "," + server + "," + vm_base + "," + dut + "," + comment
row = confName + "," + groupName + "," + topo + "," + ptf_image_name + "," + ptf + "," + ptf_ip + "," + ptf_ipv6 + ","+ server + "," + vm_base + "," + dut + "," + comment
f.write(row + "\n")
except IOError:
print("I/O error: issue creating testbed.csv")
Expand All @@ -242,9 +250,9 @@ def makeTestbed(data, outfile):
"""
makeSonicLabLinks(data, outfile)
@:parameter data - the dictionary to look through (devices dictionary)
@:parameter outfile - the file to write to
@:parameter outfile - the file to write to
generates /files/sonic_lab_links.csv by pulling startPort, endPort, bandWidth, vlanID, vlanMode
error handling: checks if attribute values are None type or string "None"
error handling: checks if attribute values are None type or string "None"
"""
def makeSonicLabLinks(data, outfile):
csv_columns = "StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode"
Expand Down Expand Up @@ -305,7 +313,7 @@ def makeEOSCreds(data, outfile):
"""
makeFanout_secrets(data, outfile)
@:parameter data - reads from devices dictionary
@:parameter outfile - the file to write to
@:parameter outfile - the file to write to
Makes /group_vars/fanout/secrets.yml
Finds the fanout secret credentials by using "fanout" as the value to search for under device_type
Under github and personal topology configuration, there is only one designated fanout switch credential
Expand Down Expand Up @@ -425,7 +433,7 @@ def makeLab(data, devices, testbed, outfile):
"""
makeVeos(data, veos, devices, outfile)
@:parameter data - reads from either veos-groups, this helps separate the function into 3 components; children, host, vars
@:parameter veos - reads from either veos
@:parameter veos - reads from either veos
@:parameter devices - reads from devices
@:parameter outfile - writes to veos
"""
Expand Down Expand Up @@ -484,18 +492,16 @@ def makeHostVar(data):

"""
updateDockerRegistry
@:parameter outfile - the file to write to
hard codes the docker registry to search locally rather than externally
@:parameter outfile - the file to write to
hard codes the docker registry to search locally rather than externally
"""
def updateDockerRegistry(docker_registry, outfile):
    """
    updateDockerRegistry(docker_registry, outfile)
    @:parameter docker_registry - dictionary holding the registry settings
    @:parameter outfile - the file to write to
    hard codes the docker registry to search locally rather than externally
    Writes only docker_registry_host; skips entirely when no host is configured.
    """
    # NOTE(review): the pasted diff carried both the old compound condition
    # (host AND username AND password) and the new host-only check, plus removed
    # credential writes where "root" was erroneously concatenated into the
    # password value. This is the merged, host-only version.
    if not docker_registry.get("docker_registry_host"):
        # no local registry configured - leave the default registry untouched
        print("\t\tREGISTRY FIELD BLANK - SKIPPING THIS STEP")
    else:
        with open(outfile, "w") as toWrite:
            toWrite.write("docker_registry_host: " + docker_registry.get("docker_registry_host"))
            toWrite.write("\n\n")


def main():
Expand Down Expand Up @@ -571,4 +577,4 @@ def main():

if __name__ == '__main__':
main()

47 changes: 33 additions & 14 deletions ansible/config_sonic_basedon_testbed.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
# -e topo=t0 - the name of topology to generate minigraph file
# -e testbed_name=vms1-1 - the testbed name specified in testbed.csv file
# (if you give 'testbed_name' option, will use info from testbed and ignore topo and vm_base options)
# -e vm_file=veos - the virtual machine file name
# -e deploy=True - if deploying the newly generated minigraph to the target DUT, default is false if not defined
# -e save=True - if saving the newly generated minigraph to the target DUT as startup-config, default is false if not defined
#
Expand All @@ -40,12 +41,17 @@
testbed_file: testbed.csv
when: testbed_file is not defined

- name: Set default dut index
set_fact:
dut_index: 0
when: dut_index is not defined

- name: Gathering testbed information
test_facts: testbed_name="{{ testbed_name }}" testbed_file="{{ testbed_file }}"
connection: local
delegate_to: localhost

- fail: msg="The DUT you are trying to run test does not belongs to this testbed"
when: testbed_facts['dut'] != inventory_hostname
when: testbed_facts['duts'][dut_index] != inventory_hostname

- name: set testbed_type
set_fact:
Expand All @@ -58,15 +64,21 @@
when: testbed_name is defined

- topo_facts: topo={{ topo }}
connection: local
delegate_to: localhost

- name: set default vm file path
set_fact:
vm_file: veos
when: vm_file is not defined

- set_fact:
VM_topo: "{% if 'ptf' in topo %}False{% else %}True{% endif %}"
remote_dut: "{{ ansible_ssh_host }}"

- name: gather testbed VM informations
testbed_vm_info: base_vm={{ testbed_facts['vm_base'] }} topo={{ testbed_facts['topo'] }}
connection: local
# testbed_vm_info: base_vm={{ testbed_facts['vm_base'] }} topo={{ testbed_facts['topo'] }} vm_file={{ vm_file }}
testbed_vm_info: base_vm="{{vm_base}}" topo="{{topo}}"
delegate_to: localhost
when: "VM_topo | bool"

- name: find interface name mapping and individual interface speed if defined
Expand All @@ -80,29 +92,29 @@
set_fact:
vlan_intfs: "{{ vlan_intfs|default([])}} + ['{{ port_alias[item] }}' ]"
with_items: "{{ host_if_indexes }}"
when: ("'host_interfaces' in vm_topo_config") and ("'tor' in vm_topo_config['dut_type'] | lower")
when: "('host_interfaces' in vm_topo_config) and ('tor' in vm_topo_config['dut_type'] | lower)"

- name: find all interface indexes mapping connecting to VM
set_fact:
interface_to_vms: "{{ interface_to_vms|default({}) | combine({ item.key: item.value['interface_indexes'] }) }}"
with_dict: vm_topo_config['vm']
with_dict: "{{ vm_topo_config['vm'] }}"

- name: find all interface indexes connecting to VM
set_fact:
ifindex_to_vms: "{{ ifindex_to_vms|default([]) }} + {{ item.value['interface_indexes']}}"
with_dict: vm_topo_config['vm']
with_dict: "{{ vm_topo_config['vm'] }}"

- name: find all interface names
set_fact:
intf_names: "{{ intf_names | default({}) | combine({item.key: port_alias[item.value[0]|int:item.value[-1]|int+1] }) }}"
with_dict: interface_to_vms
with_dict: "{{ interface_to_vms }}"

- name: create minigraph file in ansible minigraph folder
template: src=templates/minigraph_template.j2
dest=minigraph/{{ inventory_hostname}}.{{ topo }}.xml
connection: local
delegate_to: localhost
when: local_minigraph is defined and local_minigraph|bool == true

- block:
- name: Init telemetry keys
set_fact:
Expand Down Expand Up @@ -169,7 +181,6 @@
-out "{{ dsmsroot_cer }}"
become: true


- block:
- name: saved original minigraph file in SONiC DUT(ignore errors when file doesnot exist)
shell: mv /etc/sonic/minigraph.xml /etc/sonic/minigraph.xml.orig
Expand All @@ -188,8 +199,8 @@
delegate_to: localhost

- name: debug print stat_result
debug:
msg: Stat result is {{ stat_result }}
debug:
msg: Stat result is {{ stat_result }}

- name: Copy corresponding configlet files if exist
copy: src=vars/configlet/{{ topo }}/
Expand Down Expand Up @@ -258,12 +269,20 @@
regexp: '^enabled='
line: 'enabled=false'
become: true
register: updategraph_conf

- name: restart automatic minigraph update service
become: true
service:
name: updategraph
state: restarted
when: updategraph_conf.changed

- name: docker status
shell: docker ps
register: docker_status

- debug: msg={{ docker_status.stdout_lines }}

- name: execute cli "config load_minigraph -y" to apply new minigraph
become: true
Expand Down
4 changes: 2 additions & 2 deletions ansible/fanout_connect.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,5 +25,5 @@

- set_fact: connect_leaf=false

- include: roles/fanout/tasks/rootfanout_connect.yml
when: external_port is defined
# - include: roles/fanout/tasks/rootfanout_connect.yml
# when: external_port is defined
3 changes: 2 additions & 1 deletion ansible/files/creategraph.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,8 @@ def generate_dpg(self):
for dev in self.devices:
hostname = dev.get('Hostname', '')
managementip = dev.get('ManagementIp', '')
if hostname and 'fanout' in dev['Type'].lower():
devtype = dev['Type'].lower()
if hostname and ('fanout' in devtype or 'ixiachassis' in devtype):
###### Build Management interface IP here, if we create each device indivial minigraph file, we may comment this out
l3inforoot = etree.SubElement(self.dpgroot, 'DevicesL3Info', {'Hostname': hostname})
etree.SubElement(l3inforoot, 'ManagementIPInterface', {'Name': 'ManagementIp', 'Prefix': managementip})
Expand Down
3 changes: 1 addition & 2 deletions ansible/files/sonic_lab_devices.csv
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@

Hostname,ManagementIp,HwSku,Type
cel-e1031-01,10.250.0.100/23,Celestica-E1031-T48S4,DevSonic
cel-seastone-01,10.251.0.100/23,Celestica-DX010-C32,DevSonic
cel-seastone-02,10.250.0.100/23,Seastone-DX010-10-50,DevSonic
cel-seastone-03,10.250.0.100/23,Seastone-DX010-50,DevSonic
e1031-fanout,10.250.0.235/23,Celestica-E1031-T48S4,FanoutLeafSonic
Hostname,ManagementIp,HwSku,Type
seastone-fanout,10.251.0.235/23,Celestica-DX010-C32,FanoutLeafSonic
str-msn2700-01,10.251.0.188/23,Mellanox-2700,DevSonic
str-7260-10,10.251.0.13/23,Arista-7260QX-64,FanoutLeaf
Expand Down
2 changes: 1 addition & 1 deletion ansible/files/sonic_lab_links.csv
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode
cel-e1031-01,Ethernet1,e1031-fanout,Ethernet1,1000,100,Access
cel-e1031-01,Ethernet2,e1031-fanout,Ethernet2,1000,101,Access
cel-e1031-01,Ethernet3,e1031-fanout,Ethernet3,1000,102,Access
Expand Down Expand Up @@ -61,4 +62,3 @@ cel-seastone-01,Ethernet108,seastone-fanout,Ethernet108,100000,127,Access
cel-seastone-01,Ethernet112,seastone-fanout,Ethernet112,100000,128,Access
cel-seastone-01,Ethernet116,seastone-fanout,Ethernet116,100000,129,Access
cel-seastone-01,Ethernet120,seastone-fanout,Ethernet120,100000,130,Access
StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode
1 change: 1 addition & 0 deletions ansible/group_vars/all/creds.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ eos_default_login: "admin"
eos_default_password: ""
eos_login: admin
eos_password: 123456
eos_root_user: root
eos_root_password: 123456

sonic_login: "admin"
Expand Down
Loading