--- /dev/null
+# Deploys job material onto the Jenkins slave: for each job (rally, tempest,
+# patch_environment) the inventory, playbooks and contrib files are copied
+# into that job's workspace directory.
+- hosts: jenkins
+
+ vars:
+ jenkins_user: jenkins
+ # Every job's workspace lives under the Jenkins user's home directory.
+ jobs_dir: /home/{{ jenkins_user }}/workspace
+ rally_job_name: rally
+ tempest_job_name: tempest
+ patching_job_name: patch_environment
+
+ tasks:
+
+ - name: Rally | upload inventory
+ copy: src=inventory.ini dest={{ jobs_dir }}/{{ rally_job_name }}
+
+ - name: Rally | upload playbooks
+ copy: src=playbooks/{{ item }} dest={{ jobs_dir }}/{{ rally_job_name }}
+ with_items:
+ - cleanup_test_vm.yml
+ - common_vars.yml
+ - perform_rally.yml
+ - recreate_pubnet.yml
+ - fix_ssh_config.yml
+
+ - name: Rally | mkdir contrib
+ file: path={{ jobs_dir }}/{{ rally_job_name }}/contrib state=directory
+
+ # Contrib payload: the dd workload script, admin credentials, the Rally
+ # task definition and the libvirt XML templates used to clone test VMs.
+ - name: Rally | upload contrib
+ copy: src=playbooks/contrib/{{ item }} dest={{ jobs_dir }}/{{ rally_job_name }}/contrib
+ with_items:
+ - instance_dd_test.sh
+ - openrc
+ - rally-mos.yaml
+ - template_centos.xml
+ - template_ubuntu.xml
+ - testing-network_pub.xml
+
+ - name: Tempest | upload inventory
+ copy: src=inventory.ini dest={{ jobs_dir }}/{{ tempest_job_name }}
+
+ - name: Tempest | upload playbooks
+ copy: src=playbooks/{{ item }} dest={{ jobs_dir }}/{{ tempest_job_name }}
+ with_items:
+ - common_vars.yml
+ - perform_tempest.yml
+ - recreate_pubnet.yml
+ - fix_ssh_config.yml
+ - cleanup_knownhosts.yml
+
+ - name: Tempest | mkdir contrib
+ file: path={{ jobs_dir }}/{{ tempest_job_name }}/contrib state=directory
+
+ - name: Tempest | upload contrib
+ copy: src=playbooks/contrib/{{ item }} dest={{ jobs_dir }}/{{ tempest_job_name }}/contrib
+ with_items:
+ - tempest-tmux
+
+ - name: Patching | upload inventory
+ copy: src=inventory.ini dest={{ jobs_dir }}/{{ patching_job_name }}
+
+ - name: Patching | upload playbooks
+ copy: src=playbooks/{{ item }} dest={{ jobs_dir }}/{{ patching_job_name }}
+ with_items:
+ - common_vars.yml
+ - patch_environment.yml
--- /dev/null
+# Static inventory for the Jenkins slave host.
+[jenkins]
+# FIXME(review): plaintext ansible_ssh_pass committed to VCS — prefer ssh keys
+# or ansible-vault for this credential.
+cz7377.bud.mirantis.net ansible_ssh_user=jenkins ansible_ssh_pass=slave
\ No newline at end of file
--- /dev/null
+localhost ansible_connection=local ansible_sudo_pass=slave
--- /dev/null
+# Removes the master node's entry from ~/.ssh/known_hosts so a redeployed
+# node reachable under the same IP does not trip host-key verification.
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # Task was mis-named "patch ssh config to turn off known_hosts hashing"
+ # (copy-paste from fix_ssh_config.yml); what it actually does is strip the
+ # known_hosts line matching the master node IP. Note the IP's dots are
+ # unescaped in the regexp, so they match any character — acceptable here.
+ - name: remove master node entry from known_hosts
+ lineinfile:
+ dest={{ home_dir }}/.ssh/known_hosts
+ regexp="{{ master_node_ip }}"
+ state=absent
--- /dev/null
+# Destroys and undefines a libvirt test VM and deletes its backing volume.
+# Expects vm_to_destroy (storage_pool comes from common_vars.yml).
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # ignore_errors on all three steps: the VM may already be stopped,
+ # undefined, or have no disk image — cleanup is best-effort.
+ - name: destroy vm
+ virt: command=destroy name={{ vm_to_destroy }}
+ ignore_errors: True
+
+ - name: undefine vm
+ virt: command=undefine name={{ vm_to_destroy }}
+ ignore_errors: True
+
+ - name: remove disk image
+ shell: virsh vol-delete --pool {{ storage_pool }} {{ vm_to_destroy }}.img
+ ignore_errors: True
--- /dev/null
+# Shared defaults for the libvirt/Jenkins test playbooks (loaded everywhere
+# via include_vars).
+home_dir: /home/jenkins
+# Directory vars below are concatenated directly with file names (e.g.
+# "{{ vm_template_xml_path }}{{ template_name }}.xml" in make_template.yml),
+# so each one must end with a trailing slash. vm_template_xml_path was the
+# only one missing it, producing paths like ".../templatesfoo.xml".
+vm_template_xml_path: "{{ home_dir }}/workspace/templates/"
+libvirt_images_path: /var/lib/libvirt/images/
+qemu_uri: qemu:///system
+storage_pool: default
+iso_path: "{{ home_dir }}/workspace/iso/"
+default_dist: ubuntu
+default_pub_subnet: testing-network_pub
+vm_template_name: template
+test_login: tester
+test_password: test  # FIXME(review): plaintext credential; move to vault
+ubuntu_iso: ubuntu-14.04.3-server-amd64.iso
+centos_iso: CentOS-6.6-x86_64-minimal.iso
+host_key_checking: false  # canonical lowercase boolean (was "False")
+report_filename: rally_report.html
+rally_report_path: "{{ home_dir }}/workspace/rally/"
--- /dev/null
+#!/bin/sh
+
+tmux new-session -d
+tmux send-keys -t 0 'cd mos-tempest-runner' enter
+tmux send-keys -t 0 './rejoin.sh' enter
+tmux send-keys -t 0 'run_tests' enter
\ No newline at end of file
--- /dev/null
+<domain type='kvm' id='572'>
+ <name>testing_vm_centos</name>
+ <uuid>176a7835-9c14-4859-9f96-934b0d0ff2ee</uuid>
+ <memory unit='KiB'>8388608</memory>
+ <currentMemory unit='KiB'>8388608</currentMemory>
+ <vcpu placement='static'>4</vcpu>
+ <resource>
+ <partition>/machine</partition>
+ </resource>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>
+ <boot dev='hd'/>
+ <boot dev='cdrom'/>
+ <bios rebootTimeout='5000'/>
+ </os>
+ <cpu mode='host-model'>
+ <model fallback='forbid'/>
+ </cpu>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup' track='wall'>
+ <catchup threshold='123' slew='120' limit='10000'/>
+ </timer>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='unsafe'/>
+ <source file='/var/lib/libvirt/images/template_centos.img'/>
+ <backingStore/>
+ <target dev='sda' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw' cache='unsafe'/>
+ <source file='/home/jenkins/workspace/iso/CentOS-6.6-x86_64-minimal.iso'/>
+ <backingStore/>
+ <target dev='hdb' bus='ide' tray='open'/>
+ <readonly/>
+ <alias name='ide0-0-1'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='1'/>
+ </disk>
+ <controller type='usb' index='0' model='nec-xhci'>
+ <alias name='usb0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'>
+ <alias name='pci.0'/>
+ </controller>
+ <controller type='ide' index='0'>
+ <alias name='ide0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ <interface type='network'>
+ <mac address='52:54:00:28:c0:b0'/>
+ <source network='testing-network_pub' bridge='virbr3'/>
+ <target dev='vnet9'/>
+ <model type='virtio'/>
+ <alias name='net0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <source path='/dev/pts/11'/>
+ <target port='0'/>
+ <alias name='serial0'/>
+ </serial>
+ <console type='pty' tty='/dev/pts/11'>
+ <source path='/dev/pts/11'/>
+ <target type='serial' port='0'/>
+ <alias name='serial0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='5908' autoport='yes' listen='0.0.0.0'>
+ <listen type='address' address='0.0.0.0'/>
+ </graphics>
+ <video>
+ <model type='vga' vram='16384' heads='1'/>
+ <alias name='video0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <alias name='balloon0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
+ </memballoon>
+ </devices>
+ <seclabel type='dynamic' model='apparmor' relabel='yes'>
+ <label>libvirt-176a7835-9c14-4859-9f96-934b0d0ff2ee</label>
+ <imagelabel>libvirt-176a7835-9c14-4859-9f96-934b0d0ff2ee</imagelabel>
+ </seclabel>
+</domain>
+
--- /dev/null
+<domain type='kvm' id='533'>
+ <name>testing_vm_ubuntu</name>
+ <uuid>a3e9ac8d-9b00-48e7-bf7b-3cec02fd25b6</uuid>
+ <memory unit='KiB'>8388608</memory>
+ <currentMemory unit='KiB'>8388608</currentMemory>
+ <vcpu placement='static'>4</vcpu>
+ <resource>
+ <partition>/machine</partition>
+ </resource>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>
+ <boot dev='hd'/>
+ <boot dev='cdrom'/>
+ <bios rebootTimeout='5000'/>
+ </os>
+ <cpu mode='host-model'>
+ <model fallback='forbid'/>
+ </cpu>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup' track='wall'>
+ <catchup threshold='123' slew='120' limit='10000'/>
+ </timer>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='unsafe'/>
+ <source file='/var/lib/libvirt/images/template_ubuntu.img'/>
+ <backingStore/>
+ <target dev='sda' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw' cache='unsafe'/>
+ <source file='/home/jenkins/workspace/iso/ubuntu-14.04.3-server-amd64.iso'/>
+ <backingStore/>
+ <target dev='hdb' bus='ide' tray='open'/>
+ <readonly/>
+ <alias name='ide0-0-1'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='1'/>
+ </disk>
+ <controller type='usb' index='0' model='nec-xhci'>
+ <alias name='usb0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'>
+ <alias name='pci.0'/>
+ </controller>
+ <controller type='ide' index='0'>
+ <alias name='ide0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ <interface type='network'>
+ <mac address='52:54:00:a4:1e:2c'/>
+ <source network='testing-network_pub' bridge='virbr7'/>
+ <target dev='vnet8'/>
+ <model type='virtio'/>
+ <alias name='net0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <source path='/dev/pts/7'/>
+ <target port='0'/>
+ <alias name='serial0'/>
+ </serial>
+ <console type='pty' tty='/dev/pts/7'>
+ <source path='/dev/pts/7'/>
+ <target type='serial' port='0'/>
+ <alias name='serial0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='5904' autoport='yes' listen='0.0.0.0'>
+ <listen type='address' address='0.0.0.0'/>
+ </graphics>
+ <video>
+ <model type='vga' vram='16384' heads='1'/>
+ <alias name='video0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <alias name='balloon0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
+ </memballoon>
+ </devices>
+ <seclabel type='dynamic' model='apparmor' relabel='yes'>
+ <label>libvirt-a3e9ac8d-9b00-48e7-bf7b-3cec02fd25b6</label>
+ <imagelabel>libvirt-a3e9ac8d-9b00-48e7-bf7b-3cec02fd25b6</imagelabel>
+ </seclabel>
+</domain>
+
--- /dev/null
+<network>
+ <name>testing-network_pub</name>
+ <forward mode='route'/>
+ <ip address='172.16.59.241' prefix='28'>
+ <dhcp>
+ <range start='172.16.59.242' end='172.16.59.254' />
+ </dhcp>
+ </ip>
+</network>
--- /dev/null
+# Creates a brand-new libvirt VM: allocates a qcow2 volume from a templated
+# definition, defines the domain from templates/vm_template.xml and boots it.
+# Expects new_vm_name plus the values from common_vars.yml and
+# vm_defaults_vars.yml.
+- hosts: localhost
+
+ vars:
+ new_volume_xml_filename: new_volume.xml
+
+ tasks:
+
+ - include_vars: common_vars.yml
+ - include_vars: vm_defaults_vars.yml
+
+ - name: prepare file with disk image definition
+ template: src=templates/volume_template.xml dest=./{{ new_volume_xml_filename }}
+
+ - name: create new disk image
+ shell: virsh vol-create --pool {{ storage_pool }} {{ new_volume_xml_filename }}
+
+ - name: cleanup file with disk image definition
+ file: path=./{{ new_volume_xml_filename }} state=absent
+
+ # The domain XML is rendered in-memory via the template lookup, so no
+ # temporary file is needed for the define step.
+ - name: create new vm
+ virt: name={{ new_vm_name }}
+ command=define
+ xml="{{ lookup('template', 'templates/vm_template.xml') }}"
+ uri={{ qemu_uri }}
+
+ - name: start newly created vm
+ virt: command=start name={{ new_vm_name }}
+
+ # Surface the VNC display so the installer can be driven interactively.
+ - name: get vnc console address
+ shell: virsh domdisplay {{ new_vm_name }}
+ register: vnc_uri
+
+ - name: print vnc console address
+ debug: msg={{ vnc_uri.stdout }}
\ No newline at end of file
--- /dev/null
+# Disables known_hosts hashing system-wide so later plays can find and strip
+# entries from ~/.ssh/known_hosts by plain IP address.
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # Replaces any existing HashKnownHosts line (commented or not) with an
+ # explicit "no". The leading spaces in 'line' keep ssh_config indentation.
+ - name: patch ssh config to turn off known_hosts hashing
+ lineinfile:
+ dest=/etc/ssh/ssh_config
+ regexp="HashKnownHosts"
+ line=" HashKnownHosts no"
+ sudo: yes
--- /dev/null
+# Converts an installed source VM into a reusable libvirt template: dumps its
+# domain XML, rewrites the disk/boot references, copies the disk image and
+# scrubs it with virt-sysprep. Optionally removes the source VM afterwards.
+# Expects source_vm and template_name to be provided.
+- hosts: localhost
+
+ vars:
+ # virt-sysprep operations applied when scrubbing the template image.
+ sysprep_ops_enabled:
+ - abrt-data
+ - bash-history
+ - blkid-tab
+ - crash-data
+ - cron-spool
+ - dhcp-client-state
+ - dhcp-server-state
+ - dovecot-data
+ - logfiles
+ - machine-id
+ - mail-spool
+ - net-hostname
+ - net-hwaddr
+ - pacct-log
+ - package-manager-cache
+ - pam-data
+ - puppet-data-log
+ - rh-subscription-manager
+ - rhn-systemid
+ - rpm-db
+ - samba-db-log
+ - script
+ - smolt-uuid
+ - ssh-userdir
+ - sssd-db-log
+ - tmp-files
+ - udev-persistent-net
+ - utmp
+ - yum-uuid
+ - customize
+ - lvm-uuids
+ # When true, the source VM and its volume are removed after templating.
+ cleanup_source: false
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # NOTE(review): 'virt: command=shutdown' is asynchronous — the dumpxml and
+ # cp steps below may race a guest that is still shutting down; confirm.
+ - name: shutdown source vm
+ virt: command=shutdown name={{ source_vm }}
+
+ # NOTE(review): the path concatenations below assume vm_template_xml_path
+ # ends with '/', but the value in common_vars.yml has no trailing slash —
+ # verify the resulting paths.
+ - name: dump source vm as xml
+ shell: virsh dumpxml {{ source_vm }} > {{ vm_template_xml_path }}{{ template_name }}.xml
+
+ # Point the template XML at the template's own disk image name.
+ - name: prepare xml file, step 1
+ shell: sed -i "s/{{ source_vm }}.img/{{ template_name }}.img/g" {{ vm_template_xml_path }}{{ template_name }}.xml
+
+ # NOTE(review): step 2 flips every hd->cdrom, step 3 flips the first cdrom
+ # back to hd; for an hd-then-cdrom source this appears to restore the
+ # original order — verify the intended boot order for templates.
+ - name: prepare xml file, step 2
+ shell: sed -i "s/boot dev='hd'/boot dev='cdrom'/g" {{ vm_template_xml_path }}{{ template_name }}.xml
+
+ - name: prepare xml file, step 3
+ shell: sed -i "0,/cdrom/s/boot dev='cdrom'/boot dev='hd'/" {{ vm_template_xml_path }}{{ template_name }}.xml
+
+ - name: copy disk image for template
+ shell: cp {{ libvirt_images_path }}{{ source_vm }}.img {{ libvirt_images_path }}{{ template_name }}.img
+ sudo: yes
+
+ - name: prepare template disk image
+ shell: virt-sysprep --operations {{ sysprep_ops_enabled | join(",") }} -a {{ libvirt_images_path }}{{ template_name }}.img
+ sudo: yes
+
+ # Optional source cleanup; note vol-delete hard-codes pool "default"
+ # rather than using {{ storage_pool }}.
+ - name: destroy source vm
+ virt: command=destroy name={{ source_vm }}
+ when: cleanup_source == true
+
+ - name: undefine source vm
+ virt: command=undefine name={{ source_vm }}
+ when: cleanup_source == true
+
+ - name: remove source disk image
+ shell: virsh vol-delete --pool default {{ source_vm }}.img
+ when: cleanup_source == true
--- /dev/null
+# Applies maintenance updates (MU) to a deployed Fuel environment through the
+# master node, then checks the update status.
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # FIXME(review): hard-coded root password in plaintext.
+ - name: add newly cloned vm as host
+ add_host: name={{ master_node_ip }} groups=master_nodes ansible_ssh_user=root ansible_ssh_pass=r00tme #TODO parameterize
+
+
+- hosts: master_nodes
+
+ tasks:
+ - name: preinstall packages
+ yum: name={{ item }} state=present
+ with_items:
+ - wget
+ - curl
+ - rsync
+ sudo: yes
+
+ - name: sync Nailgun updates
+ shell: "rsync -vap --chmod=Dugo+x rsync://fuel-repository.mirantis.com/mirror/fwm/{{ mos_version }}/updates/ /var/www/nailgun/updates/"
+
+ - name: download update script
+ shell: "wget https://github.com/Mirantis/tools-sustaining/raw/master/scripts/mos_apply_mu.py"
+
+ - name: execute update script
+ shell: "python mos_apply_mu.py --update --all-envs --master-ip={{ master_node_ip }}"
+
+ - name: wait for node updates to complete
+ pause: minutes=3 # TODO custom task for recurrent check
+
+ - name: check update status
+ shell: "python mos_apply_mu.py --check --all-envs --master-ip={{ master_node_ip }}"
+
+
+# FIXME(review): this final play looks copy-pasted from perform_tempest.yml —
+# ./tempest-report is not produced by the patching job, so tar will fail here.
+- hosts: localhost
+
+ tasks:
+ - name: zip Tempest report files set
+ shell: tar cvvf tempest-report.tar ./tempest-report
--- /dev/null
+# End-to-end Rally run: clone a test VM from a libvirt template, discover its
+# IP via the host ARP cache, install Rally on it, run the rally-mos.yaml task
+# set and fetch the HTML report back to the Jenkins workspace.
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ - name: set attrs for vm template xml files
+ file: path={{ playbook_dir }}/contrib/{{ template_name }}.xml owner=jenkins group=jenkins
+ sudo: yes
+
+ - name: clone vm
+ shell: "virt-clone --connect {{ qemu_uri }} --original-xml {{ playbook_dir }}/contrib/{{ template_name }}.xml --name {{ vm_name }} --file {{ libvirt_images_path }}{{ vm_name }}.img"
+
+ - name: attach admin network
+ shell: virsh attach-interface {{ vm_name }} network {{ env_name }}_adm --persistent
+
+ - name: attach admin network | start vm
+ shell: virsh start {{ vm_name }}
+
+ # The clone gets a fresh MAC on the public network; read it from the
+ # domain's interface list so the IP can be resolved from the ARP cache.
+ - name: get pubnet MAC info
+ shell: >
+ virsh domiflist {{ vm_name }} | grep testing-network_pub | awk '{print $5}'
+ changed_when: no
+ register: pub_mac
+
+ # Poll the host ARP table until the VM's DHCP lease shows up, then cut
+ # the IP out of the "(a.b.c.d)" field. No timeout: if the VM never gets
+ # a lease this task loops forever.
+ - name: get cloned vm IP
+ shell: |
+ until arp -an | grep -q -F {{ pub_mac.stdout }}; do
+ sleep 2
+ done
+ arp -an | grep -F {{ pub_mac.stdout }} | cut -f 2 -d "(" | cut -f 1 -d ")"
+ changed_when: no
+ register: pub_ip
+
+ - name: add newly cloned vm as host
+ add_host: name={{ pub_ip.stdout_lines[0] }} groups=new_test_vms ansible_ssh_user=tester ansible_ssh_pass=test
+
+ - name: wait for ssh server up and running
+ pause: seconds=10
+
+
+- hosts: new_test_vms
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ - name: preinstall packages
+ apt: name={{ item }} state=present
+ with_items:
+ - vlan
+ - git
+ - build-essential
+ - libssl-dev
+ - libffi-dev
+ - python-dev
+ - libxml2-dev
+ - libxslt1-dev
+ - libpq-dev
+ - python-pip
+ sudo: yes
+
+ # Tag eth1 with VLAN 101 and give it an address on the admin network so
+ # the test VM can reach the deployed environment.
+ - name: setup admin network | vconfig
+ shell: vconfig add eth1 101
+ sudo: yes
+
+ - name: setup admin network | up
+ shell: ifconfig eth1 up
+ sudo: yes
+
+ - name: setup admin network | routing
+ shell: ifconfig eth1.101 inet 192.168.0.254/24 up
+ sudo: yes
+
+ - name: prepare Rally | git clone
+ git: repo=https://github.com/openstack/rally dest=rally-dist
+
+ - name: prepare Rally | install
+ shell: ~/rally-dist/install_rally.sh
+
+ - name: upload contrib files
+ copy: src={{ item }} dest=/home/tester # TODO parameterize
+ with_items:
+ - contrib/instance_dd_test.sh
+ - contrib/openrc
+ - contrib/rally-mos.yaml
+
+ # The rally virtualenv and the uploaded openrc must be sourced in the
+ # same shell as the rally commands, hence the chained && under bash.
+ - name: Rally | create deployment
+ shell: . ~/rally/bin/activate && rally-manage db recreate && . ~/openrc admin admin && rally deployment create --fromenv --name=deployment_{{ env_name }}
+ args:
+ executable: /bin/bash
+
+ - name: Rally | perform task
+ shell: . ~/rally/bin/activate && . ~/.rally/openrc admin admin && rally task start ~/rally-mos.yaml
+ args:
+ executable: /bin/bash
+
+ - name: Rally | generate report
+ shell: . ~/rally/bin/activate && . ~/.rally/openrc admin admin && rally task report --out {{ report_filename }}
+ args:
+ executable: /bin/bash
+
+ - name: download Rally report
+ fetch: fail_on_missing=yes src=/home/tester/{{ report_filename }} dest={{ rally_report_path }} flat=yes
+
+
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # pub_ip was registered in the first play (also on localhost), so the
+ # fact is still visible here.
+ - name: Remove current testing VM from known_hosts, otherwise all gets stuck on next run with the same VM IP
+ known_hosts:
+ path={{ home_dir }}/.ssh/known_hosts
+ name={{ pub_ip.stdout_lines[0] }}
+ state=absent
--- /dev/null
+# Runs the MOS Tempest suite on the Fuel master node inside a detached tmux
+# session, waits for a sentinel file to appear, then fetches and tars the
+# report set.
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # FIXME(review): hard-coded root password in plaintext.
+ - name: add newly cloned vm as host
+ add_host: name={{ master_node_ip }} groups=master_nodes ansible_ssh_user=root ansible_ssh_pass=r00tme #TODO parameterize
+
+
+- hosts: master_nodes
+
+ tasks:
+ - name: preinstall packages
+ yum: name={{ item }} state=present
+ with_items:
+ - git
+ - mc
+ - htop
+ - tmux
+ sudo: yes
+
+ - name: prepare Tempest runner | git clone
+ git: repo=https://github.com/Mirantis/mos-tempest-runner.git dest=mos-tempest-runner force=yes
+
+ # Inject a marker just before the runner exits so the wait_for task
+ # below can detect completion of the detached tmux run.
+ - name: patch mos-tempest runner to create testing status file
+ lineinfile:
+ dest=/root/mos-tempest-runner/tempest/run_tests.sh
+ insertbefore="^ return_exit_code"
+ line=" touch /home/developer/tempest-finished"
+
+ - name: prepare Tempest runner | setup
+ shell: cd ~/mos-tempest-runner && ./setup_env.sh
+
+ - name: upload tmux script
+ copy: src=contrib/tempest-tmux dest=/root
+
+ - name: set modes on tmux script
+ file: path=/root/tempest-tmux mode="a+rx"
+
+ # The detached tmux session keeps the long test run alive independently
+ # of this SSH connection.
+ - name: run Tempest tests within tmux session
+ shell: /root/tempest-tmux
+
+ # FIXME: parameterize
+ # delay=7000 sleeps roughly two hours before the first poll for the
+ # sentinel file; tempest_timeout must be provided by the caller.
+ - name: wait for Tempest tests set to complete
+ wait_for:
+ path=/home/developer/tempest-finished
+ state=present
+ delay=7000
+ timeout={{ tempest_timeout }}
+
+ - name: remove Tempest state file
+ file: path=/home/developer/tempest-finished state=absent
+
+ - name: download Tempest report files
+ fetch: fail_on_missing=yes src=/home/developer/mos-tempest-runner/tempest-reports/{{ item }} dest=./tempest-report/ flat=yes
+ with_items:
+ - tempest-report.xml
+ - tempest.conf
+ - tempest-report.html
+ - shouldfail.yaml
+
+
+- hosts: localhost
+
+ tasks:
+ - name: zip Tempest report files set
+ shell: tar cvvf tempest-report.tar ./tempest-report
--- /dev/null
+# Recreates the routed "public" libvirt network from its XML definition in
+# contrib/. default_pub_subnet comes from common_vars.yml.
+- hosts: localhost
+
+ tasks:
+
+ - include_vars: common_vars.yml
+
+ # destroy/undefine are best-effort: the network may not exist yet.
+ - name: destroy pubnet
+ shell: virsh net-destroy {{ default_pub_subnet }}
+ ignore_errors: True
+
+ - name: undefine pubnet
+ shell: virsh net-undefine {{ default_pub_subnet }}
+ ignore_errors: True
+
+ - name: define pubnet
+ shell: virsh net-define --file contrib/{{ default_pub_subnet }}.xml
+
+ - name: start pubnet
+ shell: virsh net-start {{ default_pub_subnet }}
--- /dev/null
+<domain type='kvm'>
+ <name>{{ new_vm_name }}</name>
+ <memory unit='KiB'>{{ memory * 1024 }}</memory>
+ <currentMemory unit='KiB'>{{ memory * 1024 }}</currentMemory>
+ <vcpu placement='static'>{{ vcpu }}</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>
+ <boot dev='{{ first_boot }}'/>
+ <boot dev='{{ second_boot }}'/>
+ <bios rebootTimeout='5000'/>
+ </os>
+ <cpu mode='host-model'>
+ <model fallback='forbid'/>
+ </cpu>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup' track='wall'>
+ <catchup threshold='123' slew='120' limit='10000'/>
+ </timer>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='unsafe'/>
+ <source file='{{ libvirt_images_path }}{{ new_vm_name }}.img'/>
+ <target dev='sda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw' cache='unsafe'/>
+ <source file='{{ iso_path }}{{ ubuntu_iso }}'/>
+ <target dev='hdb' bus='ide'/>
+ <readonly/>
+ </disk>
+ <controller type='usb' index='0' model='nec-xhci'>
+ <alias name='usb0'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x08' function='0x0'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'>
+ <alias name='pci.0'/>
+ </controller>
+ <controller type='ide' index='0'>
+ <alias name='ide0'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x01' function='0x1'/>
+ </controller>
+ <interface type='network'>
+ <source network='{{ default_pub_subnet }}'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x03' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <source path='/dev/pts/6'/>
+ <target port='0'/>
+ <alias name='serial0'/>
+ </serial>
+ <console type='pty' tty='/dev/pts/6'>
+ <source path='/dev/pts/6'/>
+ <target type='serial' port='0'/>
+ <alias name='serial0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='5900' autoport='yes' listen='0.0.0.0'>
+ <listen type='address' address='0.0.0.0'/>
+ </graphics>
+ <video>
+ <model type='vga' vram='9216' heads='1'/>
+ <alias name='video0'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <alias name='balloon0'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x0a' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
--- /dev/null
+<volume type='file'>
+ <name>{{ new_vm_name }}.img</name>
+ <allocation>0</allocation>
+ <capacity unit='G'>{{ new_vol_size }}</capacity>
+ <target>
+ <format type='qcow2'/>
+ </target>
+</volume>
--- /dev/null
+# Default sizing/boot settings for VMs created by create_vm.yml.
+new_vol_size: 50 # consumed by templates/volume_template.xml (unit='G')
+memory: 8192 # MiB; vm_template.xml converts via {{ memory * 1024 }} KiB
+vcpu: 4
+# Boot order for a freshly created VM: installer ISO first, then disk.
+first_boot: cdrom
+second_boot: hd
--- /dev/null
+* DONE provide playbook for deployment
+CLOSED: [2015-09-30 Ср 19:57]
+:LOGBOOK:
+- State "DONE" from "TODO" [2015-09-30 Ср 19:57]
+:END:
+* TODO leverage https://wiki.jenkins-ci.org/display/JENKINS/Ansible+Plugin usage
+* TODO add cleanup steps before actual workload for all jobs
--- /dev/null
+#!/bin/sh
+time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
+file=/tmp/test.img
+c=100 #100M
+write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
+read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c")
+[ -f $file ] && rm $file
+
+echo "{
+ \"write_seq\": $write_seq,
+ \"read_seq\": $read_seq
+ }"
--- /dev/null
+#!/bin/sh
+export LC_ALL=C
+export OS_NO_CACHE='true'
+export OS_TENANT_NAME='admin'
+export OS_USERNAME='admin'
+export OS_PASSWORD='admin'
+export OS_AUTH_URL='http://192.168.0.2:5000/v2.0/'
+export OS_AUTH_STRATEGY='keystone'
+export OS_REGION_NAME='RegionOne'
+export CINDER_ENDPOINT_TYPE='publicURL'
+export GLANCE_ENDPOINT_TYPE='publicURL'
+export KEYSTONE_ENDPOINT_TYPE='publicURL'
+export NOVA_ENDPOINT_TYPE='publicURL'
+export NEUTRON_ENDPOINT_TYPE='publicURL'
--- /dev/null
+---
+
+ KeystoneBasic.create_user:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_delete_user:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_and_list_tenants:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_and_list_users:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_tenant:
+ -
+ args:
+ name_length: 10
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ sla:
+ failure_rate:
+ max: 0
+
+ KeystoneBasic.create_tenant_with_users:
+ -
+ args:
+ name_length: 10
+ users_per_tenant: 10
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ # NOTE(review): a second "KeystoneBasic.create_delete_user" mapping key was
+ # declared here, duplicating the scenario already defined above with
+ # identical args. Duplicate mapping keys are invalid YAML and most parsers
+ # silently keep only the last occurrence, so it added nothing. Commented
+ # out; original body kept for reference:
+ # KeystoneBasic.create_delete_user:
+ #   -
+ #     args:
+ #       name_length: 10
+ #     runner:
+ #       type: "constant"
+ #       times: 1
+ #       concurrency: 50
+ #     sla:
+ #       failure_rate:
+ #         max: 0
+
+ HeatStacks.create_and_list_stack:
+ -
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+
+ HeatStacks.create_and_delete_stack:
+ -
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+
+ Authenticate.keystone:
+ -
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Authenticate.validate_cinder:
+ -
+ args:
+ repetitions: 2
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Authenticate.validate_glance:
+ -
+ args:
+ repetitions: 2
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Authenticate.validate_heat:
+ -
+ args:
+ repetitions: 2
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Authenticate.validate_nova:
+ -
+ args:
+ repetitions: 2
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Quotas.cinder_update_and_delete:
+ -
+ args:
+ max_quota: 1024
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Quotas.cinder_update:
+ -
+ args:
+ max_quota: 1024
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Quotas.nova_update_and_delete:
+ -
+ args:
+ max_quota: 1024
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ Quotas.nova_update:
+ -
+ args:
+ max_quota: 1024
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ VMTasks.boot_runcommand_delete:
+ -
+ args:
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ floating_network: "net04_ext"
+ use_floatingip: true
+ script: "/home/tester/instance_dd_test.sh"
+ interpreter: "/bin/sh"
+ username: "cirros"
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ network: {}
+ sla:
+ failure_rate:
+ max: 0
+
+
+ NovaServers.boot_and_delete_server:
+ -
+ args:
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ -
+ args:
+ auto_assign_nic: true
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ network:
+ start_cidr: "10.2.0.0/24"
+ networks_per_tenant: 2
+ sla:
+ failure_rate:
+ max: 0
+
+
+ NovaServers.boot_and_list_server:
+ -
+ args:
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ detailed: True
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ NovaServers.list_servers:
+ -
+ args:
+ detailed: True
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ servers:
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ servers_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ NovaServers.boot_and_bounce_server:
+ -
+ args:
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ actions:
+ -
+ hard_reboot: 1
+ -
+ stop_start: 1
+ -
+ rescue_unrescue: 1
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ NovaServers.boot_server:
+ -
+ args:
+ flavor:
+ name: "^ram64$"
+ image:
+ name: "TestVM|cirros.*uec"
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ flavors:
+ -
+ name: "ram64"
+ ram: 64
+ sla:
+ failure_rate:
+ max: 0
+ -
+ args:
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ sla:
+ failure_rate:
+ max: 0
+
+ HttpRequests.check_request:
+ -
+ args:
+ url: "http://www.example.com"
+ method: "GET"
+ status_code: 200
+ runner:
+ type: "constant"
+ times: 2
+ concurrency: 50
+ sla:
+ failure_rate:
+ max: 0
+
+ NovaSecGroup.create_and_delete_secgroups:
+ -
+ args:
+ security_group_count: 5
+ rules_per_security_group: 5
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ quotas:
+ nova:
+ security_groups: -1
+ security_group_rules: -1
+ sla:
+ failure_rate:
+ max: 0
+
+ NovaSecGroup.create_and_list_secgroups:
+ -
+ args:
+ security_group_count: 5
+ rules_per_security_group: 5
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ quotas:
+ nova:
+ security_groups: -1
+ security_group_rules: -1
+ sla:
+ failure_rate:
+ max: 0
+
+
+ NovaSecGroup.boot_and_delete_server_with_secgroups:
+ -
+ args:
+ flavor:
+ name: "m1.tiny"
+ image:
+ name: "TestVM|cirros.*uec"
+ security_group_count: 5
+ rules_per_security_group: 5
+ runner:
+ type: "constant"
+ times: 1
+ concurrency: 50
+ context:
+ users:
+ tenants: 1
+ users_per_tenant: 1
+ network:
+ start_cidr: "10.2.0.0/24"
+ quotas:
+ nova:
+ security_groups: -1
+ security_group_rules: -1
--- /dev/null
+<network>
+ <name>testing-network_pub</name>
+ <forward mode='route'/>
+ <ip address='172.16.59.241' prefix='28'>
+ <dhcp>
+ <range start="172.16.59.242" end="172.16.59.254" />
+ </dhcp>
+ </ip>
+</network>