From b94b97cbfab1482d8372fc56eacf0c7d654d5645 Mon Sep 17 00:00:00 2001
From: Sumit Naiksatam
Date: Thu, 15 Mar 2012 16:43:19 -0700
Subject: [PATCH] Bug #956559 VIF driver and scheduler for UCS plugin are
 broken since the flag configuration mechanism in nova changed. Fixing that,
 and also fixing some property names, along with changes to how the quantum
 client code is invoked.

Change-Id: I757cc149f08673ce24d35ee0bfffae8e5b1a4afc
---
 quantum/plugins/cisco/README                  | 877 +++++++++---------
 .../nova/quantum_port_aware_scheduler.py      |  61 +-
 quantum/plugins/cisco/nova/vifdirect.py       |  50 +-
 3 files changed, 499 insertions(+), 489 deletions(-)

diff --git a/quantum/plugins/cisco/README b/quantum/plugins/cisco/README
index 148a3f326..d81764363 100755
--- a/quantum/plugins/cisco/README
+++ b/quantum/plugins/cisco/README
@@ -1,447 +1,430 @@
-=========================================================================================
-README: A Quantum Plugin Framework for Supporting L2 Networks Spannning Multiple Switches
-=========================================================================================
-
-:Author: Sumit Naiksatam, Ram Durairaj, Mark Voelker, Edgar Magana, Shweta Padubidri,
-         Rohit Agarwalla, Ying Liu, Debo Dutta
-:Contact: netstack@lists.launchpad.net
-:Web site: https://launchpad.net/~cisco-openstack
-:Copyright: 2011 Cisco Systems, Inc.
-
-.. contents::
-
-Introduction
-------------
-
-This plugin implementation provides the following capabilities
-to help you take your Layer 2 network for a Quantum leap:
-
-* A reference implementation for a Quantum Plugin Framework
-(For details see: http://wiki.openstack.org/quantum-multi-switch-plugin)
-* Supports multiple switches in the network
-* Supports multiple models of switches concurrently
-* Supports use of multiple L2 technologies
-* Supports Cisco UCS blade servers with M81KR Virtual Interface Cards
-  (aka "Palo adapters") via 802.1Qbh.
-* Supports the Cisco Nexus family of switches.
-
-It does not provide:
-
-* A hologram of Al that only you can see.
-* A map to help you find your way through time.
-* A cure for amnesia or your swiss-cheesed brain.
-
-Let's leap in!
-
-Pre-requisites
---------------
-(The following are necessary only when using the UCS and/or Nexus devices in your system.
-If you plan to just leverage the plugin framework, you do not need these.)
-* One or more UCS B200 series blade servers with M81KR VIC (aka
-  Palo adapters) installed.
-* UCSM 2.0 (Capitola) Build 230 or above.
-* OpenStack Diablo D3 or later (should have VIF-driver support)
-* RHEL 6.1 (as of this writing, UCS only officially supports RHEL, but
-  it should be noted that Ubuntu support is planned in coming releases as well)
-  ** Package: python-configobj-4.6.0-3.el6.noarch (or newer)
-  ** Package: python-routes-1.12.3-2.el6.noarch (or newer)
-
-If you are using a Nexus switch in your topology, you'll need the following
-NX-OS version and packages to enable Nexus support:
-* NX-OS 5.2.1 (Delhi) Build 69 or above.
-* paramiko library - SSHv2 protocol library for python
-  ** To install on RHEL 6.1, run: yum install python-paramiko
-* ncclient v0.3.1 - Python library for NETCONF clients
-  ** You need a version of ncclient modifed by Cisco Systems.
- To get it, from your shell prompt do: - - git clone git@github.com:CiscoSystems/ncclient.git - sudo python ./setup.py install - - ** For more information of ncclient, see: - http://schmizz.net/ncclient/ - -To verify the version of any package you have installed on your system, -run "rpm -qav | grep ", where is the -package you want to query (for example: python-routes). - -Note that you can get access to recent versions of the packages above -and other OpenStack software packages by adding a new repository to -your yum configuration. To do so, edit or create -/etc/yum.repos.d/openstack.repo and add the following: - -[openstack-deps] -name=OpenStack Nova Compute Dependencies -baseurl=http://yum.griddynamics.net/yum/cactus/deps -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-OPENSTACK - -Then run "yum install python-routes". - - -Module Structure: ------------------ -* quantum/plugins/cisco/ - Contains the L2-Network Plugin Framework - /client - CLI module for core and extensions API - /common - Modules common to the entire plugin - /conf - All configuration files - /db - Persistence framework - /models - Class(es) which tie the logical abstractions - to the physical topology - /nova - Scheduler and VIF-driver to be used by Nova - /nexus - Nexus-specific modules - /segmentation - Implementation of segmentation manager, - e.g. VLAN Manager - /tests - Tests specific to this plugin - /ucs - UCS-specific modules - - -Plugin Installation Instructions ----------------------------------- -1. Make a backup copy of quantum/quantum/plugins.ini. - -2. Edit quantum/quantum/plugins.ini and edit the "provider" entry to point - to the L2Network-plugin: - -provider = quantum.plugins.cisco.l2network_plugin.L2Network - -3. Configure your OpenStack installation to use the 802.1qbh VIF driver and - Quantum-aware scheduler by editing the /etc/nova/nova.conf file with the - following entries: - ---scheduler_driver=quantum.plugins.cisco.nova.quantum_port_aware_scheduler.QuantumPortAwareScheduler ---quantum_host=127.0.0.1 ---quantum_port=9696 ---libvirt_vif_driver=quantum.plugins.cisco.nova.vifdirect.Libvirt802dot1QbhDriver ---libvirt_vif_type=802.1Qbh - - Note: To be able to bring up a VM on a UCS blade, you should first create a - port for that VM using the Quantum create port API. VM creation will - fail if an unused port is not available. If you have configured your - Nova project with more than one network, Nova will attempt to instantiate - the VM with one network interface (VIF) per configured network. To provide - plugin points for each of these VIFs, you will need to create multiple - Quantum ports, one for each of the networks, prior to starting the VM. - However, in this case you will need to use the Cisco multiport extension - API instead of the Quantum create port API. More details on using the - multiport extension follow in the section on multi NIC support. - -4. To support the above configuration, you will need some Quantum modules. It's easiest - to copy the entire quantum directory from your quantum installation into: - -/usr/lib/python2.6/site-packages/ - - This needs to be done for each nova compute node. - -5. If you want to turn on support for Cisco Nexus switches: - 5a. Uncomment the nexus_plugin property in - quantum/plugins/cisco/conf/plugins.ini to read: - -nexus_plugin=quantum.plugins.cisco.nexus.cisco_nexus_plugin.NexusPlugin - - 5b. Enter the relevant configuration in the - quantum/plugins/cisco/conf/nexus.ini file. 
Example: - -[SWITCH] -# Change the following to reflect the IP address of the Nexus switch. -# This will be the address at which Quantum sends and receives configuration -# information via SSHv2. -nexus_ip_address=10.0.0.1 -# Port numbers on the Nexus switch to each one of the UCSM 6120s is connected -# Use shortened interface syntax, e.g. "1/10" not "Ethernet1/10". -nexus_first_port=1/10 -nexus_second_port=1/11 -#Port number where SSH will be running on the Nexus switch. Typically this is 22 -#unless you've configured your switch otherwise. -nexus_ssh_port=22 - -[DRIVER] -name=quantum.plugins.cisco.nexus.cisco_nexus_network_driver.CiscoNEXUSDriver - - 5c. Make sure that SSH host key of the Nexus switch is known to the - host on which you are running the Quantum service. You can do - this simply by logging in to your Quantum host as the user that - Quantum runs as and SSHing to the switch at least once. If the - host key changes (e.g. due to replacement of the supervisor or - clearing of the SSH config on the switch), you may need to repeat - this step and remove the old hostkey from ~/.ssh/known_hosts. - -6. Plugin Persistence framework setup: - 6a. Create quantum_l2network database in mysql with the following command - - -mysql -u -p -e "create database quantum_l2network" - - 6b. Enter the quantum_l2network database configuration info in the - quantum/plugins/cisco/conf/db_conn.ini file. - - 6c. If there is a change in the plugin configuration, service would need - to be restarted after dropping and re-creating the database using - the following commands - - -mysql -u -p -e "drop database quantum_l2network" -mysql -u -p -e "create database quantum_l2network" - -7. Verify that you have the correct credentials for each IP address listed - in quantum/plugins/cisco/conf/credentials.ini. Example: - -# Provide the UCSM credentials, create a separte entry for each UCSM used in your system -# UCSM IP address, username and password. -[10.0.0.2] -username=admin -password=mySecretPasswordForUCSM - -# Provide the Nexus credentials, if you are using Nexus switches. -# If not this will be ignored. -[10.0.0.1] -username=admin -password=mySecretPasswordForNexus - - In general, make sure that every UCSM and Nexus switch used in your system, - has a credential entry in the above file. This is required for the system to - be able to communicate with those switches. - -8. Configure the UCS systems' information in your deployment by editing the - quantum/plugins/cisco/conf/ucs_inventory.ini file. You can configure multiple - UCSMs per deployment, multiple chassis per UCSM, and multiple blades per - chassis. Chassis ID and blade ID can be obtained from the UCSM (they will - typically be numbers like 1, 2, 3, etc.). Also make sure that you put the exact - hostname as nova sees it (the host column in the services table of the nova - DB will give you that information). - -[ucsm-1] -ip_address = -[[chassis-1]] -chassis_id = -[[[blade-1]]] -blade_id = -host_name = -[[[blade-2]]] -blade_id = -host_name = -[[[blade-3]]] -blade_id = -host_name = - -[ucsm-2] -ip_address = -[[chassis-1]] -chassis_id = -[[[blade-1]]] -blade_id = -host_name = -[[[blade-2]]] -blade_id = -host_name = - - -9. Start the Quantum service. If something doesn't work, verify that - your configuration of each of the above files hasn't gone a little kaka. - Once you've put right what once went wrong, leap on. 
- - -Multi NIC support for VMs -------------------------- -As indicated earlier, if your Nova setup has a project with more than one network, -Nova will try to create a virtual network interface (VIF) on the VM for each of those -networks. That implies - - - (1) You should create the same number of networks in Quantum as in your Nova - project. - - (2) Before each VM is instantiated, you should create Quantum ports on each of those - networks. These ports need to be created using the following rest call: - -POST /v1.0/extensions/csco/tenants/{tenant_id}/multiport/ - -with request body: - -{'multiport': - {'status': 'ACTIVE', - 'net_id_list': net_id_list, - 'ports_desc': {'key': 'value'}}} - -where, - -net_id_list is a list of network IDs: [netid1, netid2, ...]. The "ports_desc" dictionary -is reserved for later use. For now, the same structure in terms of the dictionary name, key -and value should be used. - -The corresponding CLI for this operation is as follows: - -PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py create_multiport - - (Note that you should not be using the create port core API in the above case.) - - -Using the Command Line Client to work with this Plugin ------------------------------------------------------- -A command line client is packaged with this plugin. This module can be used -to invoke the core API as well as the extensions API, so that you don't have -to switch between different CLI modules (it internally invokes the Quantum -CLI module for the core APIs to ensure consistency when using either). This -command line client can be invoked as follows: - -PYTHONPATH=.:tools python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py - -1. Creating the network - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py create_net -H 10.10.2.6 demo net1 -Created a new Virtual Network with ID: c4a2bea7-a528-4caf-b16e-80397cd1663a -for Tenant demo - - -2. Listing the networks - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py list_nets -H 10.10.2.6 demo -Virtual Networks for Tenant demo - Network ID: 0e85e924-6ef6-40c1-9f7a-3520ac6888b3 - Network ID: c4a2bea7-a528-4caf-b16e-80397cd1663a - - -3. Creating one port on each of the networks - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py create_multiport -H 10.10.2.6 demo c4a2bea7-a528-4caf-b16e-80397cd1663a,0e85e924-6ef6-40c1-9f7a-3520ac6888b3 -Created ports: {u'ports': [{u'id': u'118ac473-294d-480e-8f6d-425acbbe81ae'}, {u'id': u'996e84b8-2ed3-40cf-be75-de17ff1214c4'}]} - - -4. List all the ports on a network - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py list_ports -H 10.10.2.6 demo c4a2bea7-a528-4caf-b16e-80397cd1663a -Ports on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a -for Tenant: demo - Logical Port: 118ac473-294d-480e-8f6d-425acbbe81ae - - -5. Show the details of a port - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py show_port -H 10.10.2.6 demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae -Logical Port ID: 118ac473-294d-480e-8f6d-425acbbe81ae -administrative State: ACTIVE -interface: -on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a -for Tenant: demo - - -6. 
Start the VM instance using Nova - Note that when using UCS and the 802.1Qbh features, the association of the - VIF-ID (also referred to as interface ID) on the VM's NIC with a port will - happen automatically when the VM is instantiated. At this point, doing a - show_port will reveal the VIF-ID associated with the port. To indicate that - this VIF-ID is still detached from the network it would eventually be on, you - will see the suffix "(detached)" on the VIF-ID. This indicates that although - the VIF-ID and the port have been associated, the VIF still does not have - connectivity to the network on which the port resides. That connectivity - will be established only after the plug/attach operation is performed (as - described in the next step). - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py show_port demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae -Logical Port ID: 118ac473-294d-480e-8f6d-425acbbe81ae -administrative State: ACTIVE -interface: b73e3585-d074-4379-8dde-931c0fc4db0e(detached) -on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a -for Tenant: demo - - -7. Plug interface and port into the network - Use the interface information obtained in step 6 to plug the interface into - the network. - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py plug_iface demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae b73e3585-d074-4379-8dde-931c0fc4db0e -Plugged interface b73e3585-d074-4379-8dde-931c0fc4db0e -into Logical Port: 118ac473-294d-480e-8f6d-425acbbe81ae -on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a -for Tenant: demo - - -8. Unplug an interface and port from the network - -# PYTHONPATH=. python plugins/cisco-plugin/lib/quantum/plugins/cisco/client/cli.py unplug_iface demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae -Unplugged interface from Logical Port: 118ac473-294d-480e-8f6d-425acbbe81ae -on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a -for Tenant: demo - - Note: After unplugging, if you check the details of the port, you will - see the VIF-IF associated with the port (but now suffixed with the state - "detached"). At this point, it is possible to plug the VIF into the network - again making use of the same VIF-ID. In general, once associated, the VIF-ID - cannot be disassociated with the port until the VM is terminated. After the - VM is terminated, the VIF-ID will be automatically disassociated from the - port. To summarize, association and disassociation of the VIF-ID with a port - happens automatically at the time of creating and terminating the VM. The - connectivity of the VIF to the network is controlled by the user via the - plug and unplug operations. - - -How to test the installation ----------------------------- -The unit tests are located at quantum/plugins/cisco/tests/unit. They can be -executed from the main folder using the run_tests.sh or to get a more detailed -result the run_tests.py script. - -1. All unit tests (needs environment setup as indicated in the pre-requisites): - - ./run_tests.sh -N quantum.plugins.cisco.tests.unit - - or by modifying the environment variable to point to the plugin directory - - In bash : export PLUGIN_DIR=quantum/plugins/cisco - tcsh/csh : setenv PLUGIN_DIR quantum/plugins/cisco - - ./run_tests.sh -N - - Another option is to execute the python script run_tests.py - - python run_tests.py quantum.plugins.cisco.tests.unit - -2. 
Testing the core API (without UCS/Nexus/RHEL hardware, and can be run on
-   Ubuntu):
-   Device-specific plugins can be disabled by commenting out the entries in:
-   etc/quantum/plugins/cisco/cisco_plugins.ini
-   The Core API can be tested by initially disabling all device plugins, then
-   enabling just the UCS plugins, and finally enabling both the UCS and the
-   Nexus plugins.
-   Execute the test script as follows:
-
-   ./run_tests.sh -N quantum.plugins.cisco.tests.unit.test_l2networkApi
-
-   or
-
-   python run_tests.py quantum.plugins.cisco.tests.unit.test_l2networkApi
-
-3. Specific Plugin unit test (needs environment setup as indicated in the
-   pre-requisites):
-
-   ./run_tests.sh -N quantum.plugins.cisco.tests.unit.
-
-   or
-
-   python run_tests.py quantum.plugins.cisco.tests.unit.
-   E.g.:
-
-   python run_tests.py quantum.plugins.cisco.tests.unit.test_ucs_plugin
-
-   To run specific tests, use the following:
-   python run_tests.py
-   quantum.plugins.cisco.tests.unit.:.
-
-   Eg:
-   python run_tests.py
-   quantum.plugins.cisco.tests.unit.test_ucs_plugin:UCSVICTestPlugin.test_create_port
-
-4. Testing the Extension API
-   The script is placed alongwith the other cisco unit tests. The location may
-   change later.
-   Location quantum/plugins/cisco/tests/unit/test_cisco_extension.py
-
-   The script can be executed by :
-   ./run_tests.sh -N quantum.plugins.cisco.tests.unit.test_cisco_extension
-
-   or
-
-   python run_tests.py quantum.plugins.cisco.tests.unit.test_cisco_extension
-
-
-Bingo bango bongo! That's it! Thanks for taking the leap into Quantum.
-
-...Oh, boy!
+=========================================================================================
+README: A Quantum Plugin Framework for Supporting L2 Networks Spanning Multiple Switches
+=========================================================================================
+
+:Author: Sumit Naiksatam, Ram Durairaj, Mark Voelker, Edgar Magana, Shweta Padubidri,
+         Rohit Agarwalla, Ying Liu, Debo Dutta
+:Contact: netstack@lists.launchpad.net
+:Web site: https://launchpad.net/~cisco-openstack
+:Copyright: 2011 Cisco Systems, Inc.
+
+.. contents::
+
+Introduction
+------------
+
+This plugin implementation provides the following capabilities
+to help you take your Layer 2 network for a Quantum leap:
+
+* A reference implementation for a Quantum Plugin Framework
+(For details see: http://wiki.openstack.org/quantum-multi-switch-plugin)
+* Supports multiple switches in the network
+* Supports multiple models of switches concurrently
+* Supports use of multiple L2 technologies
+* Supports Cisco UCS blade servers with M81KR Virtual Interface Cards
+  (aka "Palo adapters") via 802.1Qbh.
+* Supports the Cisco Nexus family of switches.
+
+It does not provide:
+
+* A hologram of Al that only you can see.
+* A map to help you find your way through time.
+* A cure for amnesia or your swiss-cheesed brain.
+
+Let's leap in!
+
+Pre-requisites
+--------------
+(The following are necessary only when using the UCS and/or Nexus devices in your system.
+If you plan to just leverage the plugin framework, you do not need these.)
+* One or more UCS B200 series blade servers with M81KR VIC (aka
+  Palo adapters) installed.
+* UCSM 2.0 (Capitola) Build 230 or above.
+* OpenStack Diablo D3 or later (should have VIF-driver support)
+* OS supported:
+  ** RHEL 6.1 or above
+  ** Ubuntu 11.10 or above
+  ** Package: python-configobj-4.6.0-3.el6.noarch (or newer)
+  ** Package: python-routes-1.12.3-2.el6.noarch (or newer)
+
+If you are using a Nexus switch in your topology, you'll need the following
+NX-OS version and packages to enable Nexus support:
+* NX-OS 5.2.1 (Delhi) Build 69 or above.
+* paramiko library - SSHv2 protocol library for python
+* ncclient v0.3.1 - Python library for NETCONF clients
+  ** You need a version of ncclient modified by Cisco Systems.
+     To get it, from your shell prompt do:
+
+     git clone git@github.com:CiscoSystems/ncclient.git
+     sudo python ./setup.py install
+
+  ** For more information on ncclient, see:
+     http://schmizz.net/ncclient/
+
+Module Structure:
+-----------------
+* quantum/plugins/cisco/ - Contains the L2-Network Plugin Framework
+   /client - CLI module for core and extensions API
+   /common - Modules common to the entire plugin
+   /conf - All configuration files
+   /db - Persistence framework
+   /models - Class(es) which tie the logical abstractions
+             to the physical topology
+   /nova - Scheduler and VIF-driver to be used by Nova
+   /nexus - Nexus-specific modules
+   /segmentation - Implementation of segmentation manager,
+                   e.g. VLAN Manager
+   /services - Set of orchestration libraries to insert
+               In-path Networking Services
+   /tests - Tests specific to this plugin
+   /ucs - UCS-specific modules
+
+
+Plugin Installation Instructions
+----------------------------------
+1. Make a backup copy of quantum/etc/plugins.ini.
+
+2. Edit quantum/etc/plugins.ini and set the "provider" entry to point
+   to the L2Network-plugin:
+
+provider = quantum.plugins.cisco.l2network_plugin.L2Network
+
+3. Configure your OpenStack installation to use the 802.1Qbh VIF driver and
+   Quantum-aware scheduler by editing the /etc/nova/nova.conf file with the
+   following entries:
+
+--scheduler_driver=quantum.plugins.cisco.nova.quantum_port_aware_scheduler.QuantumPortAwareScheduler
+--quantum_connection_host=127.0.0.1
+--quantum_connection_port=9696
+--libvirt_vif_driver=quantum.plugins.cisco.nova.vifdirect.Libvirt802dot1QbhDriver
+--libvirt_vif_type=802.1Qbh
+
+   Note: To be able to bring up a VM on a UCS blade, you should first create a
+   port for that VM using the Quantum create port API. VM creation will
+   fail if an unused port is not available. If you have configured your
+   Nova project with more than one network, Nova will attempt to instantiate
+   the VM with one network interface (VIF) per configured network. To provide
+   plugin points for each of these VIFs, you will need to create multiple
+   Quantum ports, one for each of the networks, prior to starting the VM.
+   However, in this case you will need to use the Cisco multiport extension
+   API instead of the Quantum create port API. More details on using the
+   multiport extension follow in the section on multi NIC support.
+
+4. To support the above configuration, you will need some Quantum modules. It's easiest
+   to copy the entire quantum directory from your quantum installation into:
+
+/usr/lib/python2.7/site-packages/
+
+   This needs to be done for each nova compute node.
+
+5. If you want to turn on support for Cisco Nexus switches:
+   5a. Uncomment the nexus_plugin property in
+       etc/quantum/plugins/cisco/cisco_plugins.ini to read:
+
+nexus_plugin=quantum.plugins.cisco.nexus.cisco_nexus_plugin.NexusPlugin
+
+   5b. Enter the relevant configuration in the
+       etc/quantum/plugins/cisco/nexus.ini file.
Example:
+
+[SWITCH]
+# Change the following to reflect the IP address of the Nexus switch.
+# This will be the address at which Quantum sends and receives configuration
+# information via SSHv2.
+nexus_ip_address=10.0.0.1
+# Port numbers on the Nexus switch to which each one of the UCSM 6120s is
+# connected. Use shortened interface syntax, e.g. "1/10" not "Ethernet1/10".
+nexus_first_port=1/10
+nexus_second_port=1/11
+# Port number where SSH will be running on the Nexus switch. Typically this is
+# 22 unless you've configured your switch otherwise.
+nexus_ssh_port=22
+
+[DRIVER]
+name=quantum.plugins.cisco.nexus.cisco_nexus_network_driver.CiscoNEXUSDriver
+
+   5c. Make sure that the SSH host key of the Nexus switch is known to the
+       host on which you are running the Quantum service. You can do
+       this simply by logging in to your Quantum host as the user that
+       Quantum runs as and SSHing to the switch at least once. If the
+       host key changes (e.g. due to replacement of the supervisor or
+       clearing of the SSH config on the switch), you may need to repeat
+       this step and remove the old hostkey from ~/.ssh/known_hosts.
+
+6. Plugin Persistence framework setup:
+   6a. Create the quantum_l2network database in MySQL with the following
+       command:
+
+mysql -u<username> -p<password> -e "create database quantum_l2network"
+
+   6b. Enter the quantum_l2network database configuration info in the
+       quantum/plugins/cisco/conf/db_conn.ini file.
+
+   6c. If there is a change in the plugin configuration, the service would need
+       to be restarted after dropping and re-creating the database using
+       the following commands:
+
+mysql -u<username> -p<password> -e "drop database quantum_l2network"
+mysql -u<username> -p<password> -e "create database quantum_l2network"
+
+7. Verify that you have the correct credentials for each IP address listed
+   in quantum/plugins/cisco/conf/credentials.ini. Example:
+
+# Provide the UCSM credentials; create a separate entry for each UCSM
+# used in your system: UCSM IP address, username and password.
+[10.0.0.2]
+username=admin
+password=mySecretPasswordForUCSM
+
+# Provide the Nexus credentials, if you are using Nexus switches.
+# If not, this will be ignored.
+[10.0.0.1]
+username=admin
+password=mySecretPasswordForNexus
+
+   In general, make sure that every UCSM and Nexus switch used in your system
+   has a credential entry in the above file. This is required for the system to
+   be able to communicate with those switches.
+
+8. Configure the UCS systems' information in your deployment by editing the
+   quantum/plugins/cisco/conf/ucs_inventory.ini file. You can configure multiple
+   UCSMs per deployment, multiple chassis per UCSM, and multiple blades per
+   chassis. Chassis ID and blade ID can be obtained from the UCSM (they will
+   typically be numbers like 1, 2, 3, etc.). Also make sure that you put the exact
+   hostname as nova sees it (the host column in the services table of the nova
+   DB will give you that information).
+
+[ucsm-1]
+ip_address = <UCSM IP address>
+[[chassis-1]]
+chassis_id = <chassis ID>
+[[[blade-1]]]
+blade_id = <blade ID>
+host_name = <hostname of the blade as seen by nova>
+[[[blade-2]]]
+blade_id = <blade ID>
+host_name = <hostname of the blade as seen by nova>
+[[[blade-3]]]
+blade_id = <blade ID>
+host_name = <hostname of the blade as seen by nova>
+
+[ucsm-2]
+ip_address = <UCSM IP address>
+[[chassis-1]]
+chassis_id = <chassis ID>
+[[[blade-1]]]
+blade_id = <blade ID>
+host_name = <hostname of the blade as seen by nova>
+[[[blade-2]]]
+blade_id = <blade ID>
+host_name = <hostname of the blade as seen by nova>
+
+
+9. Start the Quantum service. If something doesn't work, verify that
+   your configuration of each of the above files hasn't gone a little kaka.
+   Once you've put right what once went wrong, leap on.
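+
+   Before relying on the scheduler and VIF driver configured in step 3, you
+   may want to confirm that the Quantum service is reachable and advertises
+   the "Cisco Nova Tenant" extension, since both modules refuse to start
+   without it. The following is only a minimal Python sketch of that check,
+   mirroring the startup code in this patch; the host/port values and the
+   assumed shape of the /extensions response are illustrative, not a
+   supported diagnostic tool:
+
+   from quantum.client import Client
+
+   # Assumed to match quantum_connection_host/port in nova.conf (step 3).
+   client = Client('127.0.0.1', '9696', False, format='json',
+                   version='1.0', uri_prefix="", tenant="dummy")
+   data = client.do_request('GET', "/extensions")
+   # Assumes the body looks like {'extensions': [{'name': ...}, ...]}.
+   names = [ext['name'] for ext in data['extensions']]
+   print 'Cisco Nova Tenant' in names  # True means the extension is loaded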
+ + +Multi NIC support for VMs +------------------------- +As indicated earlier, if your Nova setup has a project with more than one network, +Nova will try to create a virtual network interface (VIF) on the VM for each of those +networks. That implies - + + (1) You should create the same number of networks in Quantum as in your Nova + project. + + (2) Before each VM is instantiated, you should create Quantum ports on each of those + networks. These ports need to be created using the following rest call: + +POST /1.0/extensions/csco/tenants/{tenant_id}/multiport/ + +with request body: + +{'multiport': + {'status': 'ACTIVE', + 'net_id_list': net_id_list, + 'ports_desc': {'key': 'value'}}} + +where, + +net_id_list is a list of network IDs: [netid1, netid2, ...]. The "ports_desc" dictionary +is reserved for later use. For now, the same structure in terms of the dictionary name, key +and value should be used. + +The corresponding CLI for this operation is as follows: + +PYTHONPATH=. python quantum/plugins/cisco/client/cli.py create_multiport + + (Note that you should not be using the create port core API in the above case.) + + +Using the Command Line Client to work with this Plugin +------------------------------------------------------ +A command line client is packaged with this plugin. This module can be used +to invoke the core API as well as the extensions API, so that you don't have +to switch between different CLI modules (it internally invokes the Quantum +CLI module for the core APIs to ensure consistency when using either). This +command line client can be invoked as follows: + +PYTHONPATH=.:tools python quantum/plugins/cisco/client/cli.py + +1. Creating the network + +# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py create_net -H 10.10.2.6 demo net1 +Created a new Virtual Network with ID: c4a2bea7-a528-4caf-b16e-80397cd1663a +for Tenant demo + + +2. Listing the networks + +# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py list_nets -H 10.10.2.6 demo +Virtual Networks for Tenant demo + Network ID: 0e85e924-6ef6-40c1-9f7a-3520ac6888b3 + Network ID: c4a2bea7-a528-4caf-b16e-80397cd1663a + + +3. Creating one port on each of the networks + +# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py create_multiport -H 10.10.2.6 demo c4a2bea7-a528-4caf-b16e-80397cd1663a,0e85e924-6ef6-40c1-9f7a-3520ac6888b3 +Created ports: {u'ports': [{u'id': u'118ac473-294d-480e-8f6d-425acbbe81ae'}, {u'id': u'996e84b8-2ed3-40cf-be75-de17ff1214c4'}]} + + +4. List all the ports on a network + +# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py list_ports -H 10.10.2.6 demo c4a2bea7-a528-4caf-b16e-80397cd1663a +Ports on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a +for Tenant: demo + Logical Port: 118ac473-294d-480e-8f6d-425acbbe81ae + + +5. Show the details of a port + +# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py show_port -H 10.10.2.6 demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae +Logical Port ID: 118ac473-294d-480e-8f6d-425acbbe81ae +administrative State: ACTIVE +interface: +on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a +for Tenant: demo + + +6. Start the VM instance using Nova + Note that when using UCS and the 802.1Qbh features, the association of the + VIF-ID (also referred to as interface ID) on the VM's NIC with a port will + happen automatically when the VM is instantiated. At this point, doing a + show_port will reveal the VIF-ID associated with the port. 
To indicate that
+   this VIF-ID is still detached from the network it would eventually be on, you
+   will see the suffix "(detached)" on the VIF-ID. This indicates that although
+   the VIF-ID and the port have been associated, the VIF still does not have
+   connectivity to the network on which the port resides. That connectivity
+   will be established only after the plug/attach operation is performed (as
+   described in the next step).
+
+# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py show_port demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae
+Logical Port ID: 118ac473-294d-480e-8f6d-425acbbe81ae
+administrative State: ACTIVE
+interface: b73e3585-d074-4379-8dde-931c0fc4db0e(detached)
+on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a
+for Tenant: demo
+
+
+7. Plug interface and port into the network
+   Use the interface information obtained in step 6 to plug the interface into
+   the network.
+
+# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py plug_iface demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae b73e3585-d074-4379-8dde-931c0fc4db0e
+Plugged interface b73e3585-d074-4379-8dde-931c0fc4db0e
+into Logical Port: 118ac473-294d-480e-8f6d-425acbbe81ae
+on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a
+for Tenant: demo
+
+
+8. Unplug an interface and port from the network
+
+# PYTHONPATH=. python quantum/plugins/cisco/client/cli.py unplug_iface demo c4a2bea7-a528-4caf-b16e-80397cd1663a 118ac473-294d-480e-8f6d-425acbbe81ae
+Unplugged interface from Logical Port: 118ac473-294d-480e-8f6d-425acbbe81ae
+on Virtual Network: c4a2bea7-a528-4caf-b16e-80397cd1663a
+for Tenant: demo
+
+   Note: After unplugging, if you check the details of the port, you will
+   see the VIF-ID associated with the port (but now suffixed with the state
+   "detached"). At this point, it is possible to plug the VIF into the network
+   again making use of the same VIF-ID. In general, once associated, the VIF-ID
+   cannot be disassociated from the port until the VM is terminated. After the
+   VM is terminated, the VIF-ID will be automatically disassociated from the
+   port. To summarize, association and disassociation of the VIF-ID with a port
+   happen automatically at the time of creating and terminating the VM. The
+   connectivity of the VIF to the network is controlled by the user via the
+   plug and unplug operations.
+
+
+How to test the installation
+----------------------------
+The unit tests are located at quantum/plugins/cisco/tests/unit. They can be
+executed from the main folder using run_tests.sh or, for more detailed
+results, the run_tests.py script.
+
+1. All unit tests (needs environment setup as indicated in the pre-requisites):
+
+   ./run_tests.sh -N quantum.plugins.cisco.tests.unit
+
+   or by modifying the environment variable to point to the plugin directory:
+
+   In bash : export PLUGIN_DIR=quantum/plugins/cisco
+   tcsh/csh : setenv PLUGIN_DIR quantum/plugins/cisco
+
+   ./run_tests.sh -N
+
+   Another option is to execute the python script run_tests.py:
+
+   python run_tests.py quantum.plugins.cisco.tests.unit
+
+2. Testing the core API (without UCS/Nexus/RHEL hardware; can be run on
+   Ubuntu):
+   Device-specific plugins can be disabled by commenting out the entries in:
+   etc/quantum/plugins/cisco/cisco_plugins.ini
+   The Core API can be tested by initially disabling all device plugins, then
+   enabling just the UCS plugins, and finally enabling both the UCS and the
+   Nexus plugins.
+   Execute the test script as follows:
+
+   ./run_tests.sh -N quantum.plugins.cisco.tests.unit.test_l2networkApi
+
+   or
+
+   python run_tests.py quantum.plugins.cisco.tests.unit.test_l2networkApi
+
+3. Specific Plugin unit test (needs environment setup as indicated in the
+   pre-requisites):
+
+   ./run_tests.sh -N quantum.plugins.cisco.tests.unit.<module_name>
+
+   or
+
+   python run_tests.py quantum.plugins.cisco.tests.unit.<module_name>
+   E.g.:
+
+   python run_tests.py quantum.plugins.cisco.tests.unit.test_ucs_plugin
+
+   To run specific tests, use the following:
+   python run_tests.py
+   quantum.plugins.cisco.tests.unit.<module_name>:<class_name>.<method_name>
+
+   E.g.:
+   python run_tests.py
+   quantum.plugins.cisco.tests.unit.test_ucs_plugin:UCSVICTestPlugin.test_create_port
+
+4. Testing the Extension API
+   The script is placed along with the other cisco unit tests. The location may
+   change later.
+   Location: quantum/plugins/cisco/tests/unit/test_cisco_extension.py
+
+   The script can be executed by:
+   ./run_tests.sh -N quantum.plugins.cisco.tests.unit.test_cisco_extension
+
+   or
+
+   python run_tests.py quantum.plugins.cisco.tests.unit.test_cisco_extension
+
+
+Bingo bango bongo! That's it! Thanks for taking the leap into Quantum.
+
+...Oh, boy!
diff --git a/quantum/plugins/cisco/nova/quantum_port_aware_scheduler.py b/quantum/plugins/cisco/nova/quantum_port_aware_scheduler.py
index 27b56bf97..53543b2da 100644
--- a/quantum/plugins/cisco/nova/quantum_port_aware_scheduler.py
+++ b/quantum/plugins/cisco/nova/quantum_port_aware_scheduler.py
@@ -17,33 +17,48 @@
 # @author: Sumit Naiksatam, Cisco Systems, Inc.
 #
+
+"""
+Quantum Port Aware Scheduler Implementation
+"""
+
 from nova import exception as excp
 from nova import flags
 from nova import log as logging
+from nova.openstack.common import cfg
 from nova.scheduler import driver
+from nova.scheduler import chance
 from quantum.client import Client
-from quantum.common.wsgi import Serializer
 
-LOG = logging.getLogger('quantum.plugins.cisco.nova.quantum_aware_scheduler')
+
+LOG = logging.getLogger(__name__)
+
+quantum_opts = [
+    cfg.StrOpt('quantum_connection_host',
+               default='127.0.0.1',
+               help='HOST for connecting to quantum'),
+    cfg.StrOpt('quantum_connection_port',
+               default='9696',
+               help='PORT for connecting to quantum'),
+    cfg.StrOpt('quantum_default_tenant_id',
+               default="default",
+               help='Default tenant id when creating quantum networks'),
+    ]
 
 FLAGS = flags.FLAGS
-flags.DEFINE_string('quantum_host', "127.0.0.1",
-                    'IP address of the quantum network service.')
-flags.DEFINE_integer('quantum_port', 9696,
-                     'Listening port for Quantum network service')
+FLAGS.register_opts(quantum_opts)
 
-HOST = FLAGS.quantum_host
-PORT = FLAGS.quantum_port
+HOST = FLAGS.quantum_connection_host
+PORT = FLAGS.quantum_connection_port
 USE_SSL = False
-ACTION_PREFIX_EXT = '/v1.0'
-ACTION_PREFIX_CSCO = ACTION_PREFIX_EXT + \
-        '/extensions/csco/tenants/{tenant_id}'
+VERSION = '1.0'
+URI_PREFIX_CSCO = '/extensions/csco/tenants/{tenant_id}'
 TENANT_ID = 'nova'
 CSCO_EXT_NAME = 'Cisco Nova Tenant'
 ACTION = '/schedule_host'
 
-class QuantumPortAwareScheduler(driver.Scheduler):
+class QuantumPortAwareScheduler(chance.ChanceScheduler):
     """
     Quantum network service dependent scheduler
     Obtains the hostname from Quantum using an extension API
@@ -52,8 +67,9 @@ class QuantumPortAwareScheduler(driver.Scheduler):
         # We have to send a dummy tenant name here since the client
         # needs some tenant name, but the tenant name will not be used
         # since the extensions URL does not require it
-        client = Client(HOST, PORT, USE_SSL, format='json',
action_prefix=ACTION_PREFIX_EXT, tenant="dummy") + LOG.debug("Initializing Cisco Quantum Port-aware Scheduler...") + client = Client(HOST, PORT, USE_SSL, format='json', version=VERSION, + uri_prefix="", tenant="dummy", logger=LOG) request_url = "/extensions" data = client.do_request('GET', request_url) LOG.debug("Obtained supported extensions from Quantum: %s" % data) @@ -63,17 +79,19 @@ class QuantumPortAwareScheduler(driver.Scheduler): LOG.debug("Quantum plugin supports required \"%s\" extension" "for the scheduler." % name) return + LOG.error("Quantum plugin does not support required \"%s\" extension" " for the scheduler. Scheduler will quit." % CSCO_EXT_NAME) raise excp.ServiceUnavailable() - def schedule(self, context, topic, *args, **kwargs): + def _schedule(self, context, topic, request_spec, **kwargs): """Gets the host name from the Quantum service""" - instance_id = kwargs['instance_id'] + LOG.debug("Cisco Quantum Port-aware Scheduler is scheduling...") + instance_id = request_spec['instance_properties']['uuid'] user_id = \ - kwargs['request_spec']['instance_properties']['user_id'] + request_spec['instance_properties']['user_id'] project_id = \ - kwargs['request_spec']['instance_properties']['project_id'] + request_spec['instance_properties']['project_id'] instance_data_dict = \ {'novatenant': \ @@ -82,14 +100,15 @@ class QuantumPortAwareScheduler(driver.Scheduler): {'user_id': user_id, 'project_id': project_id}}} - client = Client(HOST, PORT, USE_SSL, format='json', tenant=TENANT_ID, - action_prefix=ACTION_PREFIX_CSCO) + client = Client(HOST, PORT, USE_SSL, format='json', version=VERSION, + uri_prefix=URI_PREFIX_CSCO, tenant=TENANT_ID, + logger=LOG) request_url = "/novatenants/" + project_id + ACTION data = client.do_request('PUT', request_url, body=instance_data_dict) hostname = data["host_list"]["host_1"] if not hostname: - raise driver.NoValidHost(_("Scheduler was unable to locate a host" + raise excp.NoValidHost(_("Scheduler was unable to locate a host" " for this request. 
Is the appropriate" " service running?")) diff --git a/quantum/plugins/cisco/nova/vifdirect.py b/quantum/plugins/cisco/nova/vifdirect.py index cfc891298..1554f9c3e 100644 --- a/quantum/plugins/cisco/nova/vifdirect.py +++ b/quantum/plugins/cisco/nova/vifdirect.py @@ -18,31 +18,37 @@ """VIF drivers for interface type direct.""" + from nova import exception as excp from nova import flags from nova import log as logging -from nova.network import linux_net -from nova.virt.libvirt import netutils -from nova import utils +from nova.openstack.common import cfg from nova.virt.vif import VIFDriver from quantum.client import Client -from quantum.common.wsgi import Serializer -LOG = logging.getLogger('quantum.plugins.cisco.nova.vifdirect') + +LOG = logging.getLogger(__name__) + +quantum_opts = [ + cfg.StrOpt('quantum_connection_host', + default='127.0.0.1', + help='HOST for connecting to quantum'), + cfg.StrOpt('quantum_connection_port', + default='9696', + help='PORT for connecting to quantum'), + cfg.StrOpt('quantum_default_tenant_id', + default="default", + help='Default tenant id when creating quantum networks'), + ] FLAGS = flags.FLAGS -flags.DEFINE_string('quantum_host', "127.0.0.1", - 'IP address of the quantum network service.') -flags.DEFINE_integer('quantum_port', 9696, - 'Listening port for Quantum network service') +FLAGS.register_opts(quantum_opts) -HOST = FLAGS.quantum_host -PORT = FLAGS.quantum_port +HOST = FLAGS.quantum_connection_host +PORT = FLAGS.quantum_connection_port USE_SSL = False -TENANT_ID = 'nova' -ACTION_PREFIX_EXT = '/v1.0' -ACTION_PREFIX_CSCO = ACTION_PREFIX_EXT + \ - '/extensions/csco/tenants/{tenant_id}' +VERSION = '1.0' +URI_PREFIX_CSCO = '/extensions/csco/tenants/{tenant_id}' TENANT_ID = 'nova' CSCO_EXT_NAME = 'Cisco Nova Tenant' ASSOCIATE_ACTION = '/associate_port' @@ -55,8 +61,9 @@ class Libvirt802dot1QbhDriver(VIFDriver): # We have to send a dummy tenant name here since the client # needs some tenant name, but the tenant name will not be used # since the extensions URL does not require it - client = Client(HOST, PORT, USE_SSL, format='json', - action_prefix=ACTION_PREFIX_EXT, tenant="dummy") + LOG.debug("Initializing Cisco Quantum VIF driver...") + client = Client(HOST, PORT, USE_SSL, format='json', version=VERSION, + uri_prefix="", tenant="dummy", logger=LOG) request_url = "/extensions" data = client.do_request('GET', request_url) LOG.debug("Obtained supported extensions from Quantum: %s" % data) @@ -73,8 +80,8 @@ class Libvirt802dot1QbhDriver(VIFDriver): def _update_configurations(self, instance, network, mapping, action): """Gets the device name and the profile name from Quantum""" - - instance_id = instance['id'] + LOG.debug("Cisco Quantum VIF driver performing: %s" % (action)) + instance_id = instance['uuid'] user_id = instance['user_id'] project_id = instance['project_id'] vif_id = mapping['vif_uuid'] @@ -87,8 +94,9 @@ class Libvirt802dot1QbhDriver(VIFDriver): 'project_id': project_id, 'vif_id': vif_id}}} - client = Client(HOST, PORT, USE_SSL, format='json', tenant=TENANT_ID, - action_prefix=ACTION_PREFIX_CSCO) + client = Client(HOST, PORT, USE_SSL, format='json', version=VERSION, + uri_prefix=URI_PREFIX_CSCO, tenant=TENANT_ID, + logger=LOG) request_url = "/novatenants/" + project_id + action data = client.do_request('PUT', request_url, body=instance_data_dict) -- 2.45.2
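
The crux of this fix is nova's move away from the old flags.DEFINE_* helpers
to cfg-based option declaration, as seen in both hunks above. For reference,
here is a minimal standalone sketch of the new registration pattern; the
option names mirror the patch, and the import paths assume the Essex-era nova
tree targeted by this change:

    from nova import flags
    from nova.openstack.common import cfg

    # Declare the options once...
    quantum_opts = [
        cfg.StrOpt('quantum_connection_host',
                   default='127.0.0.1',
                   help='HOST for connecting to quantum'),
        cfg.StrOpt('quantum_connection_port',
                   default='9696',
                   help='PORT for connecting to quantum'),
    ]

    # ...then register them against the global FLAGS object. This replaces
    # the removed flags.DEFINE_string()/DEFINE_integer() calls, whose
    # disappearance from nova is what broke the VIF driver and scheduler.
    FLAGS = flags.FLAGS
    FLAGS.register_opts(quantum_opts)

    # Registered options are then read back as plain attributes.
    HOST = FLAGS.quantum_connection_host
    PORT = FLAGS.quantum_connection_port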