import sqlalchemy as sa
-from neutron import context as nctx
-import neutron.db.api as db
-from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
-VLAN_SEGMENTATION = 'vlan'
-
UUID_LEN = 36
STR_LEN = 255
def eos_tenant_representation(self):
return {u'tenantId': self.tenant_id}
-
-
-def remember_tenant(tenant_id):
- """Stores a tenant information in repository.
-
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- tenant = AristaProvisionedTenants(tenant_id=tenant_id)
- session.add(tenant)
-
-
-def forget_tenant(tenant_id):
- """Removes a tenant information from repository.
-
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- (session.query(AristaProvisionedTenants).
- filter_by(tenant_id=tenant_id).
- delete())
-
-
-def get_all_tenants():
- """Returns a list of all tenants stored in repository."""
- session = db.get_session()
- with session.begin():
- return session.query(AristaProvisionedTenants).all()
-
-
-def num_provisioned_tenants():
- """Returns number of tenants stored in repository."""
- session = db.get_session()
- with session.begin():
- return session.query(AristaProvisionedTenants).count()
-
-
-def remember_vm(vm_id, host_id, port_id, network_id, tenant_id):
- """Stores all relevant information about a VM in repository.
-
- :param vm_id: globally unique identifier for VM instance
- :param host_id: ID of the host where the VM is placed
- :param port_id: globally unique port ID that connects VM to network
- :param network_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- vm = AristaProvisionedVms(
- vm_id=vm_id,
- host_id=host_id,
- port_id=port_id,
- network_id=network_id,
- tenant_id=tenant_id)
- session.add(vm)
-
-
-def forget_vm(vm_id, host_id, port_id, network_id, tenant_id):
- """Removes all relevant information about a VM from repository.
-
- :param vm_id: globally unique identifier for VM instance
- :param host_id: ID of the host where the VM is placed
- :param port_id: globally unique port ID that connects VM to network
- :param network_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- (session.query(AristaProvisionedVms).
- filter_by(vm_id=vm_id, host_id=host_id,
- port_id=port_id, tenant_id=tenant_id,
- network_id=network_id).delete())
-
-
-def remember_network(tenant_id, network_id, segmentation_id):
- """Stores all relevant information about a Network in repository.
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network_id: globally unique neutron network identifier
- :param segmentation_id: VLAN ID that is assigned to the network
- """
- session = db.get_session()
- with session.begin():
- net = AristaProvisionedNets(
- tenant_id=tenant_id,
- network_id=network_id,
- segmentation_id=segmentation_id)
- session.add(net)
-
-
-def forget_network(tenant_id, network_id):
- """Deletes all relevant information about a Network from repository.
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network_id: globally unique neutron network identifier
- """
- session = db.get_session()
- with session.begin():
- (session.query(AristaProvisionedNets).
- filter_by(tenant_id=tenant_id, network_id=network_id).
- delete())
-
-
-def get_segmentation_id(tenant_id, network_id):
- """Returns Segmentation ID (VLAN) associated with a network.
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network_id: globally unique neutron network identifier
- """
- session = db.get_session()
- with session.begin():
- net = (session.query(AristaProvisionedNets).
- filter_by(tenant_id=tenant_id,
- network_id=network_id).first())
- return net.segmentation_id if net else None
-
-
-def is_vm_provisioned(vm_id, host_id, port_id,
- network_id, tenant_id):
- """Checks if a VM is already known to EOS
-
- :returns: True, if yes; False otherwise.
- :param vm_id: globally unique identifier for VM instance
- :param host_id: ID of the host where the VM is placed
- :param port_id: globally unique port ID that connects VM to network
- :param network_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- num_vm = (session.query(AristaProvisionedVms).
- filter_by(tenant_id=tenant_id,
- vm_id=vm_id,
- port_id=port_id,
- network_id=network_id,
- host_id=host_id).count())
- return num_vm > 0
-
-
-def is_network_provisioned(tenant_id, network_id, seg_id=None):
- """Checks if a networks is already known to EOS
-
- :returns: True, if yes; False otherwise.
- :param tenant_id: globally unique neutron tenant identifier
- :param network_id: globally unique neutron network identifier
- :param seg_id: Optionally matches the segmentation ID (VLAN)
- """
- session = db.get_session()
- with session.begin():
- if not seg_id:
- num_nets = (session.query(AristaProvisionedNets).
- filter_by(tenant_id=tenant_id,
- network_id=network_id).count())
- else:
- num_nets = (session.query(AristaProvisionedNets).
- filter_by(tenant_id=tenant_id,
- network_id=network_id,
- segmentation_id=seg_id).count())
- return num_nets > 0
-
-
-def is_tenant_provisioned(tenant_id):
- """Checks if a tenant is already known to EOS
-
- :returns: True, if yes; False otherwise.
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- num_tenants = (session.query(AristaProvisionedTenants).
- filter_by(tenant_id=tenant_id).count())
- return num_tenants > 0
-
-
-def num_nets_provisioned(tenant_id):
- """Returns number of networks for a given tennat.
-
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- return (session.query(AristaProvisionedNets).
- filter_by(tenant_id=tenant_id).count())
-
-
-def num_vms_provisioned(tenant_id):
- """Returns number of VMs for a given tennat.
-
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- return (session.query(AristaProvisionedVms).
- filter_by(tenant_id=tenant_id).count())
-
-
-def get_networks(tenant_id):
- """Returns all networks for a given tenant in EOS-compatible format.
-
- See AristaRPCWrapper.get_network_list() for return value format.
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- model = AristaProvisionedNets
- # hack for pep8 E711: comparison to None should be
- # 'if cond is not None'
- none = None
- all_nets = (session.query(model).
- filter(model.tenant_id == tenant_id,
- model.segmentation_id != none))
- res = dict(
- (net.network_id, net.eos_network_representation(
- VLAN_SEGMENTATION))
- for net in all_nets
- )
- return res
-
-
-def get_vms(tenant_id):
- """Returns all VMs for a given tenant in EOS-compatible format.
-
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- model = AristaProvisionedVms
- # hack for pep8 E711: comparison to None should be
- # 'if cond is not None'
- none = None
- all_vms = (session.query(model).
- filter(model.tenant_id == tenant_id,
- model.host_id != none,
- model.vm_id != none,
- model.network_id != none,
- model.port_id != none))
- res = dict(
- (vm.vm_id, vm.eos_vm_representation())
- for vm in all_vms
- )
- return res
-
-
-def get_ports(tenant_id):
- """Returns all ports of VMs in EOS-compatible format.
-
- :param tenant_id: globally unique neutron tenant identifier
- """
- session = db.get_session()
- with session.begin():
- model = AristaProvisionedVms
- # hack for pep8 E711: comparison to None should be
- # 'if cond is not None'
- none = None
- all_ports = (session.query(model).
- filter(model.tenant_id == tenant_id,
- model.host_id != none,
- model.vm_id != none,
- model.network_id != none,
- model.port_id != none))
- res = dict(
- (port.port_id, port.eos_port_representation())
- for port in all_ports
- )
- return res
-
-
-def get_tenants():
- """Returns list of all tenants in EOS-compatible format."""
- session = db.get_session()
- with session.begin():
- model = AristaProvisionedTenants
- all_tenants = session.query(model)
- res = dict(
- (tenant.tenant_id, tenant.eos_tenant_representation())
- for tenant in all_tenants
- )
- return res
-
-
-class NeutronNets(db_base_plugin_v2.NeutronDbPluginV2):
- """Access to Neutron DB.
-
- Provides access to the Neutron Data bases for all provisioned
- networks as well ports. This data is used during the synchronization
- of DB between ML2 Mechanism Driver and Arista EOS
- Names of the networks and ports are not stroed in Arista repository
- They are pulled from Neutron DB.
- """
-
- def __init__(self):
- self.admin_ctx = nctx.get_admin_context()
-
- def get_network_name(self, tenant_id, network_id):
- network = self._get_network(tenant_id, network_id)
- network_name = None
- if network:
- network_name = network[0]['name']
- return network_name
-
- def get_all_networks_for_tenant(self, tenant_id):
- filters = {'tenant_id': [tenant_id]}
- return super(NeutronNets,
- self).get_networks(self.admin_ctx, filters=filters) or []
-
- def get_all_ports_for_tenant(self, tenant_id):
- filters = {'tenant_id': [tenant_id]}
- return super(NeutronNets,
- self).get_ports(self.admin_ctx, filters=filters) or []
-
- def get_shared_network_owner_id(self, network_id):
- filters = {'id': [network_id]}
- nets = self.get_networks(self.admin_ctx, filters=filters) or []
- if not nets:
- return
- if nets[0]['shared']:
- return nets[0]['tenant_id']
-
- def _get_network(self, tenant_id, network_id):
- filters = {'tenant_id': [tenant_id],
- 'id': [network_id]}
- return super(NeutronNets,
- self).get_networks(self.admin_ctx, filters=filters) or []
# See the License for the specific language governing permissions and
# limitations under the License.
-import itertools
import threading
-import jsonrpclib
+from networking_arista.common import db_lib
+from networking_arista.ml2 import arista_ml2
from oslo.config import cfg
from neutron.common import constants as n_const
-from neutron.common import utils
-from neutron.i18n import _LI, _LW
+from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import driver_api
LOG = logging.getLogger(__name__)
EOS_UNREACHABLE_MSG = _('Unable to reach EOS')
-DEFAULT_VLAN = 1
-
-
-class AristaRPCWrapper(object):
- """Wraps Arista JSON RPC.
-
- All communications between Neutron and EOS are over JSON RPC.
- EOS - operating system used on Arista hardware
- Command API - JSON RPC API provided by Arista EOS
- """
- def __init__(self):
- self._server = jsonrpclib.Server(self._eapi_host_url())
- self.keystone_conf = cfg.CONF.keystone_authtoken
- self.region = cfg.CONF.ml2_arista.region_name
- self.sync_interval = cfg.CONF.ml2_arista.sync_interval
- self._region_updated_time = None
- # The cli_commands dict stores the mapping between the CLI command key
- # and the actual CLI command.
- self.cli_commands = {}
- self.initialize_cli_commands()
-
- def _get_exit_mode_cmds(self, modes):
- """Returns a list of 'exit' commands for the modes.
-
- :param modes: a list of CLI modes to exit out of.
- """
- return ['exit'] * len(modes)
-
- def initialize_cli_commands(self):
- self.cli_commands['timestamp'] = []
-
- def check_cli_commands(self):
- """Checks whether the CLI commands are vaild.
-
- This method tries to execute the commands on EOS and if it succeedes
- the command is stored.
- """
- cmd = ['show openstack config region %s timestamp' % self.region]
- try:
- self._run_eos_cmds(cmd)
- self.cli_commands['timestamp'] = cmd
- except arista_exc.AristaRpcError:
- self.cli_commands['timestamp'] = []
- LOG.warn(_LW("'timestamp' command '%s' is not available on EOS"),
- cmd)
-
- def get_tenants(self):
- """Returns dict of all tenants known by EOS.
-
- :returns: dictionary containing the networks per tenant
- and VMs allocated per tenant
- """
- cmds = ['show openstack config region %s' % self.region]
- command_output = self._run_eos_cmds(cmds)
- tenants = command_output[0]['tenants']
-
- return tenants
-
- def plug_port_into_network(self, vm_id, host_id, port_id,
- net_id, tenant_id, port_name, device_owner):
- """Genric routine plug a port of a VM instace into network.
-
- :param vm_id: globally unique identifier for VM instance
-        :param host_id: ID of the host where the VM is placed
- :param port_id: globally unique port ID that connects VM to network
-        :param net_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- :param port_name: Name of the port - for display purposes
- :param device_owner: Device owner - e.g. compute or network:dhcp
- """
- if device_owner == n_const.DEVICE_OWNER_DHCP:
- self.plug_dhcp_port_into_network(vm_id,
- host_id,
- port_id,
- net_id,
- tenant_id,
- port_name)
- elif device_owner.startswith('compute'):
- self.plug_host_into_network(vm_id,
- host_id,
- port_id,
- net_id,
- tenant_id,
- port_name)
-
- def plug_host_into_network(self, vm_id, host, port_id,
- network_id, tenant_id, port_name):
- """Creates VLAN between TOR and compute host.
-
- :param vm_id: globally unique identifier for VM instance
- :param host: ID of the host where the VM is placed
- :param port_id: globally unique port ID that connects VM to network
- :param network_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- :param port_name: Name of the port - for display purposes
- """
- cmds = ['tenant %s' % tenant_id,
- 'vm id %s hostid %s' % (vm_id, host)]
- if port_name:
- cmds.append('port id %s name "%s" network-id %s' %
- (port_id, port_name, network_id))
- else:
- cmds.append('port id %s network-id %s' %
- (port_id, network_id))
- cmds.append('exit')
- cmds.append('exit')
- self._run_openstack_cmds(cmds)
-
- def plug_dhcp_port_into_network(self, dhcp_id, host, port_id,
- network_id, tenant_id, port_name):
- """Creates VLAN between TOR and dhcp host.
-
- :param dhcp_id: globally unique identifier for dhcp
- :param host: ID of the host where the dhcp is hosted
- :param port_id: globally unique port ID that connects dhcp to network
- :param network_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- :param port_name: Name of the port - for display purposes
- """
- cmds = ['tenant %s' % tenant_id,
- 'network id %s' % network_id]
- if port_name:
- cmds.append('dhcp id %s hostid %s port-id %s name "%s"' %
- (dhcp_id, host, port_id, port_name))
- else:
- cmds.append('dhcp id %s hostid %s port-id %s' %
- (dhcp_id, host, port_id))
- cmds.append('exit')
- self._run_openstack_cmds(cmds)
-
- def unplug_host_from_network(self, vm_id, host, port_id,
- network_id, tenant_id):
- """Removes previously configured VLAN between TOR and a host.
-
- :param vm_id: globally unique identifier for VM instance
- :param host: ID of the host where the VM is placed
- :param port_id: globally unique port ID that connects VM to network
- :param network_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- """
- cmds = ['tenant %s' % tenant_id,
- 'vm id %s hostid %s' % (vm_id, host),
- 'no port id %s' % port_id,
- 'exit',
- 'exit']
- self._run_openstack_cmds(cmds)
-
- def unplug_dhcp_port_from_network(self, dhcp_id, host, port_id,
- network_id, tenant_id):
- """Removes previously configured VLAN between TOR and a dhcp host.
-
- :param dhcp_id: globally unique identifier for dhcp
- :param host: ID of the host where the dhcp is hosted
- :param port_id: globally unique port ID that connects dhcp to network
- :param network_id: globally unique neutron network identifier
- :param tenant_id: globally unique neutron tenant identifier
- """
- cmds = ['tenant %s' % tenant_id,
- 'network id %s' % network_id,
- 'no dhcp id %s port-id %s' % (dhcp_id, port_id),
- 'exit']
- self._run_openstack_cmds(cmds)
-
- def sync_start(self):
- """Sends indication to EOS that ML2->EOS sync has started."""
-
- sync_start_cmd = ['sync start']
- self._run_openstack_cmds(sync_start_cmd)
-
- def sync_end(self):
- """Sends indication to EOS that ML2->EOS sync has completed."""
-
- sync_end_cmd = ['sync end']
- self._run_openstack_cmds(sync_end_cmd)
-
- def create_network(self, tenant_id, network):
- """Creates a single network on Arista hardware
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network: dict containing network_id, network_name and
- segmentation_id
- """
- self.create_network_bulk(tenant_id, [network])
-
- def create_network_bulk(self, tenant_id, network_list):
- """Creates a network on Arista Hardware
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network_list: list of dicts containing network_id, network_name
- and segmentation_id
- """
- cmds = ['tenant %s' % tenant_id]
- # Create a reference to function to avoid name lookups in the loop
- append_cmd = cmds.append
- for network in network_list:
- try:
- append_cmd('network id %s name "%s"' %
- (network['network_id'], network['network_name']))
- except KeyError:
- append_cmd('network id %s' % network['network_id'])
- # Enter segment mode without exiting out of network mode
- if not network['segmentation_id']:
- network['segmentation_id'] = DEFAULT_VLAN
- append_cmd('segment 1 type vlan id %d' %
- network['segmentation_id'])
- if network['shared']:
- append_cmd('shared')
- else:
- append_cmd('no shared')
- cmds.extend(self._get_exit_mode_cmds(['segment', 'network', 'tenant']))
- self._run_openstack_cmds(cmds)
-
- def create_network_segments(self, tenant_id, network_id,
- network_name, segments):
- """Creates a network on Arista Hardware
-
- Note: This method is not used at the moment. create_network()
- is used instead. This will be used once the support for
- multiple segments is added in Neutron.
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network_id: globally unique neutron network identifier
- :param network_name: Network name - for display purposes
- :param segments: List of segments in a given network
- """
- if segments:
- cmds = ['tenant %s' % tenant_id,
- 'network id %s name "%s"' % (network_id, network_name)]
- seg_num = 1
- for seg in segments:
- cmds.append('segment %d type %s id %d' % (seg_num,
- seg['network_type'], seg['segmentation_id']))
- seg_num += 1
- cmds.append('exit') # exit for segment mode
- cmds.append('exit') # exit for network mode
- cmds.append('exit') # exit for tenant mode
-
- self._run_openstack_cmds(cmds)
-
- def delete_network(self, tenant_id, network_id):
- """Deletes a specified network for a given tenant
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network_id: globally unique neutron network identifier
- """
- self.delete_network_bulk(tenant_id, [network_id])
-
- def delete_network_bulk(self, tenant_id, network_id_list):
- """Deletes the network ids specified for a tenant
-
- :param tenant_id: globally unique neutron tenant identifier
- :param network_id_list: list of globally unique neutron network
- identifiers
- """
- cmds = ['tenant %s' % tenant_id]
- for network_id in network_id_list:
- cmds.append('no network id %s' % network_id)
- cmds.extend(self._get_exit_mode_cmds(['network', 'tenant']))
- self._run_openstack_cmds(cmds)
-
- def delete_vm(self, tenant_id, vm_id):
- """Deletes a VM from EOS for a given tenant
-
- :param tenant_id : globally unique neutron tenant identifier
- :param vm_id : id of a VM that needs to be deleted.
- """
- self.delete_vm_bulk(tenant_id, [vm_id])
-
- def delete_vm_bulk(self, tenant_id, vm_id_list):
- """Deletes VMs from EOS for a given tenant
-
- :param tenant_id : globally unique neutron tenant identifier
-        :param vm_id_list : ids of VMs that need to be deleted.
- """
- cmds = ['tenant %s' % tenant_id]
- for vm_id in vm_id_list:
- cmds.append('no vm id %s' % vm_id)
- cmds.extend(self._get_exit_mode_cmds(['vm', 'tenant']))
- self._run_openstack_cmds(cmds)
-
- def create_vm_port_bulk(self, tenant_id, vm_port_list, vms):
- """Sends a bulk request to create ports.
-
-        :param tenant_id: globally unique neutron tenant identifier
-        :param vm_port_list: list of ports that need to be created.
-        :param vms: list of vms to which the ports will be attached.
- """
- cmds = ['tenant %s' % tenant_id]
- # Create a reference to function to avoid name lookups in the loop
- append_cmd = cmds.append
- for port in vm_port_list:
- try:
- vm = vms[port['device_id']]
- except KeyError:
- LOG.warn(_LW("VM id %(vmid)s not found for port %(portid)s"),
- {'vmid': port['device_id'], 'portid': port['id']})
- continue
-
- port_name = '' if 'name' not in port else 'name "%s"' % (
- port['name']
- )
-
- if port['device_owner'] == n_const.DEVICE_OWNER_DHCP:
- append_cmd('network id %s' % port['network_id'])
- append_cmd('dhcp id %s hostid %s port-id %s %s' %
- (vm['vmId'], vm['host'], port['id'], port_name))
- elif port['device_owner'].startswith('compute'):
- append_cmd('vm id %s hostid %s' % (vm['vmId'], vm['host']))
- append_cmd('port id %s %s network-id %s' %
- (port['id'], port_name, port['network_id']))
- else:
- LOG.warn(_LW("Unknown device owner: %s"), port['device_owner'])
- continue
-
- append_cmd('exit')
- self._run_openstack_cmds(cmds)
-
- def delete_tenant(self, tenant_id):
- """Deletes a given tenant and all its networks and VMs from EOS.
-
- :param tenant_id: globally unique neutron tenant identifier
- """
- self.delete_tenant_bulk([tenant_id])
-
- def delete_tenant_bulk(self, tenant_list):
- """Sends a bulk request to delete the tenants.
-
-        :param tenant_list: list of globally unique neutron tenant ids which
- need to be deleted.
- """
-
- cmds = []
- for tenant in tenant_list:
- cmds.append('no tenant %s' % tenant)
- cmds.append('exit')
- self._run_openstack_cmds(cmds)
-
- def delete_this_region(self):
- """Deleted the region data from EOS."""
- cmds = ['enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'no region %s' % self.region,
- 'exit',
- 'exit',
- 'exit']
- self._run_eos_cmds(cmds)
-
- def register_with_eos(self):
- """This is the registration request with EOS.
-
-        This is the initial handshake between Neutron and EOS, in which
-        critical end-point information is registered with EOS.
- """
- keystone_conf = self.keystone_conf
- # FIXME(ihrachys): plugins should not construct keystone URL
- # from configuration file and should instead rely on service
- # catalog contents
- auth_uri = utils.get_keystone_url(keystone_conf)
-
- cmds = ['auth url %(auth_url)s user %(user)s '
- 'password %(password)s tenant %(tenant)s' %
- {'auth_url': auth_uri,
- 'user': keystone_conf.admin_user,
- 'password': keystone_conf.admin_password,
- 'tenant': keystone_conf.admin_tenant_name}]
-
- log_cmds = ['auth url %(auth_url)s user %(user)s '
- 'password %(password)s tenant %(tenant)s' %
- {'auth_url': auth_uri,
- 'user': keystone_conf.admin_user,
- 'password': '******',
- 'tenant': keystone_conf.admin_tenant_name}]
-
- sync_interval_cmd = 'sync interval %d' % self.sync_interval
- cmds.append(sync_interval_cmd)
- log_cmds.append(sync_interval_cmd)
-
- self._run_openstack_cmds(cmds, commands_to_log=log_cmds)
-
- def clear_region_updated_time(self):
- """Clear the region updated time which forces a resync."""
-
- self._region_updated_time = None
-
- def region_in_sync(self):
- """Check whether EOS is in sync with Neutron."""
-
- eos_region_updated_times = self.get_region_updated_time()
- return (self._region_updated_time and
- (self._region_updated_time['regionTimestamp'] ==
- eos_region_updated_times['regionTimestamp']))
-
- def get_region_updated_time(self):
- """Return the timestamp of the last update.
-
- This method returns the time at which any entities in the region
- were updated.
- """
- timestamp_cmd = self.cli_commands['timestamp']
- if timestamp_cmd:
- return self._run_eos_cmds(commands=timestamp_cmd)[0]
- return None
-
- def _run_eos_cmds(self, commands, commands_to_log=None):
- """Execute/sends a CAPI (Command API) command to EOS.
-
- In this method, list of commands is appended with prefix and
- postfix commands - to make is understandble by EOS.
-
- :param commands : List of command to be executed on EOS.
- :param commands_to_log : This should be set to the command that is
- logged. If it is None, then the commands
- param is logged.
- """
-
- log_cmds = commands
- if commands_to_log:
- log_cmds = commands_to_log
-
- LOG.info(_LI('Executing command on Arista EOS: %s'), log_cmds)
-
- try:
- # this returns array of return values for every command in
- # full_command list
- ret = self._server.runCmds(version=1, cmds=commands)
- except Exception as error:
- host = cfg.CONF.ml2_arista.eapi_host
- error_msg_str = unicode(error)
- if commands_to_log:
- # The command might contain sensitive information. If the
- # command to log is different from the actual command, use
- # that in the error message.
- for cmd, log_cmd in itertools.izip(commands, log_cmds):
- error_msg_str = error_msg_str.replace(cmd, log_cmd)
- msg = (_('Error %(err)s while trying to execute '
- 'commands %(cmd)s on EOS %(host)s') %
- {'err': error_msg_str,
- 'cmd': commands_to_log,
- 'host': host})
- # Logging exception here can reveal passwords as the exception
- # contains the CLI command which contains the credentials.
- LOG.error(msg)
- raise arista_exc.AristaRpcError(msg=msg)
-
- return ret
-
- def _build_command(self, cmds):
- """Build full EOS's openstack CLI command.
-
- Helper method to add commands to enter and exit from openstack
- CLI modes.
-
- :param cmds: The openstack CLI commands that need to be executed
- in the openstack config mode.
- """
-
- full_command = [
- 'enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'region %s' % self.region,
- ]
- full_command.extend(cmds)
- full_command.extend(self._get_exit_mode_cmds(['region',
- 'openstack',
- 'cvx']))
- full_command.extend(self.cli_commands['timestamp'])
- return full_command
-
- def _run_openstack_cmds(self, commands, commands_to_log=None):
- """Execute/sends a CAPI (Command API) command to EOS.
-
- In this method, list of commands is appended with prefix and
- postfix commands - to make is understandble by EOS.
-
- :param commands : List of command to be executed on EOS.
- :param commands_to_logs : This should be set to the command that is
- logged. If it is None, then the commands
- param is logged.
- """
-
- full_command = self._build_command(commands)
- if commands_to_log:
- full_log_command = self._build_command(commands_to_log)
- else:
- full_log_command = None
- ret = self._run_eos_cmds(full_command, full_log_command)
-        # The timestamp command is appended last by _build_command(), so
-        # its output (the region updated time) is the last return value.
- if self.cli_commands['timestamp']:
- self._region_updated_time = ret[-1]
-
- def _eapi_host_url(self):
- self._validate_config()
-
- user = cfg.CONF.ml2_arista.eapi_username
- pwd = cfg.CONF.ml2_arista.eapi_password
- host = cfg.CONF.ml2_arista.eapi_host
-
- eapi_server_url = ('https://%s:%s@%s/command-api' %
- (user, pwd, host))
- return eapi_server_url
-
- def _validate_config(self):
- if cfg.CONF.ml2_arista.get('eapi_host') == '':
- msg = _('Required option eapi_host is not set')
- LOG.error(msg)
- raise arista_exc.AristaConfigError(msg=msg)
- if cfg.CONF.ml2_arista.get('eapi_username') == '':
- msg = _('Required option eapi_username is not set')
- LOG.error(msg)
- raise arista_exc.AristaConfigError(msg=msg)
-
-
-class SyncService(object):
- """Synchronization of information between Neutron and EOS
-
-    Periodically (at an interval set through a configuration option),
-    this service ensures that networks and VMs configured on EOS/Arista
-    HW are always in sync with the Neutron DB.
- """
- def __init__(self, rpc_wrapper, neutron_db):
- self._rpc = rpc_wrapper
- self._ndb = neutron_db
- self._force_sync = True
-
- def do_synchronize(self):
- try:
- # Send trigger to EOS that the ML2->EOS sync has started.
- self._rpc.sync_start()
- LOG.info(_LI('Sync start trigger sent to EOS'))
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
- return
-
- # Perform the sync
- self.synchronize()
-
- try:
- # Send trigger to EOS that the ML2->EOS sync is Complete.
- self._rpc.sync_end()
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
-
- def synchronize(self):
- """Sends data to EOS which differs from neutron DB."""
-
- LOG.info(_LI('Syncing Neutron <-> EOS'))
- try:
- # Get the time at which entities in the region were updated.
- # If the times match, then ML2 is in sync with EOS. Otherwise
- # perform a complete sync.
- if not self._force_sync and self._rpc.region_in_sync():
- LOG.info(_LI('OpenStack and EOS are in sync!'))
- return
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
- self._force_sync = True
- return
-
- try:
-            # Always register with EOS to ensure that it has the correct
-            # credentials
- self._rpc.register_with_eos()
- eos_tenants = self._rpc.get_tenants()
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
- self._force_sync = True
- return
-
- db_tenants = db.get_tenants()
-
- if not db_tenants and eos_tenants:
- # No tenants configured in Neutron. Clear all EOS state
- try:
- self._rpc.delete_this_region()
- LOG.info(_LI('No Tenants configured in Neutron DB. But %d '
- 'tenants discovered in EOS during '
- 'synchronization. Entire EOS region is cleared'),
- len(eos_tenants))
- # Re-register with EOS so that the timestamp is updated.
- self._rpc.register_with_eos()
- # Region has been completely cleaned. So there is nothing to
- # synchronize
- self._force_sync = False
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
- self._force_sync = True
- return
-
- # Delete tenants that are in EOS, but not in the database
- tenants_to_delete = frozenset(eos_tenants.keys()).difference(
- db_tenants.keys())
-
- if tenants_to_delete:
- try:
- self._rpc.delete_tenant_bulk(tenants_to_delete)
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
- self._force_sync = True
- return
-
- # None of the commands have failed till now. But if subsequent
- # operations fail, then force_sync is set to true
- self._force_sync = False
-
-        # To support shared networks, split the sync loop in two parts:
-        # in the first loop, delete unwanted VMs and networks and update
-        # networks; in the second loop, update VMs. This ensures that
-        # networks for all tenants are updated before VMs are updated.
- vms_to_update = {}
- for tenant in db_tenants:
- db_nets = db.get_networks(tenant)
- db_vms = db.get_vms(tenant)
- eos_nets = self._get_eos_networks(eos_tenants, tenant)
- eos_vms = self._get_eos_vms(eos_tenants, tenant)
-
- db_nets_key_set = frozenset(db_nets.keys())
- db_vms_key_set = frozenset(db_vms.keys())
- eos_nets_key_set = frozenset(eos_nets.keys())
- eos_vms_key_set = frozenset(eos_vms.keys())
-
- # Find the networks that are present on EOS, but not in Neutron DB
- nets_to_delete = eos_nets_key_set.difference(db_nets_key_set)
-
- # Find the VMs that are present on EOS, but not in Neutron DB
- vms_to_delete = eos_vms_key_set.difference(db_vms_key_set)
-
- # Find the Networks that are present in Neutron DB, but not on EOS
- nets_to_update = db_nets_key_set.difference(eos_nets_key_set)
-
- # Find the VMs that are present in Neutron DB, but not on EOS
- vms_to_update[tenant] = db_vms_key_set.difference(eos_vms_key_set)
-
- try:
- if vms_to_delete:
- self._rpc.delete_vm_bulk(tenant, vms_to_delete)
- if nets_to_delete:
- self._rpc.delete_network_bulk(tenant, nets_to_delete)
- if nets_to_update:
- # Create a dict of networks keyed by id.
- neutron_nets = dict(
- (network['id'], network) for network in
- self._ndb.get_all_networks_for_tenant(tenant)
- )
-
- networks = [
- {'network_id': net_id,
- 'segmentation_id':
- db_nets[net_id]['segmentationTypeId'],
- 'network_name':
- neutron_nets.get(net_id, {'name': ''})['name'],
- 'shared':
- neutron_nets.get(net_id,
- {'shared': False})['shared']}
- for net_id in nets_to_update
- ]
- self._rpc.create_network_bulk(tenant, networks)
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
- self._force_sync = True
-
- # Now update the VMs
- for tenant in vms_to_update:
- try:
- # Filter the ports to only the vms that we are interested
- # in.
- vm_ports = [
- port for port in self._ndb.get_all_ports_for_tenant(
- tenant) if port['device_id'] in vms_to_update[tenant]
- ]
- if vm_ports:
- db_vms = db.get_vms(tenant)
- self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms)
- except arista_exc.AristaRpcError:
- LOG.warning(EOS_UNREACHABLE_MSG)
- self._force_sync = True
-
- def _get_eos_networks(self, eos_tenants, tenant):
- networks = {}
- if eos_tenants and tenant in eos_tenants:
- networks = eos_tenants[tenant]['tenantNetworks']
- return networks
-
- def _get_eos_vms(self, eos_tenants, tenant):
- vms = {}
- if eos_tenants and tenant in eos_tenants:
- vms = eos_tenants[tenant]['tenantVmInstances']
- return vms
class AristaDriver(driver_api.MechanismDriver):
"""
def __init__(self, rpc=None):
- self.rpc = rpc or AristaRPCWrapper()
+ self.rpc = rpc or arista_ml2.AristaRPCWrapper()
self.db_nets = db.AristaProvisionedNets()
self.db_vms = db.AristaProvisionedVms()
self.db_tenants = db.AristaProvisionedTenants()
- self.ndb = db.NeutronNets()
+ self.ndb = db_lib.NeutronNets()
        conf = cfg.CONF.ml2_arista
- self.segmentation_type = db.VLAN_SEGMENTATION
+ self.segmentation_type = db_lib.VLAN_SEGMENTATION
self.timer = None
- self.eos = SyncService(self.rpc, self.ndb)
+ self.eos = arista_ml2.SyncService(self.rpc, self.ndb)
        self.sync_timeout = conf['sync_interval']
self.eos_sync_lock = threading.Lock()
tenant_id = network['tenant_id']
segmentation_id = segments[0]['segmentation_id']
with self.eos_sync_lock:
- db.remember_tenant(tenant_id)
- db.remember_network(tenant_id,
+ db_lib.remember_tenant(tenant_id)
+ db_lib.remember_network(tenant_id,
                                    network_id,
                                    segmentation_id)
vlan_id = segments[0]['segmentation_id']
shared_net = network['shared']
with self.eos_sync_lock:
- if db.is_network_provisioned(tenant_id, network_id):
+ if db_lib.is_network_provisioned(tenant_id, network_id):
try:
network_dict = {
'network_id': network_id,
vlan_id = new_network['provider:segmentation_id']
shared_net = new_network['shared']
with self.eos_sync_lock:
- if db.is_network_provisioned(tenant_id, network_id):
+ if db_lib.is_network_provisioned(tenant_id, network_id):
try:
network_dict = {
'network_id': network_id,
network_id = network['id']
tenant_id = network['tenant_id']
with self.eos_sync_lock:
- if db.is_network_provisioned(tenant_id, network_id):
- db.forget_network(tenant_id, network_id)
+ if db_lib.is_network_provisioned(tenant_id, network_id):
+ db_lib.forget_network(tenant_id, network_id)
# if necessary, delete tenant as well.
self.delete_tenant(tenant_id)
network_id = port['network_id']
tenant_id = port['tenant_id']
with self.eos_sync_lock:
- db.remember_tenant(tenant_id)
- db.remember_vm(device_id, host, port_id,
+ db_lib.remember_tenant(tenant_id)
+ db_lib.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
def create_port_postcommit(self, context):
tenant_id = port['tenant_id']
with self.eos_sync_lock:
hostname = self._host_name(host)
- vm_provisioned = db.is_vm_provisioned(device_id,
+ vm_provisioned = db_lib.is_vm_provisioned(device_id,
                                                      host,
                                                      port_id,
                                                      network_id,
# If network does not exist under this tenant,
# it may be a shared network. Get shared network owner Id
net_provisioned = (
- db.is_network_provisioned(tenant_id, network_id) or
+ db_lib.is_network_provisioned(tenant_id, network_id) or
self.ndb.get_shared_network_owner_id(network_id)
)
if vm_provisioned and net_provisioned:
tenant_id = port['tenant_id']
with self.eos_sync_lock:
hostname = self._host_name(host)
- segmentation_id = db.get_segmentation_id(tenant_id,
+ segmentation_id = db_lib.get_segmentation_id(tenant_id,
                                                         network_id)
- vm_provisioned = db.is_vm_provisioned(device_id,
+ vm_provisioned = db_lib.is_vm_provisioned(device_id,
                                                      host,
                                                      port_id,
                                                      network_id,
# If network does not exist under this tenant,
# it may be a shared network. Get shared network owner Id
net_provisioned = (
- db.is_network_provisioned(tenant_id, network_id,
- segmentation_id) or
+ db_lib.is_network_provisioned(tenant_id, network_id,
+ segmentation_id) or
self.ndb.get_shared_network_owner_id(network_id)
)
if vm_provisioned and net_provisioned:
network_id = port['network_id']
port_id = port['id']
with self.eos_sync_lock:
- if db.is_vm_provisioned(device_id, host_id, port_id,
- network_id, tenant_id):
- db.forget_vm(device_id, host_id, port_id,
+ if db_lib.is_vm_provisioned(device_id, host_id, port_id,
+ network_id, tenant_id):
+ db_lib.forget_vm(device_id, host_id, port_id,
                                 network_id, tenant_id)
# if necessary, delete tenant as well.
self.delete_tenant(tenant_id)
        A tenant is deleted only if there is no network or VM configured
        for this tenant.
"""
- objects_for_tenant = (db.num_nets_provisioned(tenant_id) +
- db.num_vms_provisioned(tenant_id))
+ objects_for_tenant = (db_lib.num_nets_provisioned(tenant_id) +
+ db_lib.num_vms_provisioned(tenant_id))
if not objects_for_tenant:
- db.forget_tenant(tenant_id)
+ db_lib.forget_tenant(tenant_id)
def _host_name(self, hostname):
fqdns_used = cfg.CONF.ml2_arista['use_fqdn']
def _cleanup_db(self):
"""Clean up any uncessary entries in our DB."""
- db_tenants = db.get_tenants()
+ db_tenants = db_lib.get_tenants()
for tenant in db_tenants:
neutron_nets = self.ndb.get_all_networks_for_tenant(tenant)
neutron_nets_id = []
for net in neutron_nets:
neutron_nets_id.append(net['id'])
- db_nets = db.get_networks(tenant)
+ db_nets = db_lib.get_networks(tenant)
for net_id in db_nets.keys():
if net_id not in neutron_nets_id:
- db.forget_network(tenant, net_id)
+ db_lib.forget_network(tenant, net_id)
--- /dev/null
+networking_arista
# See the License for the specific language governing permissions and
# limitations under the License.
+import sys
+
import mock
-from oslo.config import cfg
-from neutron.common import constants as n_const
-from neutron.common import utils
from neutron.extensions import portbindings
-from neutron.plugins.ml2.drivers.arista import db
-from neutron.plugins.ml2.drivers.arista import exceptions as arista_exc
-from neutron.plugins.ml2.drivers.arista import mechanism_arista as arista
-from neutron.tests import base
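+# The decomposed networking_arista package may not be installed in the
+# test environment, so stub its modules out before importing the shim
+# driver that depends on them.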
+with mock.patch.dict(sys.modules,
+ {'networking_arista': mock.Mock(),
+ 'networking_arista.ml2': mock.Mock(),
+ 'networking_arista.common': mock.Mock()}):
+ from neutron.plugins.ml2.drivers.arista import mechanism_arista
from neutron.tests.unit import testlib_api
-def setup_arista_wrapper_config(value=''):
- cfg.CONF.keystone_authtoken = fake_keystone_info_class()
- cfg.CONF.set_override('eapi_host', value, "ml2_arista")
- cfg.CONF.set_override('eapi_username', value, "ml2_arista")
- cfg.CONF.set_override('sync_interval', 10, "ml2_arista")
-
-
-def setup_valid_config():
- # Config is not valid if value is not set
- setup_arista_wrapper_config('value')
-
-
-class AristaProvisionedVlansStorageTestCase(testlib_api.SqlTestCase):
- """Test storing and retriving functionality of Arista mechanism driver.
-
- Tests all methods of this class by invoking them separately as well
- as a group.
- """
+class AristaDriverTestCase(testlib_api.SqlTestCase):
+ """Main test cases for Arista Mechanism driver.
- def test_tenant_is_remembered(self):
- tenant_id = 'test'
-
- db.remember_tenant(tenant_id)
- net_provisioned = db.is_tenant_provisioned(tenant_id)
- self.assertTrue(net_provisioned, 'Tenant must be provisioned')
-
- def test_tenant_is_removed(self):
- tenant_id = 'test'
-
- db.remember_tenant(tenant_id)
- db.forget_tenant(tenant_id)
- net_provisioned = db.is_tenant_provisioned(tenant_id)
- self.assertFalse(net_provisioned, 'The Tenant should be deleted')
-
- def test_network_is_remembered(self):
- tenant_id = 'test'
- network_id = '123'
- segmentation_id = 456
-
- db.remember_network(tenant_id, network_id, segmentation_id)
- net_provisioned = db.is_network_provisioned(tenant_id,
- network_id)
- self.assertTrue(net_provisioned, 'Network must be provisioned')
-
- def test_network_is_removed(self):
- tenant_id = 'test'
- network_id = '123'
-
- db.remember_network(tenant_id, network_id, '123')
- db.forget_network(tenant_id, network_id)
- net_provisioned = db.is_network_provisioned(tenant_id, network_id)
- self.assertFalse(net_provisioned, 'The network should be deleted')
-
- def test_vm_is_remembered(self):
- vm_id = 'VM-1'
- tenant_id = 'test'
- network_id = '123'
- port_id = 456
- host_id = 'ubuntu1'
-
- db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id)
- vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id,
- network_id, tenant_id)
- self.assertTrue(vm_provisioned, 'VM must be provisioned')
-
- def test_vm_is_removed(self):
- vm_id = 'VM-1'
- tenant_id = 'test'
- network_id = '123'
- port_id = 456
- host_id = 'ubuntu1'
-
- db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id)
- db.forget_vm(vm_id, host_id, port_id, network_id, tenant_id)
- vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id,
- network_id, tenant_id)
- self.assertFalse(vm_provisioned, 'The vm should be deleted')
-
- def test_remembers_multiple_networks(self):
- tenant_id = 'test'
- expected_num_nets = 100
- nets = ['id%s' % n for n in range(expected_num_nets)]
- for net_id in nets:
- db.remember_network(tenant_id, net_id, 123)
-
- num_nets_provisioned = db.num_nets_provisioned(tenant_id)
- self.assertEqual(expected_num_nets, num_nets_provisioned,
- 'There should be %d nets, not %d' %
- (expected_num_nets, num_nets_provisioned))
-
- def test_removes_all_networks(self):
- tenant_id = 'test'
- num_nets = 100
- old_nets = db.num_nets_provisioned(tenant_id)
- nets = ['id_%s' % n for n in range(num_nets)]
- for net_id in nets:
- db.remember_network(tenant_id, net_id, 123)
- for net_id in nets:
- db.forget_network(tenant_id, net_id)
-
- num_nets_provisioned = db.num_nets_provisioned(tenant_id)
- expected = old_nets
- self.assertEqual(expected, num_nets_provisioned,
- 'There should be %d nets, not %d' %
- (expected, num_nets_provisioned))
-
- def test_remembers_multiple_tenants(self):
- expected_num_tenants = 100
- tenants = ['id%s' % n for n in range(expected_num_tenants)]
- for tenant_id in tenants:
- db.remember_tenant(tenant_id)
-
- num_tenants_provisioned = db.num_provisioned_tenants()
- self.assertEqual(expected_num_tenants, num_tenants_provisioned,
- 'There should be %d tenants, not %d' %
- (expected_num_tenants, num_tenants_provisioned))
-
- def test_removes_multiple_tenants(self):
- num_tenants = 100
- tenants = ['id%s' % n for n in range(num_tenants)]
- for tenant_id in tenants:
- db.remember_tenant(tenant_id)
- for tenant_id in tenants:
- db.forget_tenant(tenant_id)
-
- num_tenants_provisioned = db.num_provisioned_tenants()
- expected = 0
- self.assertEqual(expected, num_tenants_provisioned,
- 'There should be %d tenants, not %d' %
- (expected, num_tenants_provisioned))
-
- def test_num_vm_is_valid(self):
- tenant_id = 'test'
- network_id = '123'
- port_id = 456
- host_id = 'ubuntu1'
-
- vm_to_remember = ['vm1', 'vm2', 'vm3']
- vm_to_forget = ['vm2', 'vm1']
-
- for vm in vm_to_remember:
- db.remember_vm(vm, host_id, port_id, network_id, tenant_id)
- for vm in vm_to_forget:
- db.forget_vm(vm, host_id, port_id, network_id, tenant_id)
-
- num_vms = len(db.get_vms(tenant_id))
- expected = len(vm_to_remember) - len(vm_to_forget)
-
- self.assertEqual(expected, num_vms,
- 'There should be %d records, '
- 'got %d records' % (expected, num_vms))
- # clean up afterwards
- db.forget_vm('vm3', host_id, port_id, network_id, tenant_id)
-
- def test_get_network_list_returns_eos_compatible_data(self):
- tenant = u'test-1'
- segm_type = 'vlan'
- network_id = u'123'
- network2_id = u'1234'
- vlan_id = 123
- vlan2_id = 1234
- expected_eos_net_list = {network_id: {u'networkId': network_id,
- u'segmentationTypeId': vlan_id,
- u'segmentationType': segm_type},
- network2_id: {u'networkId': network2_id,
- u'segmentationTypeId': vlan2_id,
- u'segmentationType': segm_type}}
-
- db.remember_network(tenant, network_id, vlan_id)
- db.remember_network(tenant, network2_id, vlan2_id)
-
- net_list = db.get_networks(tenant)
-        self.assertEqual(net_list, expected_eos_net_list,
-                         '%s != %s' % (net_list, expected_eos_net_list))
-
-
-class PositiveRPCWrapperValidConfigTestCase(base.BaseTestCase):
- """Test cases to test the RPC between Arista Driver and EOS.
-
- Tests all methods used to send commands between Arista Driver and EOS
+ Tests all mechanism driver APIs supported by Arista Driver. It invokes
+ all the APIs as they would be invoked in real world scenarios and
+ verifies the functionality.
"""
-
def setUp(self):
- super(PositiveRPCWrapperValidConfigTestCase, self).setUp()
- setup_valid_config()
- self.drv = arista.AristaRPCWrapper()
- self.region = 'RegionOne'
- self.drv._server = mock.MagicMock()
-
- def _get_exit_mode_cmds(self, modes):
- return ['exit'] * len(modes)
+ super(AristaDriverTestCase, self).setUp()
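+        # Replace the decomposed db_lib module with a mock so each test
+        # can assert the exact sequence of calls the driver makes.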
+ self.fake_rpc = mock.MagicMock()
+ mechanism_arista.db_lib = self.fake_rpc
+ self.drv = mechanism_arista.AristaDriver(self.fake_rpc)
- def test_no_exception_on_correct_configuration(self):
- self.assertIsNotNone(self.drv)
+ def tearDown(self):
+ super(AristaDriverTestCase, self).tearDown()
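+        # Stop the periodic EOS synchronization timer so the sync thread
+        # does not outlive the test.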
+ self.drv.stop_synchronization_thread()
- def test_sync_start(self):
- self.drv.sync_start()
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'sync start',
- 'exit', 'exit', 'exit']
+ def test_create_network_precommit(self):
+ tenant_id = 'ten-1'
+ network_id = 'net1-id'
+ segmentation_id = 1001
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
+ network_context = self._get_network_context(tenant_id,
+ network_id,
+ segmentation_id,
+ False)
+ self.drv.create_network_precommit(network_context)
- def test_sync_end(self):
- self.drv.sync_end()
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'sync end',
- 'exit', 'exit', 'exit']
+ expected_calls = [
+ mock.call.remember_tenant(tenant_id),
+ mock.call.remember_network(tenant_id,
+ network_id,
+ segmentation_id)
+ ]
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
- def test_plug_host_into_network(self):
- tenant_id = 'ten-1'
- vm_id = 'vm-1'
- port_id = 123
- network_id = 'net-id'
- host = 'host'
- port_name = '123-port'
-
- self.drv.plug_host_into_network(vm_id, host, port_id,
- network_id, tenant_id, port_name)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'vm id vm-1 hostid host',
- 'port id 123 name "123-port" network-id net-id',
- 'exit', 'exit', 'exit', 'exit', 'exit']
-
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_plug_dhcp_port_into_network(self):
- tenant_id = 'ten-1'
- vm_id = 'vm-1'
- port_id = 123
- network_id = 'net-id'
- host = 'host'
- port_name = '123-port'
-
- self.drv.plug_dhcp_port_into_network(vm_id, host, port_id,
- network_id, tenant_id, port_name)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'network id net-id',
- 'dhcp id vm-1 hostid host port-id 123 name "123-port"',
- 'exit', 'exit', 'exit', 'exit']
-
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_unplug_host_from_network(self):
+ def test_create_network_postcommit(self):
tenant_id = 'ten-1'
- vm_id = 'vm-1'
- port_id = 123
- network_id = 'net-id'
- host = 'host'
- self.drv.unplug_host_from_network(vm_id, host, port_id,
- network_id, tenant_id)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'vm id vm-1 hostid host',
- 'no port id 123',
- 'exit', 'exit', 'exit', 'exit', 'exit']
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_unplug_dhcp_port_from_network(self):
- tenant_id = 'ten-1'
- vm_id = 'vm-1'
- port_id = 123
- network_id = 'net-id'
- host = 'host'
-
- self.drv.unplug_dhcp_port_from_network(vm_id, host, port_id,
- network_id, tenant_id)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'network id net-id',
- 'no dhcp id vm-1 port-id 123',
- 'exit', 'exit', 'exit', 'exit']
-
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
+ network_id = 'net1-id'
+ segmentation_id = 1001
- def test_create_network(self):
- tenant_id = 'ten-1'
- network = {
- 'network_id': 'net-id',
- 'network_name': 'net-name',
- 'segmentation_id': 123,
- 'shared': False}
- self.drv.create_network(tenant_id, network)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'network id net-id name "net-name"',
- 'segment 1 type vlan id 123',
- 'no shared',
- 'exit', 'exit', 'exit', 'exit', 'exit', 'exit']
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_create_shared_network(self):
- tenant_id = 'ten-1'
- network = {
- 'network_id': 'net-id',
- 'network_name': 'net-name',
- 'segmentation_id': 123,
- 'shared': True}
- self.drv.create_network(tenant_id, network)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'network id net-id name "net-name"',
- 'segment 1 type vlan id 123',
- 'shared',
- 'exit', 'exit', 'exit', 'exit', 'exit', 'exit']
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_create_network_bulk(self):
- tenant_id = 'ten-2'
- num_networks = 10
- networks = [{
- 'network_id': 'net-id-%d' % net_id,
- 'network_name': 'net-name-%d' % net_id,
- 'segmentation_id': net_id,
- 'shared': True} for net_id in range(1, num_networks)
+ network_context = self._get_network_context(tenant_id,
+ network_id,
+ segmentation_id,
+ False)
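+        # Pretend the network is already provisioned in the driver's DB so
+        # that postcommit proceeds to program EOS.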
+ mechanism_arista.db_lib.is_network_provisioned.return_value = True
+ network = network_context.current
+ segments = network_context.network_segments
+ net_dict = {
+ 'network_id': network['id'],
+ 'segmentation_id': segments[0]['segmentation_id'],
+ 'network_name': network['name'],
+ 'shared': network['shared']}
+
+ self.drv.create_network_postcommit(network_context)
+
+ expected_calls = [
+ mock.call.is_network_provisioned(tenant_id, network_id),
+ mock.call.create_network(tenant_id, net_dict),
]
- self.drv.create_network_bulk(tenant_id, networks)
- cmds = ['enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'region RegionOne',
- 'tenant ten-2']
- for net_id in range(1, num_networks):
- cmds.append('network id net-id-%d name "net-name-%d"' %
- (net_id, net_id))
- cmds.append('segment 1 type vlan id %d' % net_id)
- cmds.append('shared')
-
- cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
- 'cvx', 'configure', 'enable']))
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_delete_network(self):
- tenant_id = 'ten-1'
- network_id = 'net-id'
- self.drv.delete_network(tenant_id, network_id)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'no network id net-id',
- 'exit', 'exit', 'exit', 'exit', 'exit']
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_delete_network_bulk(self):
- tenant_id = 'ten-2'
- num_networks = 10
- networks = [{
- 'network_id': 'net-id-%d' % net_id,
- 'network_name': 'net-name-%d' % net_id,
- 'segmentation_id': net_id} for net_id in range(1, num_networks)
- ]
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
- networks = ['net-id-%d' % net_id for net_id in range(1, num_networks)]
- self.drv.delete_network_bulk(tenant_id, networks)
- cmds = ['enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'region RegionOne',
- 'tenant ten-2']
- for net_id in range(1, num_networks):
- cmds.append('no network id net-id-%d' % net_id)
-
- cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
- 'cvx', 'configure']))
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_delete_vm(self):
- tenant_id = 'ten-1'
- vm_id = 'vm-id'
- self.drv.delete_vm(tenant_id, vm_id)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne',
- 'tenant ten-1', 'no vm id vm-id',
- 'exit', 'exit', 'exit', 'exit', 'exit']
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_delete_vm_bulk(self):
- tenant_id = 'ten-2'
- num_vms = 10
- vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms)]
- self.drv.delete_vm_bulk(tenant_id, vm_ids)
-
- cmds = ['enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'region RegionOne',
- 'tenant ten-2']
-
- for vm_id in range(1, num_vms):
- cmds.append('no vm id vm-id-%d' % vm_id)
-
- cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
- 'cvx', 'configure']))
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_create_vm_port_bulk(self):
- tenant_id = 'ten-3'
- num_vms = 10
- num_ports_per_vm = 2
-
- vms = dict(
- ('vm-id-%d' % vm_id, {
- 'vmId': 'vm-id-%d' % vm_id,
- 'host': 'host_%d' % vm_id,
- }
- ) for vm_id in range(1, num_vms)
- )
-
- devices = [n_const.DEVICE_OWNER_DHCP, 'compute']
- vm_port_list = []
-
- net_count = 1
- for vm_id in range(1, num_vms):
- for port_id in range(1, num_ports_per_vm):
- port = {
- 'id': 'port-id-%d-%d' % (vm_id, port_id),
- 'device_id': 'vm-id-%d' % vm_id,
- 'device_owner': devices[(vm_id + port_id) % 2],
- 'network_id': 'network-id-%d' % net_count,
- 'name': 'port-%d-%d' % (vm_id, port_id)
- }
- vm_port_list.append(port)
- net_count += 1
-
- self.drv.create_vm_port_bulk(tenant_id, vm_port_list, vms)
- cmds = ['enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'region RegionOne',
- 'tenant ten-3']
-
- net_count = 1
- for vm_count in range(1, num_vms):
- host = 'host_%s' % vm_count
- for port_count in range(1, num_ports_per_vm):
- vm_id = 'vm-id-%d' % vm_count
- device_owner = devices[(vm_count + port_count) % 2]
- port_name = '"port-%d-%d"' % (vm_count, port_count)
- network_id = 'network-id-%d' % net_count
- port_id = 'port-id-%d-%d' % (vm_count, port_count)
- if device_owner == 'network:dhcp':
- cmds.append('network id %s' % network_id)
- cmds.append('dhcp id %s hostid %s port-id %s name %s' % (
- vm_id, host, port_id, port_name))
- elif device_owner == 'compute':
- cmds.append('vm id %s hostid %s' % (vm_id, host))
- cmds.append('port id %s name %s network-id %s' % (
- port_id, port_name, network_id))
- net_count += 1
-
- cmds.extend(self._get_exit_mode_cmds(['tenant', 'region',
- 'openstack', 'cvx']))
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_delete_tenant(self):
+ def test_delete_network_precommit(self):
tenant_id = 'ten-1'
- self.drv.delete_tenant(tenant_id)
- cmds = ['enable', 'configure', 'cvx', 'service openstack',
- 'region RegionOne', 'no tenant ten-1',
- 'exit', 'exit', 'exit', 'exit']
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_delete_tenant_bulk(self):
- num_tenants = 10
- tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants)]
- self.drv.delete_tenant_bulk(tenant_list)
- cmds = ['enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'region RegionOne']
- for ten_id in range(1, num_tenants):
- cmds.append('no tenant ten-%d' % ten_id)
-
- cmds.extend(self._get_exit_mode_cmds(['region', 'openstack',
- 'cvx', 'configure']))
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_get_network_info_returns_none_when_no_such_net(self):
- expected = []
- self.drv.get_tenants = mock.MagicMock()
- self.drv.get_tenants.return_value = []
-
- net_info = self.drv.get_tenants()
-
- self.drv.get_tenants.assert_called_once_with()
-        self.assertEqual(net_info, expected, ('Network info must be "None" '
-                                              'for unknown network'))
-
- def test_get_network_info_returns_info_for_available_net(self):
- valid_network_id = '12345'
- valid_net_info = {'network_id': valid_network_id,
- 'some_info': 'net info'}
- known_nets = valid_net_info
-
- self.drv.get_tenants = mock.MagicMock()
- self.drv.get_tenants.return_value = known_nets
-
- net_info = self.drv.get_tenants()
- self.assertEqual(net_info, valid_net_info,
- ('Must return network info for a valid net'))
-
- def test_check_cli_commands(self):
- self.drv.check_cli_commands()
- cmds = ['show openstack config region RegionOne timestamp']
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
- def test_register_with_eos(self):
- self.drv.register_with_eos()
- auth = fake_keystone_info_class()
- auth_cmd = (
- 'auth url %(auth_url)s user %(user)s '
- 'password %(password)s tenant %(tenant)s' %
- {'auth_url': utils.get_keystone_url(auth),
- 'user': auth.admin_user,
- 'password': auth.admin_password,
- 'tenant': auth.admin_tenant_name}
- )
- cmds = ['enable',
- 'configure',
- 'cvx',
- 'service openstack',
- 'region %s' % self.region,
- auth_cmd,
- 'sync interval %d' % cfg.CONF.ml2_arista.sync_interval,
- 'exit',
- 'exit',
- 'exit',
- ]
- self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
-
-
-class AristaRPCWrapperInvalidConfigTestCase(base.BaseTestCase):
- """Negative test cases to test the Arista Driver configuration."""
-
- def setUp(self):
- super(AristaRPCWrapperInvalidConfigTestCase, self).setUp()
- self.setup_invalid_config() # Invalid config, required options not set
-
- def setup_invalid_config(self):
- setup_arista_wrapper_config('')
+ network_id = 'net1-id'
+ segmentation_id = 1001
- def test_raises_exception_on_wrong_configuration(self):
- self.assertRaises(arista_exc.AristaConfigError,
- arista.AristaRPCWrapper)
+ network_context = self._get_network_context(tenant_id,
+ network_id,
+ segmentation_id,
+ False)
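+ # Report the network as provisioned and leave no other networks or
+ # VMs for the tenant, so the driver should forget the tenant too.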
+ mechanism_arista.db_lib.is_network_provisioned.return_value = True
+ mechanism_arista.db_lib.num_nets_provisioned.return_value = 0
+ mechanism_arista.db_lib.num_vms_provisioned.return_value = 0
+ self.drv.delete_network_precommit(network_context)
+ expected_calls = [
+ mock.call.is_network_provisioned(tenant_id, network_id),
+ mock.call.forget_network(tenant_id, network_id),
+ mock.call.num_nets_provisioned(tenant_id),
+ mock.call.num_vms_provisioned(tenant_id),
+ mock.call.forget_tenant(tenant_id),
+ ]
-class NegativeRPCWrapperTestCase(base.BaseTestCase):
- """Negative test cases to test the RPC between Arista Driver and EOS."""
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
- def setUp(self):
- super(NegativeRPCWrapperTestCase, self).setUp()
- setup_valid_config()
+ def test_delete_network_postcommit(self):
+ tenant_id = 'ten-1'
+ network_id = 'net1-id'
+ segmentation_id = 1001
- def test_exception_is_raised_on_json_server_error(self):
- drv = arista.AristaRPCWrapper()
+ network_context = self._get_network_context(tenant_id,
+ network_id,
+ segmentation_id,
+ False)
- drv._server = mock.MagicMock()
- drv._server.runCmds.side_effect = Exception('server error')
- self.assertRaises(arista_exc.AristaRpcError, drv.get_tenants)
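+ # postcommit is where the delete is pushed down to EOS; only the
+ # delete_network call is expected here.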
+ self.drv.delete_network_postcommit(network_context)
+ expected_calls = [
+ mock.call.delete_network(tenant_id, network_id),
+ ]
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
-class RealNetStorageAristaDriverTestCase(testlib_api.SqlTestCase):
- """Main test cases for Arista Mechanism driver.
+ def test_create_port_precommit(self):
+ tenant_id = 'ten-1'
+ network_id = 'net1-id'
+ segmentation_id = 1001
+ vm_id = 'vm1'
- Tests all mechanism driver APIs supported by Arista Driver. It invokes
- all the APIs as they would be invoked in real world scenarios and
- verifies the functionality.
- """
- def setUp(self):
- super(RealNetStorageAristaDriverTestCase, self).setUp()
- self.fake_rpc = mock.MagicMock()
- self.drv = arista.AristaDriver(self.fake_rpc)
+ network_context = self._get_network_context(tenant_id,
+ network_id,
+ segmentation_id,
+ False)
+
+ port_context = self._get_port_context(tenant_id,
+ network_id,
+ vm_id,
+ network_context)
+ host_id = port_context.current['binding:host_id']
+ port_id = port_context.current['id']
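+ # precommit should record the tenant and the VM/port binding in the
+ # provisioning database before anything is pushed to EOS.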
+ self.drv.create_port_precommit(port_context)
+
+ expected_calls = [
+ mock.call.remember_tenant(tenant_id),
+ mock.call.remember_vm(vm_id, host_id, port_id,
+ network_id, tenant_id)
+ ]
- def tearDown(self):
- super(RealNetStorageAristaDriverTestCase, self).tearDown()
- self.drv.stop_synchronization_thread()
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
- def test_create_and_delete_network(self):
+ def test_create_port_postcommit(self):
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
+ vm_id = 'vm1'
network_context = self._get_network_context(tenant_id,
network_id,
- segmentation_id)
- self.drv.create_network_precommit(network_context)
- net_provisioned = db.is_network_provisioned(tenant_id, network_id)
- self.assertTrue(net_provisioned, 'The network should be created')
-
- expected_num_nets = 1
- num_nets_provisioned = db.num_nets_provisioned(tenant_id)
- self.assertEqual(expected_num_nets, num_nets_provisioned,
- 'There should be %d nets, not %d' %
- (expected_num_nets, num_nets_provisioned))
-
- #Now test the delete network
- self.drv.delete_network_precommit(network_context)
- net_provisioned = db.is_network_provisioned(tenant_id, network_id)
- self.assertFalse(net_provisioned, 'The network should be created')
+ segmentation_id,
+ False)
+ port_context = self._get_port_context(tenant_id,
+ network_id,
+ vm_id,
+ network_context)
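+ # Pretend the VM and its network are already provisioned so that
+ # postcommit goes on to plug the port into the network.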
+ mechanism_arista.db_lib.is_vm_provisioned.return_value = True
+ mechanism_arista.db_lib.is_network_provisioned.return_value = True
+ mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1
+
+ port = port_context.current
+ device_id = port['device_id']
+ device_owner = port['device_owner']
+ host_id = port['binding:host_id']
+ port_id = port['id']
+ port_name = port['name']
+
+ self.drv.create_port_postcommit(port_context)
+
+ expected_calls = [
+ mock.call.is_vm_provisioned(device_id, host_id, port_id,
+ network_id, tenant_id),
+ mock.call.is_network_provisioned(tenant_id, network_id),
+ mock.call.plug_port_into_network(device_id, host_id, port_id,
+ network_id, tenant_id,
+ port_name, device_owner)
+ ]
- expected_num_nets = 0
- num_nets_provisioned = db.num_nets_provisioned(tenant_id)
- self.assertEqual(expected_num_nets, num_nets_provisioned,
- 'There should be %d nets, not %d' %
- (expected_num_nets, num_nets_provisioned))
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
- def test_create_and_delete_multiple_networks(self):
+ # Now test the port delete operations.
+ def test_delete_port_precommit(self):
tenant_id = 'ten-1'
- expected_num_nets = 100
+ network_id = 'net1-id'
segmentation_id = 1001
- nets = ['id%s' % n for n in range(expected_num_nets)]
- for net_id in nets:
- network_context = self._get_network_context(tenant_id,
- net_id,
- segmentation_id)
- self.drv.create_network_precommit(network_context)
-
- num_nets_provisioned = db.num_nets_provisioned(tenant_id)
- self.assertEqual(expected_num_nets, num_nets_provisioned,
- 'There should be %d nets, not %d' %
- (expected_num_nets, num_nets_provisioned))
-
- #now test the delete networks
- for net_id in nets:
- network_context = self._get_network_context(tenant_id,
- net_id,
- segmentation_id)
- self.drv.delete_network_precommit(network_context)
-
- num_nets_provisioned = db.num_nets_provisioned(tenant_id)
- expected_num_nets = 0
- self.assertEqual(expected_num_nets, num_nets_provisioned,
- 'There should be %d nets, not %d' %
- (expected_num_nets, num_nets_provisioned))
-
- def test_create_and_delete_ports(self):
+ vm_id = 'vm1'
+
+ network_context = self._get_network_context(tenant_id,
+ network_id,
+ segmentation_id,
+ False)
+
+ port_context = self._get_port_context(tenant_id,
+ network_id,
+ vm_id,
+ network_context)
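+ # The VM is provisioned and nothing else remains for the tenant,
+ # so the driver should forget both the VM and the tenant.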
+ mechanism_arista.db_lib.is_vm_provisioned.return_value = True
+ mechanism_arista.db_lib.num_nets_provisioned.return_value = 0
+ mechanism_arista.db_lib.num_vms_provisioned.return_value = 0
+ self.drv.delete_port_precommit(port_context)
+
+ host_id = port_context.current['binding:host_id']
+ port_id = port_context.current['id']
+ expected_calls = [
+ mock.call.is_vm_provisioned(vm_id, host_id, port_id,
+ network_id, tenant_id),
+ mock.call.forget_vm(vm_id, host_id, port_id,
+ network_id, tenant_id),
+ mock.call.num_nets_provisioned(tenant_id),
+ mock.call.num_vms_provisioned(tenant_id),
+ mock.call.forget_tenant(tenant_id),
+ ]
+
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
+
+ def test_delete_port_postcommit(self):
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
- vms = ['vm1', 'vm2', 'vm3']
+ vm_id = 'vm1'
network_context = self._get_network_context(tenant_id,
network_id,
- segmentation_id)
- self.drv.create_network_precommit(network_context)
-
- for vm_id in vms:
- port_context = self._get_port_context(tenant_id,
- network_id,
- vm_id,
- network_context)
- self.drv.create_port_precommit(port_context)
+ segmentation_id,
+ False)
+ port_context = self._get_port_context(tenant_id,
+ network_id,
+ vm_id,
+ network_context)
+ port = port_context.current
+ device_id = port['device_id']
+ host_id = port['binding:host_id']
+ port_id = port['id']
+
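+ # postcommit should tell EOS to unplug the host from the network.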
+ self.drv.delete_port_postcommit(port_context)
+
+ expected_calls = [
+ mock.call.unplug_host_from_network(device_id, host_id, port_id,
+ network_id, tenant_id)
+ ]
- vm_list = db.get_vms(tenant_id)
- provisioned_vms = len(vm_list)
- expected_vms = len(vms)
- self.assertEqual(expected_vms, provisioned_vms,
- 'There should be %d '
- 'hosts, not %d' % (expected_vms, provisioned_vms))
+ mechanism_arista.db_lib.assert_has_calls(expected_calls)
- # Now test the delete ports
- for vm_id in vms:
- port_context = self._get_port_context(tenant_id,
- network_id,
- vm_id,
- network_context)
- self.drv.delete_port_precommit(port_context)
-
- vm_list = db.get_vms(tenant_id)
- provisioned_vms = len(vm_list)
- expected_vms = 0
- self.assertEqual(expected_vms, provisioned_vms,
- 'There should be %d '
- 'VMs, not %d' % (expected_vms, provisioned_vms))
-
- def _get_network_context(self, tenant_id, net_id, seg_id):
+ def _get_network_context(self, tenant_id, net_id, seg_id, shared):
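+ # The new 'shared' argument lets tests cover shared networks as well.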
network = {'id': net_id,
- 'tenant_id': tenant_id}
+ 'tenant_id': tenant_id,
+ 'name': 'test-net',
+ 'shared': shared}
network_segments = [{'segmentation_id': seg_id}]
return FakeNetworkContext(network, network_segments, network)

def _get_port_context(self, tenant_id, net_id, vm_id, network):
port = {'device_id': vm_id,
'device_owner': 'compute',
'binding:host_id': 'ubuntu1',
+ 'name': 'test-port',
'tenant_id': tenant_id,
'id': 101,
'network_id': net_id