--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""nvp lbaas plugin
+
+Revision ID: 3d6fae8b70b0
+Revises: 3ed8f075e38a
+Create Date: 2013-09-13 19:34:41.522665
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3d6fae8b70b0'
+down_revision = '3ed8f075e38a'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = [
+ 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin'
+]
+
+from alembic import op
+import sqlalchemy as sa
+
+from neutron.db import migration
+
+
+def upgrade(active_plugins=None, options=None):
+ if not migration.should_run(active_plugins, migration_for_plugins):
+ return
+
+ op.create_table(
+ 'vcns_edge_pool_bindings',
+ sa.Column('pool_id', sa.String(length=36), nullable=False),
+ sa.Column('edge_id', sa.String(length=36), nullable=False),
+ sa.Column('pool_vseid', sa.String(length=36), nullable=True),
+ sa.ForeignKeyConstraint(['pool_id'], ['pools.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('pool_id', 'edge_id')
+ )
+ op.create_table(
+ 'vcns_edge_monitor_bindings',
+ sa.Column('monitor_id', sa.String(length=36), nullable=False),
+ sa.Column('edge_id', sa.String(length=36), nullable=False),
+ sa.Column('monitor_vseid', sa.String(length=36), nullable=True),
+ sa.ForeignKeyConstraint(['monitor_id'], ['healthmonitors.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('monitor_id', 'edge_id')
+ )
+ op.create_table(
+ 'vcns_edge_vip_bindings',
+ sa.Column('vip_id', sa.String(length=36), nullable=False),
+ sa.Column('edge_id', sa.String(length=36), nullable=True),
+ sa.Column('vip_vseid', sa.String(length=36), nullable=True),
+ sa.Column('app_profileid', sa.String(length=36), nullable=True),
+ sa.ForeignKeyConstraint(['vip_id'], ['vips.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('vip_id')
+ )
+
+
+def downgrade(active_plugins=None, options=None):
+ if not migration.should_run(active_plugins, migration_for_plugins):
+ return
+
+ op.drop_table('vcns_edge_vip_bindings')
+ op.drop_table('vcns_edge_monitor_bindings')
+ op.drop_table('vcns_edge_pool_bindings')
from neutron.common import exceptions as q_exc
from neutron.db.firewall import firewall_db
from neutron.db import l3_db
+from neutron.db.loadbalancer import loadbalancer_db
from neutron.db import routedserviceinsertion_db as rsi_db
from neutron.extensions import firewall as fw_ext
+from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron.plugins.nicira.common import config # noqa
constants as vcns_const)
from neutron.plugins.nicira.vshield.common.constants import RouterStatus
from neutron.plugins.nicira.vshield.common import exceptions
+from neutron.plugins.nicira.vshield.tasks.constants import TaskState
from neutron.plugins.nicira.vshield.tasks.constants import TaskStatus
from neutron.plugins.nicira.vshield import vcns_driver
from sqlalchemy.orm import exc as sa_exc
NeutronPlugin.NvpPluginV2,
rsi_db.RoutedServiceInsertionDbMixin,
firewall_db.Firewall_db_mixin,
+ loadbalancer_db.LoadBalancerPluginDb
):
+
supported_extension_aliases = (
NeutronPlugin.NvpPluginV2.supported_extension_aliases + [
"service-router",
"routed-service-insertion",
- "fwaas"
+ "fwaas",
+ "lbaas"
])
def __init__(self):
binding['edge_id'],
snat, dnat)
- def _update_interface(self, context, router):
+ def _update_interface(self, context, router, sync=False):
addr, mask, nexthop = self._get_external_attachment_info(
context, router)
for fip in fip_db:
if fip.fixed_port_id:
secondary.append(fip.floating_ip_address)
+        # Add all vip addresses bound on the router
+ vip_addrs = self._get_all_vip_addrs_by_router_id(context,
+ router['id'])
+ secondary.extend(vip_addrs)
binding = vcns_db.get_vcns_router_binding(context.session,
router['id'])
- self.vcns_driver.update_interface(
+ task = self.vcns_driver.update_interface(
router['id'], binding['edge_id'],
vcns_const.EXTERNAL_VNIC_INDEX,
self.vcns_driver.external_network,
addr, mask, secondary=secondary)
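+        # Optionally block until the edge task finishes applying the change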
+ if sync:
+ task.wait(TaskState.RESULT)
def _update_router_gw_info(self, context, router_id, info):
if not self._is_advanced_service_router(context, router_id):
context, fwr['id'], edge_id)
return fwp
+ #
+ # LBAAS service plugin implementation
+ #
+ def _get_edge_id_by_vip_id(self, context, vip_id):
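+        # Resolve vip -> router binding -> vcns edge serving that router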
+ try:
+ router_binding = self._get_resource_router_id_bindings(
+ context, loadbalancer_db.Vip, resource_ids=[vip_id])[0]
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to find the edge with "
+ "vip_id: %s"), vip_id)
+ service_binding = vcns_db.get_vcns_router_binding(
+ context.session, router_binding.router_id)
+ return service_binding.edge_id
+
+ def _get_all_vip_addrs_by_router_id(
+ self, context, router_id):
+ vip_bindings = self._get_resource_router_id_bindings(
+ context, loadbalancer_db.Vip, router_ids=[router_id])
+ vip_addrs = []
+ for vip_binding in vip_bindings:
+ vip = self.get_vip(context, vip_binding.resource_id)
+ vip_addrs.append(vip.get('address'))
+ return vip_addrs
+
+ def _add_router_service_insertion_binding(self, context, resource_id,
+ router_id,
+ model):
+ res = {
+ 'id': resource_id,
+ 'router_id': router_id
+ }
+ self._process_create_resource_router_id(context, res,
+ model)
+
+ def _resource_set_status(self, context, model, id, status, obj=None,
+ pool_id=None):
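+        # Update the resource status in a single transaction, refusing
+        # PENDING_UPDATE on a resource that is pending delete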
+ with context.session.begin(subtransactions=True):
+ try:
+ qry = context.session.query(model)
+ if issubclass(model, loadbalancer_db.PoolMonitorAssociation):
+ res = qry.filter_by(monitor_id=id,
+ pool_id=pool_id).one()
+ else:
+ res = qry.filter_by(id=id).one()
+ if status == service_constants.PENDING_UPDATE and (
+ res.get('status') == service_constants.PENDING_DELETE):
+ msg = (_("Operation can't be performed, Since resource "
+ "%(model)s : %(id)s is in DELETEing status!") %
+ {'model': model,
+ 'id': id})
+ LOG.error(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+ else:
+ res.status = status
+ except sa_exc.NoResultFound:
+ msg = (_("Resource %(model)s : %(id)s not found!") %
+ {'model': model,
+ 'id': id})
+ LOG.exception(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+ if obj:
+ obj['status'] = status
+
+ def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs):
+ pool = self.get_pool(context, pool_id)
+ edge_id = kwargs.get('edge_id')
+ if not edge_id:
+ edge_id = self._get_edge_id_by_vip_id(
+ context, pool['vip_id'])
+        # Check whether the pool is already created on the router,
+        # in case of a future M:N relation between Pool and Vip
+
+        # Check associated HealthMonitors and then create them
+ for monitor_id in pool.get('health_monitors'):
+ hm = self.get_health_monitor(context, monitor_id)
+ try:
+ self.vcns_driver.create_health_monitor(
+ context, edge_id, hm)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to create healthmonitor "
+ "associated with pool id: %s!") % pool_id)
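+                    # Roll back any monitors already created on the edge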
+                    for created_monitor_id in pool.get('health_monitors'):
+                        if created_monitor_id == monitor_id:
+                            break
+                        self.vcns_driver.delete_health_monitor(
+                            context, created_monitor_id, edge_id)
+        # Create the pool on the edge
+ members = [
+ super(NvpAdvancedPlugin, self).get_member(
+ context, member_id)
+ for member_id in pool.get('members')
+ ]
+ try:
+ self.vcns_driver.create_pool(context, edge_id, pool, members)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to create pool on vshield edge"))
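+                # Clean up the pool and monitors created on the edge so far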
+ self.vcns_driver.delete_pool(
+ context, pool_id, edge_id)
+ for monitor_id in pool.get('health_monitors'):
+ self.vcns_driver.delete_health_monitor(
+ context, monitor_id, edge_id)
+
+ def _vcns_update_pool(self, context, pool, **kwargs):
+ edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
+ members = kwargs.get('members')
+ if not members:
+ members = [
+ super(NvpAdvancedPlugin, self).get_member(
+ context, member_id)
+ for member_id in pool.get('members')
+ ]
+ self.vcns_driver.update_pool(context, edge_id, pool, members)
+
+ def create_vip(self, context, vip):
+ LOG.debug(_("create_vip() called"))
+ router_id = vip['vip'].get(vcns_const.ROUTER_ID)
+ if not router_id:
+ msg = _("router_id is not provided!")
+ LOG.error(msg)
+ raise q_exc.BadRequest(resource='router', msg=msg)
+
+ if not self._is_advanced_service_router(context, router_id):
+ msg = _("router_id: %s is not an advanced router!") % router_id
+ LOG.error(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+
+        # Check whether the vip port is an external port
+ subnet_id = vip['vip']['subnet_id']
+ network_id = self.get_subnet(context, subnet_id)['network_id']
+ ext_net = self._get_network(context, network_id)
+ if not ext_net.external:
+ msg = (_("Network '%s' is not a valid external "
+ "network") % network_id)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+
+ v = super(NvpAdvancedPlugin, self).create_vip(context, vip)
+        # Get edge_id for the resource
+ router_binding = vcns_db.get_vcns_router_binding(
+ context.session,
+ router_id)
+ edge_id = router_binding.edge_id
+        # Add vip_router binding
+ self._add_router_service_insertion_binding(context, v['id'],
+ router_id,
+ loadbalancer_db.Vip)
+        # Create the vip port on vShield Edge
+ router = self._get_router(context, router_id)
+ self._update_interface(context, router, sync=True)
+        # Create the vip and associated pool/monitor on the edge
+ try:
+ self._vcns_create_pool_and_monitors(
+ context, v['pool_id'], edge_id=edge_id)
+ self.vcns_driver.create_vip(context, edge_id, v)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to create vip!"))
+ self._delete_resource_router_id_binding(
+ context, v['id'], loadbalancer_db.Vip)
+ super(NvpAdvancedPlugin, self).delete_vip(context, v['id'])
+ self._resource_set_status(context, loadbalancer_db.Vip,
+ v['id'], service_constants.ACTIVE, v)
+
+ return v
+
+ def update_vip(self, context, id, vip):
+ edge_id = self._get_edge_id_by_vip_id(context, id)
+ old_vip = self.get_vip(context, id)
+ vip['vip']['status'] = service_constants.PENDING_UPDATE
+ v = super(NvpAdvancedPlugin, self).update_vip(context, id, vip)
+ if old_vip['pool_id'] != v['pool_id']:
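+            # The vip now fronts a different pool; rebuild it on the edge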
+ self.vcns_driver.delete_vip(context, id)
+            # Delete the old pool/monitor on the edge
+            # TODO(linb): factor out the procedure for removing the pool and
+            # health monitor into a separate method
+ old_pool = self.get_pool(context, old_vip['pool_id'])
+ self.vcns_driver.delete_pool(
+ context, old_vip['pool_id'], edge_id)
+ for monitor_id in old_pool.get('health_monitors'):
+ self.vcns_driver.delete_health_monitor(
+ context, monitor_id, edge_id)
+            # Create new pool/monitor objects on the edge
+            # TODO(linb): add exception handling on error
+ self._vcns_create_pool_and_monitors(
+ context, v['pool_id'], edge_id=edge_id)
+ self.vcns_driver.create_vip(context, edge_id, v)
+ return v
+ try:
+ self.vcns_driver.update_vip(context, v)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update vip with id: %s!"), id)
+ self._resource_set_status(context, loadbalancer_db.Vip,
+ id, service_constants.ERROR, v)
+
+ self._resource_set_status(context, loadbalancer_db.Vip,
+ v['id'], service_constants.ACTIVE, v)
+ return v
+
+ def delete_vip(self, context, id):
+ v = self.get_vip(context, id)
+ self._resource_set_status(
+ context, loadbalancer_db.Vip,
+ id, service_constants.PENDING_DELETE)
+ try:
+ self.vcns_driver.delete_vip(context, id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to delete vip with id: %s!"), id)
+ self._resource_set_status(context, loadbalancer_db.Vip,
+ id, service_constants.ERROR)
+ edge_id = self._get_edge_id_by_vip_id(context, id)
+        # Check associated HealthMonitors and then delete them
+ pool = self.get_pool(context, v['pool_id'])
+ self.vcns_driver.delete_pool(context, v['pool_id'], edge_id)
+ for monitor_id in pool.get('health_monitors'):
+            # TODO(linb): add exception handling on error
+ self.vcns_driver.delete_health_monitor(
+ context, monitor_id, edge_id)
+
+ router_binding = self._get_resource_router_id_binding(
+ context, loadbalancer_db.Vip, resource_id=id)
+ router = self._get_router(context, router_binding.router_id)
+ self._delete_resource_router_id_binding(
+ context, id, loadbalancer_db.Vip)
+ super(NvpAdvancedPlugin, self).delete_vip(context, id)
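+        # Sync the edge interface to drop the vip address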
+ self._update_interface(context, router, sync=True)
+
+ def update_pool(self, context, id, pool):
+ pool['pool']['status'] = service_constants.PENDING_UPDATE
+ p = super(NvpAdvancedPlugin, self).update_pool(context, id, pool)
+        # Check whether the pool is already associated with the vip
+ if not p.get('vip_id'):
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ p['id'], service_constants.ACTIVE, p)
+ return p
+ try:
+ self._vcns_update_pool(context, p)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update pool with id: %s!"), id)
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ p['id'], service_constants.ERROR, p)
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ p['id'], service_constants.ACTIVE, p)
+ return p
+
+ def create_member(self, context, member):
+ m = super(NvpAdvancedPlugin, self).create_member(context, member)
+ pool_id = m.get('pool_id')
+ pool = self.get_pool(context, pool_id)
+ if not pool.get('vip_id'):
+ self._resource_set_status(context, loadbalancer_db.Member,
+ m['id'], service_constants.ACTIVE, m)
+ return m
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ pool_id,
+ service_constants.PENDING_UPDATE)
+ try:
+ self._vcns_update_pool(context, pool)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update pool with the member"))
+ super(NvpAdvancedPlugin, self).delete_member(context, m['id'])
+
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ pool_id, service_constants.ACTIVE)
+ self._resource_set_status(context, loadbalancer_db.Member,
+ m['id'], service_constants.ACTIVE, m)
+ return m
+
+ def update_member(self, context, id, member):
+ member['member']['status'] = service_constants.PENDING_UPDATE
+ old_member = self.get_member(context, id)
+ m = super(NvpAdvancedPlugin, self).update_member(
+ context, id, member)
+
+ if m['pool_id'] != old_member['pool_id']:
+ old_pool_id = old_member['pool_id']
+ old_pool = self.get_pool(context, old_pool_id)
+ if old_pool.get('vip_id'):
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ old_pool_id, service_constants.PENDING_UPDATE)
+ try:
+ self._vcns_update_pool(context, old_pool)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update old pool "
+ "with the member"))
+ super(NvpAdvancedPlugin, self).delete_member(
+ context, m['id'])
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ old_pool_id, service_constants.ACTIVE)
+
+ pool_id = m['pool_id']
+ pool = self.get_pool(context, pool_id)
+ if not pool.get('vip_id'):
+ self._resource_set_status(context, loadbalancer_db.Member,
+ m['id'], service_constants.ACTIVE, m)
+ return m
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ pool_id,
+ service_constants.PENDING_UPDATE)
+ try:
+ self._vcns_update_pool(context, pool)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update pool with the member"))
+ super(NvpAdvancedPlugin, self).delete_member(
+ context, m['id'])
+
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ pool_id, service_constants.ACTIVE)
+ self._resource_set_status(context, loadbalancer_db.Member,
+ m['id'], service_constants.ACTIVE, m)
+ return m
+
+ def delete_member(self, context, id):
+ m = self.get_member(context, id)
+ super(NvpAdvancedPlugin, self).delete_member(context, id)
+ pool_id = m['pool_id']
+ pool = self.get_pool(context, pool_id)
+ if not pool.get('vip_id'):
+ return
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ pool_id, service_constants.PENDING_UPDATE)
+ try:
+ self._vcns_update_pool(context, pool)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update pool with the member"))
+ self._resource_set_status(context, loadbalancer_db.Pool,
+ pool_id, service_constants.ACTIVE)
+
+ def update_health_monitor(self, context, id, health_monitor):
+ old_hm = super(NvpAdvancedPlugin, self).get_health_monitor(
+ context, id)
+ hm = super(NvpAdvancedPlugin, self).update_health_monitor(
+ context, id, health_monitor)
+ for hm_pool in hm.get('pools'):
+ pool_id = hm_pool['pool_id']
+ pool = self.get_pool(context, pool_id)
+ if pool.get('vip_id'):
+ edge_id = self._get_edge_id_by_vip_id(
+ context, pool['vip_id'])
+ try:
+ self.vcns_driver.update_health_monitor(
+ context, edge_id, old_hm, hm)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update monitor "
+ "with id: %s!"), id)
+ return hm
+
+ def delete_health_monitor(self, context, id):
+ with context.session.begin(subtransactions=True):
+ qry = context.session.query(
+ loadbalancer_db.PoolMonitorAssociation
+ ).filter_by(monitor_id=id)
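+            # Detach the monitor from every pool it is associated with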
+ for assoc in qry:
+ pool_id = assoc['pool_id']
+ super(NvpAdvancedPlugin,
+ self).delete_pool_health_monitor(context,
+ id,
+ pool_id)
+ pool = self.get_pool(context, pool_id)
+ if not pool.get('vip_id'):
+ continue
+ edge_id = self._get_edge_id_by_vip_id(
+ context, pool['vip_id'])
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.PENDING_UPDATE)
+ try:
+ self._vcns_update_pool(context, pool)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update pool with monitor!"))
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.ACTIVE)
+ try:
+ self.vcns_driver.delete_health_monitor(
+ context, id, edge_id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to delete monitor "
+ "with id: %s!"), id)
+ super(NvpAdvancedPlugin,
+ self).delete_health_monitor(context, id)
+ self._delete_resource_router_id_binding(
+ context, id, loadbalancer_db.HealthMonitor)
+
+ super(NvpAdvancedPlugin, self).delete_health_monitor(context, id)
+ self._delete_resource_router_id_binding(
+ context, id, loadbalancer_db.HealthMonitor)
+
+ def create_pool_health_monitor(self, context,
+ health_monitor, pool_id):
+ monitor_id = health_monitor['health_monitor']['id']
+ pool = self.get_pool(context, pool_id)
+ monitors = pool.get('health_monitors')
+        if monitors:
+            msg = _("Vcns currently supports only "
+                    "one monitor per pool")
+ LOG.error(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+        # Check whether the pool is already associated with the vip
+ if not pool.get('vip_id'):
+ res = super(NvpAdvancedPlugin,
+ self).create_pool_health_monitor(context,
+ health_monitor,
+ pool_id)
+ return res
+        # Get the edge_id
+ edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
+ res = super(NvpAdvancedPlugin,
+ self).create_pool_health_monitor(context,
+ health_monitor,
+ pool_id)
+ monitor = self.get_health_monitor(context, monitor_id)
+        # TODO(linb): add exception handling on error
+ self.vcns_driver.create_health_monitor(context, edge_id, monitor)
+        # Add the new monitor to the pool before updating it on the edge
+ pool['health_monitors'].append(monitor['id'])
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.PENDING_UPDATE)
+ try:
+ self._vcns_update_pool(context, pool)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to associate monitor with pool!"))
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.ERROR)
+ super(NvpAdvancedPlugin, self).delete_pool_health_monitor(
+ context, monitor_id, pool_id)
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.ACTIVE)
+ self._resource_set_status(
+ context, loadbalancer_db.PoolMonitorAssociation,
+ monitor_id, service_constants.ACTIVE, res,
+ pool_id=pool_id)
+ return res
+
+ def delete_pool_health_monitor(self, context, id, pool_id):
+ super(NvpAdvancedPlugin, self).delete_pool_health_monitor(
+ context, id, pool_id)
+ pool = self.get_pool(context, pool_id)
+        # Check whether the pool is already associated with the vip
+ if pool.get('vip_id'):
+            # Delete the monitor on vshield edge
+ edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.PENDING_UPDATE)
+ try:
+ self._vcns_update_pool(context, pool)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(
+ _("Failed to update pool with pool_monitor!"))
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.ERROR)
+            # TODO(linb): add exception handling on error
+ self.vcns_driver.delete_health_monitor(context, id, edge_id)
+ self._resource_set_status(
+ context, loadbalancer_db.Pool,
+ pool_id, service_constants.ACTIVE)
+
class VcnsCallbacks(object):
"""Edge callback implementation Callback functions for
class NvpServicePluginException(q_exc.NeutronException):
"""NVP Service Plugin exceptions."""
message = _("An unexpected error happened "
- "in the NVP Service Plugin:%(err_msg)s")
+ "in the NVP Service Plugin: %(err_msg)s")
class NvpServiceOverQuota(q_exc.Conflict):
from sqlalchemy.orm import exc
+from neutron.openstack.common import log as logging
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.dbexts import vcns_models
+from neutron.plugins.nicira.vshield.common import (
+ exceptions as vcns_exc)
+
+LOG = logging.getLogger(__name__)
def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status):
#
# Edge Firewall binding methods
+#
def add_vcns_edge_firewallrule_binding(session, map_info):
with session.begin(subtransactions=True):
binding = vcns_models.VcnsEdgeFirewallRuleBinding(
session.query(
vcns_models.VcnsEdgeFirewallRuleBinding).filter_by(
edge_id=edge_id).delete()
+
+
+def add_vcns_edge_vip_binding(session, map_info):
+ with session.begin(subtransactions=True):
+ binding = vcns_models.VcnsEdgeVipBinding(
+ vip_id=map_info['vip_id'],
+ edge_id=map_info['edge_id'],
+ vip_vseid=map_info['vip_vseid'],
+ app_profileid=map_info['app_profileid'])
+ session.add(binding)
+
+ return binding
+
+
+def get_vcns_edge_vip_binding(session, id):
+ with session.begin(subtransactions=True):
+ try:
+ qry = session.query(vcns_models.VcnsEdgeVipBinding)
+ return qry.filter_by(vip_id=id).one()
+ except exc.NoResultFound:
+ msg = _("VIP Resource binding with id:%s not found!") % id
+ LOG.exception(msg)
+ raise vcns_exc.VcnsNotFound(
+ resource='router_service_binding', msg=msg)
+
+
+def delete_vcns_edge_vip_binding(session, id):
+ with session.begin(subtransactions=True):
+ qry = session.query(vcns_models.VcnsEdgeVipBinding)
+ if not qry.filter_by(vip_id=id).delete():
+ msg = _("VIP Resource binding with id:%s not found!") % id
+ LOG.exception(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+
+
+def add_vcns_edge_pool_binding(session, map_info):
+ with session.begin(subtransactions=True):
+ binding = vcns_models.VcnsEdgePoolBinding(
+ pool_id=map_info['pool_id'],
+ edge_id=map_info['edge_id'],
+ pool_vseid=map_info['pool_vseid'])
+ session.add(binding)
+
+ return binding
+
+
+def get_vcns_edge_pool_binding(session, id, edge_id):
+ with session.begin(subtransactions=True):
+ return (session.query(vcns_models.VcnsEdgePoolBinding).
+ filter_by(pool_id=id, edge_id=edge_id).first())
+
+
+def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid):
+ with session.begin(subtransactions=True):
+ try:
+ qry = session.query(vcns_models.VcnsEdgePoolBinding)
+ binding = qry.filter_by(edge_id=edge_id,
+ pool_vseid=pool_vseid).one()
+ except exc.NoResultFound:
+ msg = (_("Pool Resource binding with edge_id:%(edge_id)s "
+ "pool_vseid:%(pool_vseid)s not found!") %
+ {'edge_id': edge_id, 'pool_vseid': pool_vseid})
+ LOG.exception(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+ return binding
+
+
+def delete_vcns_edge_pool_binding(session, id, edge_id):
+ with session.begin(subtransactions=True):
+ qry = session.query(vcns_models.VcnsEdgePoolBinding)
+ if not qry.filter_by(pool_id=id, edge_id=edge_id).delete():
+ msg = _("Pool Resource binding with id:%s not found!") % id
+ LOG.exception(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
+
+
+def add_vcns_edge_monitor_binding(session, map_info):
+ with session.begin(subtransactions=True):
+ binding = vcns_models.VcnsEdgeMonitorBinding(
+ monitor_id=map_info['monitor_id'],
+ edge_id=map_info['edge_id'],
+ monitor_vseid=map_info['monitor_vseid'])
+ session.add(binding)
+
+ return binding
+
+
+def get_vcns_edge_monitor_binding(session, id, edge_id):
+ with session.begin(subtransactions=True):
+ return (session.query(vcns_models.VcnsEdgeMonitorBinding).
+ filter_by(monitor_id=id, edge_id=edge_id).first())
+
+
+def delete_vcns_edge_monitor_binding(session, id, edge_id):
+ with session.begin(subtransactions=True):
+ qry = session.query(vcns_models.VcnsEdgeMonitorBinding)
+ if not qry.filter_by(monitor_id=id, edge_id=edge_id).delete():
+ msg = _("Monitor Resource binding with id:%s not found!") % id
+ LOG.exception(msg)
+ raise nvp_exc.NvpServicePluginException(err_msg=msg)
primary_key=True)
edge_id = sa.Column(sa.String(36), primary_key=True)
rule_vseid = sa.Column(sa.String(36))
+
+
+class VcnsEdgePoolBinding(model_base.BASEV2):
+ """Represents the mapping between neutron pool and Edge pool."""
+
+ __tablename__ = 'vcns_edge_pool_bindings'
+
+ pool_id = sa.Column(sa.String(36),
+ sa.ForeignKey("pools.id", ondelete="CASCADE"),
+ primary_key=True)
+ edge_id = sa.Column(sa.String(36), primary_key=True)
+ pool_vseid = sa.Column(sa.String(36))
+
+
+class VcnsEdgeVipBinding(model_base.BASEV2):
+ """Represents the mapping between neutron vip and Edge vip."""
+
+ __tablename__ = 'vcns_edge_vip_bindings'
+
+ vip_id = sa.Column(sa.String(36),
+ sa.ForeignKey("vips.id", ondelete="CASCADE"),
+ primary_key=True)
+ edge_id = sa.Column(sa.String(36))
+ vip_vseid = sa.Column(sa.String(36))
+ app_profileid = sa.Column(sa.String(36))
+
+
+class VcnsEdgeMonitorBinding(model_base.BASEV2):
+ """Represents the mapping between neutron monitor and Edge monitor."""
+
+ __tablename__ = 'vcns_edge_monitor_bindings'
+
+ monitor_id = sa.Column(sa.String(36),
+ sa.ForeignKey("healthmonitors.id",
+ ondelete="CASCADE"),
+ primary_key=True)
+ edge_id = sa.Column(sa.String(36), primary_key=True)
+ monitor_vseid = sa.Column(sa.String(36))
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2013 VMware, Inc,
+# Copyright 2013 VMware, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2013 OpenStack Foundation.
+# Copyright 2013 VMware, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# @author: Kaiwei Fan, VMware, Inc.
# @author: Bo Link, VMware, Inc.
+from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.nicira.vshield.common import (
status_level = RouterStatus.ROUTER_STATUS_ERROR
return status_level
+ def _enable_loadbalancer(self, edge):
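+        # Enable the loadbalancer_4.0 feature in the edge deployment request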
+ if not edge.get('featureConfigs') or (
+ not edge['featureConfigs'].get('features')):
+ edge['featureConfigs'] = {'features': []}
+ edge['featureConfigs']['features'].append(
+ {'featureType': 'loadbalancer_4.0',
+ 'enabled': True})
+
def get_edge_status(self, edge_id):
try:
response = self.vcns.get_edge_status(edge_id)[1]
raise e
def deploy_edge(self, router_id, name, internal_network, jobdata=None,
- wait_for_exec=False):
+ wait_for_exec=False, loadbalancer_enable=True):
task_name = 'deploying-%s' % name
edge_name = name
edge = self._assemble_edge(
vcns_const.INTEGRATION_SUBNET_NETMASK,
type="internal")
edge['vnics']['vnics'].append(vnic_inside)
+ if loadbalancer_enable:
+ self._enable_loadbalancer(edge)
userdata = {
'request': edge,
'router_name': name,
def delete_lswitch(self, lswitch_id):
self.vcns.delete_lswitch(lswitch_id)
+
+ def get_loadbalancer_config(self, edge_id):
+ try:
+ header, response = self.vcns.get_loadbalancer_config(
+ edge_id)
+ except exceptions.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to get service config"))
+ return response
+
+ def enable_service_loadbalancer(self, edge_id):
+ config = self.get_loadbalancer_config(
+ edge_id)
+ if not config['enabled']:
+ config['enabled'] = True
+ try:
+ self.vcns.enable_service_loadbalancer(edge_id, config)
+ except exceptions.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to enable loadbalancer "
+ "service config"))
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 VMware, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Leon Cui, VMware
+
+from neutron.openstack.common import excutils
+from neutron.openstack.common import log as logging
+from neutron.plugins.nicira.dbexts import vcns_db
+from neutron.plugins.nicira.vshield.common import (
+ constants as vcns_const)
+from neutron.plugins.nicira.vshield.common import (
+ exceptions as vcns_exc)
+from neutron.services.loadbalancer import constants as lb_constants
+
+LOG = logging.getLogger(__name__)
+
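+# Mappings from neutron lbaas constants to their vcns edge equivalents
+# (note: HTTPS vips are handled as plain tcp on the edge)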
+BALANCE_MAP = {
+ lb_constants.LB_METHOD_ROUND_ROBIN: 'round-robin',
+ lb_constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
+ lb_constants.LB_METHOD_SOURCE_IP: 'source'
+}
+PROTOCOL_MAP = {
+ lb_constants.PROTOCOL_TCP: 'tcp',
+ lb_constants.PROTOCOL_HTTP: 'http',
+ lb_constants.PROTOCOL_HTTPS: 'tcp'
+}
+
+
+class EdgeLbDriver(object):
+    """Driver APIs for the vShield Edge Loadbalancer feature."""
+
+ def _convert_lb_vip(self, context, edge_id, vip, app_profileid):
+ pool_id = vip.get('pool_id')
+ poolid_map = vcns_db.get_vcns_edge_pool_binding(
+ context.session, pool_id, edge_id)
+ pool_vseid = poolid_map['pool_vseid']
+ return {
+ 'name': vip.get('name'),
+ 'ipAddress': vip.get('address'),
+ 'protocol': vip.get('protocol'),
+ 'port': vip.get('protocol_port'),
+ 'defaultPoolId': pool_vseid,
+ 'applicationProfileId': app_profileid
+ }
+
+ def _restore_lb_vip(self, context, edge_id, vip_vse):
+ pool_binding = vcns_db.get_vcns_edge_pool_binding_by_vseid(
+ context.session,
+ edge_id,
+ vip_vse['defaultPoolId'])
+
+ return {
+ 'name': vip_vse['name'],
+ 'address': vip_vse['ipAddress'],
+ 'protocol': vip_vse['protocol'],
+ 'protocol_port': vip_vse['port'],
+ 'pool_id': pool_binding['pool_id']
+ }
+
+ def _convert_lb_pool(self, context, edge_id, pool, members):
+ vsepool = {
+ 'name': pool.get('name'),
+ 'algorithm': BALANCE_MAP.get(
+ pool.get('lb_method'),
+ 'round-robin'),
+ 'member': [],
+ 'monitorId': []
+ }
+ for member in members:
+ vsepool['member'].append({
+ 'ipAddress': member['address'],
+ 'port': member['protocol_port']
+ })
+        # TODO(linb): right now, vse accepts at most one monitor per pool
+ monitors = pool.get('health_monitors')
+ if not monitors:
+ return vsepool
+ monitorid_map = vcns_db.get_vcns_edge_monitor_binding(
+ context.session,
+ monitors[0],
+ edge_id)
+ vsepool['monitorId'].append(monitorid_map['monitor_vseid'])
+ return vsepool
+
+ def _restore_lb_pool(self, context, edge_id, pool_vse):
+        # TODO(linb): get more useful info
+ return {
+ 'name': pool_vse['name'],
+ }
+
+ def _convert_lb_monitor(self, context, monitor):
+ return {
+ 'type': PROTOCOL_MAP.get(
+ monitor.get('type'), 'http'),
+ 'interval': monitor.get('delay'),
+ 'timeout': monitor.get('timeout'),
+ 'maxRetries': monitor.get('max_retries'),
+ 'name': monitor.get('id')
+ }
+
+ def _restore_lb_monitor(self, context, edge_id, monitor_vse):
+ return {
+ 'delay': monitor_vse['interval'],
+ 'timeout': monitor_vse['timeout'],
+ 'max_retries': monitor_vse['maxRetries'],
+ 'id': monitor_vse['name']
+ }
+
+ def _convert_app_profile(self, name, app_profile):
+        # TODO(linb): convert the session_persistence to the
+        # corresponding app_profile
+ return {
+ "insertXForwardedFor": False,
+ "name": name,
+ "persistence": {
+ "method": "sourceip"
+ },
+ "serverSslEnabled": False,
+ "sslPassthrough": False,
+ "template": "HTTP"
+ }
+
+ def create_vip(self, context, edge_id, vip):
+ app_profile = self._convert_app_profile(
+ vip['name'], vip.get('session_persistence'))
+ try:
+ header, response = self.vcns.create_app_profile(
+ edge_id, app_profile)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to create app profile on edge: %s"),
+ edge_id)
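+        # The new object's id is the last segment of the Location header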
+ objuri = header['location']
+ app_profileid = objuri[objuri.rfind("/") + 1:]
+
+ vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
+ try:
+ header, response = self.vcns.create_vip(
+ edge_id, vip_new)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to create vip on vshield edge: %s"),
+ edge_id)
+ objuri = header['location']
+ vip_vseid = objuri[objuri.rfind("/") + 1:]
+
+ # Add the vip mapping
+ map_info = {
+ "vip_id": vip['id'],
+ "vip_vseid": vip_vseid,
+ "edge_id": edge_id,
+ "app_profileid": app_profileid
+ }
+ vcns_db.add_vcns_edge_vip_binding(context.session, map_info)
+
+ def get_vip(self, context, id):
+ vip_binding = vcns_db.get_vcns_edge_vip_binding(context.session, id)
+ edge_id = vip_binding[vcns_const.EDGE_ID]
+ vip_vseid = vip_binding['vip_vseid']
+ try:
+ response = self.vcns.get_vip(edge_id, vip_vseid)[1]
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to get vip on edge"))
+ return self._restore_lb_vip(context, edge_id, response)
+
+ def update_vip(self, context, vip):
+ vip_binding = vcns_db.get_vcns_edge_vip_binding(
+ context.session, vip['id'])
+ edge_id = vip_binding[vcns_const.EDGE_ID]
+ vip_vseid = vip_binding.get('vip_vseid')
+ app_profileid = vip_binding.get('app_profileid')
+
+ vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid)
+ try:
+ self.vcns.update_vip(edge_id, vip_vseid, vip_new)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update vip on edge: %s"), edge_id)
+
+ def delete_vip(self, context, id):
+ vip_binding = vcns_db.get_vcns_edge_vip_binding(
+ context.session, id)
+ edge_id = vip_binding[vcns_const.EDGE_ID]
+ vip_vseid = vip_binding['vip_vseid']
+ app_profileid = vip_binding['app_profileid']
+
+ try:
+ self.vcns.delete_vip(edge_id, vip_vseid)
+ self.vcns.delete_app_profile(edge_id, app_profileid)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to delete vip on edge: %s"), edge_id)
+ vcns_db.delete_vcns_edge_vip_binding(context.session, id)
+
+ def create_pool(self, context, edge_id, pool, members):
+ pool_new = self._convert_lb_pool(context, edge_id, pool, members)
+ try:
+ header = self.vcns.create_pool(edge_id, pool_new)[0]
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to create pool"))
+
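+        # The edge-side pool id is the last segment of the Location header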
+ objuri = header['location']
+ pool_vseid = objuri[objuri.rfind("/") + 1:]
+
+ # update the pool mapping table
+ map_info = {
+ "pool_id": pool['id'],
+ "pool_vseid": pool_vseid,
+ "edge_id": edge_id
+ }
+ vcns_db.add_vcns_edge_pool_binding(context.session, map_info)
+
+ def get_pool(self, context, id, edge_id):
+ pool_binding = vcns_db.get_vcns_edge_pool_binding(
+ context.session, id, edge_id)
+ if not pool_binding:
+ msg = (_("pool_binding not found with id: %(id)s "
+ "edge_id: %(edge_id)s") % {
+ 'id': id,
+ 'edge_id': edge_id})
+ LOG.error(msg)
+ raise vcns_exc.VcnsNotFound(
+ resource='router_service_binding', msg=msg)
+ pool_vseid = pool_binding['pool_vseid']
+ try:
+ response = self.vcns.get_pool(edge_id, pool_vseid)[1]
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to get pool on edge"))
+ return self._restore_lb_pool(context, edge_id, response)
+
+ def update_pool(self, context, edge_id, pool, members):
+ pool_binding = vcns_db.get_vcns_edge_pool_binding(
+ context.session, pool['id'], edge_id)
+ pool_vseid = pool_binding['pool_vseid']
+ pool_new = self._convert_lb_pool(context, edge_id, pool, members)
+ try:
+ self.vcns.update_pool(edge_id, pool_vseid, pool_new)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update pool"))
+
+ def delete_pool(self, context, id, edge_id):
+ pool_binding = vcns_db.get_vcns_edge_pool_binding(
+ context.session, id, edge_id)
+ pool_vseid = pool_binding['pool_vseid']
+ try:
+ self.vcns.delete_pool(edge_id, pool_vseid)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to delete pool"))
+ vcns_db.delete_vcns_edge_pool_binding(
+ context.session, id, edge_id)
+
+ def create_health_monitor(self, context, edge_id, health_monitor):
+ monitor_new = self._convert_lb_monitor(context, health_monitor)
+ try:
+ header = self.vcns.create_health_monitor(edge_id, monitor_new)[0]
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to create monitor on edge: %s"),
+ edge_id)
+
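+        # The edge-side monitor id is the Location header's last segment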
+ objuri = header['location']
+ monitor_vseid = objuri[objuri.rfind("/") + 1:]
+
+ # update the health_monitor mapping table
+ map_info = {
+ "monitor_id": health_monitor['id'],
+ "monitor_vseid": monitor_vseid,
+ "edge_id": edge_id
+ }
+ vcns_db.add_vcns_edge_monitor_binding(context.session, map_info)
+
+ def get_health_monitor(self, context, id, edge_id):
+ monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
+ context.session, id, edge_id)
+ if not monitor_binding:
+ msg = (_("monitor_binding not found with id: %(id)s "
+ "edge_id: %(edge_id)s") % {
+ 'id': id,
+ 'edge_id': edge_id})
+ LOG.error(msg)
+ raise vcns_exc.VcnsNotFound(
+ resource='router_service_binding', msg=msg)
+ monitor_vseid = monitor_binding['monitor_vseid']
+ try:
+ response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1]
+ except vcns_exc.VcnsApiException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to get monitor on edge: %s"),
+ e.response)
+ return self._restore_lb_monitor(context, edge_id, response)
+
+ def update_health_monitor(self, context, edge_id,
+ old_health_monitor, health_monitor):
+ monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
+ context.session,
+ old_health_monitor['id'], edge_id)
+ monitor_vseid = monitor_binding['monitor_vseid']
+ monitor_new = self._convert_lb_monitor(
+ context, health_monitor)
+ try:
+ self.vcns.update_health_monitor(
+ edge_id, monitor_vseid, monitor_new)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to update monitor on edge: %s"),
+ edge_id)
+
+ def delete_health_monitor(self, context, id, edge_id):
+ monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
+ context.session, id, edge_id)
+ monitor_vseid = monitor_binding['monitor_vseid']
+ try:
+ self.vcns.delete_health_monitor(edge_id, monitor_vseid)
+ except vcns_exc.VcnsApiException:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_("Failed to delete monitor"))
+ vcns_db.delete_vcns_edge_monitor_binding(
+ context.session, id, edge_id)
FIREWALL_SERVICE = "firewall/config"
FIREWALL_RULE_RESOURCE = "rules"
+# LbaaS constants
+LOADBALANCER_SERVICE = "loadbalancer/config"
+VIP_RESOURCE = "virtualservers"
+POOL_RESOURCE = "pools"
+MONITOR_RESOURCE = "monitors"
+APP_PROFILE_RESOURCE = "applicationprofiles"
+
class Vcns(object):
uri = "/api/ws.v1/lswitch/%s" % lswitch_id
return self.do_request(HTTP_DELETE, uri)
+ def get_loadbalancer_config(self, edge_id):
+ uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE)
+ return self.do_request(HTTP_GET, uri, decode=True)
+
+ def enable_service_loadbalancer(self, edge_id, config):
+ uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE)
+ return self.do_request(HTTP_PUT, uri, config)
+
def update_firewall(self, edge_id, fw_req):
uri = self._build_uri_path(
edge_id, FIREWALL_SERVICE)
vcns_rule_id)
return self.do_request(HTTP_GET, uri, decode=True)
+ #
+    # Edge LBaaS call helpers
+ #
+ def create_vip(self, edge_id, vip_new):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ VIP_RESOURCE)
+ return self.do_request(HTTP_POST, uri, vip_new)
+
+ def get_vip(self, edge_id, vip_vseid):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ VIP_RESOURCE, vip_vseid)
+ return self.do_request(HTTP_GET, uri, decode=True)
+
+ def update_vip(self, edge_id, vip_vseid, vip_new):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ VIP_RESOURCE, vip_vseid)
+ return self.do_request(HTTP_PUT, uri, vip_new)
+
+ def delete_vip(self, edge_id, vip_vseid):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ VIP_RESOURCE, vip_vseid)
+ return self.do_request(HTTP_DELETE, uri)
+
+ def create_pool(self, edge_id, pool_new):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ POOL_RESOURCE)
+ return self.do_request(HTTP_POST, uri, pool_new)
+
+ def get_pool(self, edge_id, pool_vseid):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ POOL_RESOURCE, pool_vseid)
+ return self.do_request(HTTP_GET, uri, decode=True)
+
+ def update_pool(self, edge_id, pool_vseid, pool_new):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ POOL_RESOURCE, pool_vseid)
+ return self.do_request(HTTP_PUT, uri, pool_new)
+
+ def delete_pool(self, edge_id, pool_vseid):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ POOL_RESOURCE, pool_vseid)
+ return self.do_request(HTTP_DELETE, uri)
+
+ def create_health_monitor(self, edge_id, monitor_new):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ MONITOR_RESOURCE)
+ return self.do_request(HTTP_POST, uri, monitor_new)
+
+ def get_health_monitor(self, edge_id, monitor_vseid):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ MONITOR_RESOURCE, monitor_vseid)
+ return self.do_request(HTTP_GET, uri, decode=True)
+
+ def update_health_monitor(self, edge_id, monitor_vseid, monitor_new):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ MONITOR_RESOURCE,
+ monitor_vseid)
+ return self.do_request(HTTP_PUT, uri, monitor_new)
+
+ def delete_health_monitor(self, edge_id, monitor_vseid):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ MONITOR_RESOURCE,
+ monitor_vseid)
+ return self.do_request(HTTP_DELETE, uri)
+
+ def create_app_profile(self, edge_id, app_profile):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ APP_PROFILE_RESOURCE)
+ return self.do_request(HTTP_POST, uri, app_profile)
+
+ def delete_app_profile(self, edge_id, app_profileid):
+ uri = self._build_uri_path(
+ edge_id, LOADBALANCER_SERVICE,
+ APP_PROFILE_RESOURCE,
+ app_profileid)
+ return self.do_request(HTTP_DELETE, uri)
+
def _build_uri_path(self, edge_id,
service,
resource=None,
from oslo.config import cfg
-from neutron.plugins.nicira.common import config # noqa
+from neutron.openstack.common import log as logging
+from neutron.plugins.nicira.common import config as nicira_cfg # noqa
from neutron.plugins.nicira.vshield import edge_appliance_driver
from neutron.plugins.nicira.vshield import edge_firewall_driver
+from neutron.plugins.nicira.vshield import edge_loadbalancer_driver
from neutron.plugins.nicira.vshield.tasks import tasks
from neutron.plugins.nicira.vshield import vcns
+LOG = logging.getLogger(__name__)
+
class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver,
- edge_firewall_driver.EdgeFirewallDriver):
+ edge_firewall_driver.EdgeFirewallDriver,
+ edge_loadbalancer_driver.EdgeLbDriver):
+
def __init__(self, callbacks):
super(VcnsDriver, self).__init__()
for k in loadbalancer.RESOURCE_ATTRIBUTE_MAP.keys()
)
+ def _get_vip_optional_args(self):
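+        # Factored out so subclasses can extend the optional vip attributes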
+ return ('description', 'subnet_id', 'address',
+ 'session_persistence', 'connection_limit')
+
def _create_vip(self, fmt, name, pool_id, protocol, protocol_port,
admin_state_up, expected_res_status=None, **kwargs):
data = {'vip': {'name': name,
'protocol_port': protocol_port,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
- for arg in ('description', 'subnet_id', 'address',
- 'session_persistence', 'connection_limit'):
+ args = self._get_vip_optional_args()
+ for arg in args:
if arg in kwargs and kwargs[arg] is not None:
data['vip'][arg] = kwargs[arg]
class LoadBalancerPluginDbTestCase(LoadBalancerTestMixin,
test_db_plugin.NeutronDbPluginV2TestCase):
- def setUp(self, core_plugin=None, lb_plugin=None, lbaas_provider=None):
+ def setUp(self, core_plugin=None, lb_plugin=None, lbaas_provider=None,
+ ext_mgr=None):
service_plugins = {'lb_plugin_name': DB_LB_PLUGIN_KLASS}
if not lbaas_provider:
lbaas_provider = (
sdb.ServiceTypeManager._instance = None
super(LoadBalancerPluginDbTestCase, self).setUp(
+ ext_mgr=ext_mgr,
service_plugins=service_plugins
)
- self._subnet_id = _subnet_id
-
- self.plugin = loadbalancer_plugin.LoadBalancerPlugin()
+ if not ext_mgr:
+ self.plugin = loadbalancer_plugin.LoadBalancerPlugin()
+ ext_mgr = PluginAwareExtensionManager(
+ extensions_path,
+ {constants.LOADBALANCER: self.plugin}
+ )
+ app = config.load_paste_app('extensions_test_app')
+ self.ext_api = ExtensionMiddleware(app, ext_mgr=ext_mgr)
get_lbaas_agent_patcher = mock.patch(
'neutron.services.loadbalancer.agent_scheduler'
self.addCleanup(mock.patch.stopall)
self.addCleanup(cfg.CONF.reset)
- ext_mgr = PluginAwareExtensionManager(
- extensions_path,
- {constants.LOADBALANCER: self.plugin}
- )
- app = config.load_paste_app('extensions_test_app')
- self.ext_api = ExtensionMiddleware(app, ext_mgr=ext_mgr)
+ self._subnet_id = _subnet_id
class TestLoadBalancer(LoadBalancerPluginDbTestCase):
from neutron.tests.unit.nicira import test_nicira_plugin
from neutron.tests.unit.nicira import VCNS_NAME
from neutron.tests.unit.nicira.vshield import fake_vcns
+from neutron.tests.unit import test_l3_plugin
_uuid = uuidutils.generate_uuid
service_plugins=service_plugins)
-class ServiceRouterTest(test_nicira_plugin.NiciraL3NatTest):
+class ServiceRouterTest(test_nicira_plugin.NiciraL3NatTest,
+ test_l3_plugin.L3NatTestCaseMixin):
def vcns_patch(self):
instance = self.mock_vcns.start()
self.fc2.create_lswitch)
instance.return_value.delete_lswitch.side_effect = (
self.fc2.delete_lswitch)
+ instance.return_value.get_loadbalancer_config.side_effect = (
+ self.fc2.get_loadbalancer_config)
+ instance.return_value.enable_service_loadbalancer.side_effect = (
+ self.fc2.enable_service_loadbalancer)
def setUp(self, ext_mgr=None, service_plugins=None):
cfg.CONF.set_override('api_extensions_path', NVPEXT_PATH)
self.fc2.set_fake_nvpapi(self.fc)
self.addCleanup(self.fc2.reset_all)
- self.addCleanup(self.mock_vcns.stop)
+ self.addCleanup(mock.patch.stopall)
def tearDown(self):
plugin = NeutronManager.get_plugin()
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 VMware, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: linb, VMware
+
+import contextlib
+
+from webob import exc as web_exc
+
+from neutron.api.v2 import attributes
+from neutron import context
+from neutron.db.loadbalancer import loadbalancer_db as ldb
+from neutron.extensions import loadbalancer as lb
+from neutron import manager
+from neutron.openstack.common import uuidutils
+from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
+from neutron.tests.unit.nicira import test_edge_router
+
+_uuid = uuidutils.generate_uuid
+
+LBAAS_PLUGIN_CLASS = (
+ "neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin"
+)
+
+
+class LoadBalancerTestExtensionManager(
+ test_edge_router.ServiceRouterTestExtensionManager):
+
+ def get_resources(self):
+        # If lb resources have been loaded and updated by main API
+        # router, update the map in the lb extension so it will load
+        # the same attributes as the API router
+ resources = super(LoadBalancerTestExtensionManager,
+ self).get_resources()
+ lb_attr_map = lb.RESOURCE_ATTRIBUTE_MAP.copy()
+ for res in lb.RESOURCE_ATTRIBUTE_MAP.keys():
+ attr_info = attributes.RESOURCE_ATTRIBUTE_MAP.get(res)
+ if attr_info:
+ lb.RESOURCE_ATTRIBUTE_MAP[res] = attr_info
+ lb_resources = lb.Loadbalancer.get_resources()
+ # restore the original resources once the controllers are created
+ lb.RESOURCE_ATTRIBUTE_MAP = lb_attr_map
+ resources.extend(lb_resources)
+ return resources
+
+
+class TestLoadbalancerPlugin(
+ test_db_loadbalancer.LoadBalancerPluginDbTestCase,
+ test_edge_router.ServiceRouterTest):
+
+ def vcns_loadbalancer_patch(self):
+ instance = self.vcns_instance
+ instance.return_value.create_vip.side_effect = (
+ self.fc2.create_vip)
+ instance.return_value.get_vip.side_effect = (
+ self.fc2.get_vip)
+ instance.return_value.update_vip.side_effect = (
+ self.fc2.update_vip)
+ instance.return_value.delete_vip.side_effect = (
+ self.fc2.delete_vip)
+ instance.return_value.create_pool.side_effect = (
+ self.fc2.create_pool)
+ instance.return_value.get_pool.side_effect = (
+ self.fc2.get_pool)
+ instance.return_value.update_pool.side_effect = (
+ self.fc2.update_pool)
+ instance.return_value.delete_pool.side_effect = (
+ self.fc2.delete_pool)
+ instance.return_value.create_health_monitor.side_effect = (
+ self.fc2.create_health_monitor)
+ instance.return_value.get_health_monitor.side_effect = (
+ self.fc2.get_health_monitor)
+ instance.return_value.update_health_monitor.side_effect = (
+ self.fc2.update_health_monitor)
+ instance.return_value.delete_health_monitor.side_effect = (
+ self.fc2.delete_health_monitor)
+ instance.return_value.create_app_profile.side_effect = (
+ self.fc2.create_app_profile)
+ instance.return_value.delete_app_profile.side_effect = (
+ self.fc2.delete_app_profile)
+
+ def setUp(self):
+ # Save the global RESOURCE_ATTRIBUTE_MAP
+ self.saved_attr_map = {}
+ for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
+ self.saved_attr_map[resource] = attrs.copy()
+
+ super(TestLoadbalancerPlugin, self).setUp(
+ ext_mgr=LoadBalancerTestExtensionManager(),
+ lb_plugin=LBAAS_PLUGIN_CLASS)
+ self.vcns_loadbalancer_patch()
+ self.plugin = manager.NeutronManager.get_plugin()
+ self.router_id = None
+
+ def tearDown(self):
+ super(TestLoadbalancerPlugin, self).tearDown()
+ # Restore the global RESOURCE_ATTRIBUTE_MAP
+ attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
+ self.ext_api = None
+ self.plugin = None
+
+ def _fake_router_edge_mapping(self):
+ req = self._create_router(self.fmt, self._tenant_id)
+ res = self.deserialize(self.fmt, req)
+ self.router_id = res['router']['id']
+
+ def _get_vip_optional_args(self):
+ args = super(TestLoadbalancerPlugin, self)._get_vip_optional_args()
+ return args + ('router_id',)
+
+ def test_update_healthmonitor(self):
+ self._fake_router_edge_mapping()
+
+ keys = [('type', "TCP"),
+ ('tenant_id', self._tenant_id),
+ ('delay', 20),
+ ('timeout', 20),
+ ('max_retries', 2),
+ ('admin_state_up', False)]
+
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor()
+ ) as (subnet, pool, health_mon):
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ with self.vip(
+ router_id=self.router_id, pool=pool,
+ subnet=subnet):
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ health_mon, pool['pool']['id']
+ )
+ data = {'health_monitor': {'delay': 20,
+ 'timeout': 20,
+ 'max_retries': 2,
+ 'admin_state_up': False}}
+ req = self.new_update_request(
+ "health_monitors",
+ data,
+ health_mon['health_monitor']['id'])
+ res = self.deserialize(
+ self.fmt, req.get_response(self.ext_api))
+ for k, v in keys:
+ self.assertEqual(res['health_monitor'][k], v)
+
+ def test_delete_healthmonitor(self):
+ ctx = context.get_admin_context()
+ self._fake_router_edge_mapping()
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor(no_delete=True)
+ ) as (subnet, pool, health_mon):
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ with self.vip(
+ router_id=self.router_id, pool=pool,
+ subnet=subnet):
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ health_mon, pool['pool']['id']
+ )
+
+ req = self.new_delete_request('health_monitors',
+ health_mon['health_monitor']['id'])
+ res = req.get_response(self.ext_api)
+ self.assertEqual(res.status_int, 204)
+ qry = ctx.session.query(ldb.HealthMonitor)
+ qry = qry.filter_by(id=health_mon['health_monitor']['id'])
+ self.assertIsNone(qry.first())
+
+ def test_create_vip(self, **extras):
+ self._fake_router_edge_mapping()
+ expected = {
+ 'name': 'vip1',
+ 'description': '',
+ 'protocol_port': 80,
+ 'protocol': 'HTTP',
+ 'connection_limit': -1,
+ 'admin_state_up': True,
+ 'status': 'PENDING_CREATE',
+ 'tenant_id': self._tenant_id,
+ }
+
+ expected.update(extras)
+
+ name = expected['name']
+
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor()
+ ) as (subnet, pool, monitor):
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ expected['pool_id'] = pool['pool']['id']
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool['pool']['id']
+ )
+ with self.vip(
+ router_id=self.router_id, name=name,
+ pool=pool, subnet=subnet, **extras) as vip:
+ for k in ('id', 'address', 'port_id', 'pool_id'):
+ self.assertTrue(vip['vip'].get(k, None))
+ expected['status'] = 'ACTIVE'
+ self.assertEqual(
+ dict((k, v)
+ for k, v in vip['vip'].items() if k in expected),
+ expected
+ )
+
+ def test_update_vip(self):
+ self._fake_router_edge_mapping()
+ name = 'new_vip'
+ keys = [('name', name),
+ ('address', "10.0.0.2"),
+ ('protocol_port', 80),
+ ('connection_limit', 100),
+ ('admin_state_up', False),
+ ('status', 'ACTIVE')]
+
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor()
+ ) as (subnet, pool, monitor):
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool['pool']['id']
+ )
+ with self.vip(
+ router_id=self.router_id, name=name,
+ pool=pool, subnet=subnet) as vip:
+ keys.append(('subnet_id', vip['vip']['subnet_id']))
+ data = {'vip': {'name': name,
+ 'connection_limit': 100,
+ 'session_persistence':
+ {'type': "APP_COOKIE",
+                                 'cookie_name': "jsessionId"},
+ 'admin_state_up': False}}
+ req = self.new_update_request(
+ 'vips', data, vip['vip']['id'])
+ res = self.deserialize(self.fmt,
+ req.get_response(self.ext_api))
+ for k, v in keys:
+ self.assertEqual(res['vip'][k], v)
+
+ def test_delete_vip(self):
+ self._fake_router_edge_mapping()
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor()
+ ) as (subnet, pool, monitor):
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool['pool']['id']
+ )
+ with self.vip(
+ router_id=self.router_id,
+ pool=pool, subnet=subnet, no_delete=True) as vip:
+ req = self.new_delete_request('vips',
+ vip['vip']['id'])
+ res = req.get_response(self.ext_api)
+ self.assertEqual(res.status_int, 204)
+
+ def test_update_pool(self):
+ self._fake_router_edge_mapping()
+ data = {'pool': {'name': "new_pool",
+ 'admin_state_up': False}}
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor()
+ ) as (subnet, pool, monitor):
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool['pool']['id']
+ )
+ with self.vip(
+ router_id=self.router_id,
+ pool=pool, subnet=subnet):
+ req = self.new_update_request(
+ 'pools', data, pool['pool']['id'])
+ res = self.deserialize(self.fmt,
+ req.get_response(self.ext_api))
+ for k, v in data['pool'].items():
+ self.assertEqual(res['pool'][k], v)
+
+ def test_create_member(self):
+ self._fake_router_edge_mapping()
+
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor()
+ ) as (subnet, pool, monitor):
+ pool_id = pool['pool']['id']
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool['pool']['id']
+ )
+ with self.vip(
+ router_id=self.router_id,
+ pool=pool, subnet=subnet):
+ with contextlib.nested(
+ self.member(address='192.168.1.100',
+ protocol_port=80,
+ pool_id=pool_id),
+ self.member(router_id=self.router_id,
+ address='192.168.1.101',
+ protocol_port=80,
+ pool_id=pool_id)) as (member1, member2):
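+ # Both members, created with and without an explicit
+ # router_id, should be listed on the pool.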
+ req = self.new_show_request('pools',
+ pool_id,
+ fmt=self.fmt)
+ pool_update = self.deserialize(
+ self.fmt,
+ req.get_response(self.ext_api)
+ )
+ self.assertIn(member1['member']['id'],
+ pool_update['pool']['members'])
+ self.assertIn(member2['member']['id'],
+ pool_update['pool']['members'])
+
+ def _show_pool(self, pool_id):
+ req = self.new_show_request('pools', pool_id, fmt=self.fmt)
+ res = req.get_response(self.ext_api)
+ self.assertEqual(web_exc.HTTPOk.code, res.status_int)
+ return self.deserialize(self.fmt, res)
+
+ def test_update_member(self):
+ self._fake_router_edge_mapping()
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(name="pool1"),
+ self.pool(name="pool2"),
+ self.health_monitor()
+ ) as (subnet, pool1, pool2, monitor):
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool1['pool']['id']
+ )
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool2['pool']['id']
+ )
+ with self.vip(
+ router_id=self.router_id,
+ pool=pool1, subnet=subnet):
+ keys = [('address', "192.168.1.100"),
+ ('tenant_id', self._tenant_id),
+ ('protocol_port', 80),
+ ('weight', 10),
+ ('pool_id', pool2['pool']['id']),
+ ('admin_state_up', False),
+ ('status', 'ACTIVE')]
+ with self.member(
+ pool_id=pool1['pool']['id']) as member:
+
+ pool1_update = self._show_pool(pool1['pool']['id'])
+ self.assertEqual(len(pool1_update['pool']['members']), 1)
+ pool2_update = self._show_pool(pool2['pool']['id'])
+ self.assertFalse(pool2_update['pool']['members'])
+
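+ # Re-pointing pool_id moves the member from pool1 to pool2;
+ # the assertions below verify both pools reflect the move.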
+ data = {'member': {'pool_id': pool2['pool']['id'],
+ 'weight': 10,
+ 'admin_state_up': False}}
+ req = self.new_update_request('members',
+ data,
+ member['member']['id'])
+ raw_res = req.get_response(self.ext_api)
+ self.assertEqual(web_exc.HTTPOk.code, raw_res.status_int)
+ res = self.deserialize(self.fmt, raw_res)
+ for k, v in keys:
+ self.assertEqual(res['member'][k], v)
+ pool1_update = self._show_pool(pool1['pool']['id'])
+ pool2_update = self._show_pool(pool2['pool']['id'])
+ self.assertEqual(len(pool2_update['pool']['members']), 1)
+ self.assertFalse(pool1_update['pool']['members'])
+
+ def test_delete_member(self):
+ self._fake_router_edge_mapping()
+ with contextlib.nested(
+ self.subnet(),
+ self.pool(),
+ self.health_monitor()
+ ) as (subnet, pool, monitor):
+ pool_id = pool['pool']['id']
+ net_id = subnet['subnet']['network_id']
+ self._set_net_external(net_id)
+ self.plugin.create_pool_health_monitor(
+ context.get_admin_context(),
+ monitor, pool['pool']['id']
+ )
+ with self.vip(
+ router_id=self.router_id,
+ pool=pool, subnet=subnet):
+ with self.member(pool_id=pool_id,
+ no_delete=True) as member:
+ req = self.new_delete_request('members',
+ member['member']['id'])
+ res = req.get_response(self.ext_api)
+ self.assertEqual(res.status_int, 204)
+ pool_update = self._show_pool(pool['pool']['id'])
+ self.assertFalse(pool_update['pool']['members'])
ext_mgr=ext_mgr)
cfg.CONF.set_override('metadata_mode', None, 'NVP')
self.addCleanup(self.fc.reset_all)
- self.addCleanup(self.mock_nvpapi.stop)
- self.addCleanup(patch_sync.stop)
+ self.addCleanup(mock.patch.stopall)
class TestNiciraBasicGet(test_plugin.TestBasicGet, NiciraPluginV2TestCase):
"firewallRules": []
}
}
+ self._fake_virtualservers_dict = {}
+ self._fake_pools_dict = {}
+ self._fake_monitors_dict = {}
+ self._fake_app_profiles_dict = {}
+ self._fake_loadbalancer_config = {}
def set_fake_nvpapi(self, fake_nvpapi):
self._fake_nvpapi = fake_nvpapi
break
return self.return_helper(header, response)
+ #
+ # Fake Edge LBaaS calls
+ #
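+ # The fakes below mimic VSM REST semantics: objects live in
+ # per-edge dicts keyed by a generated vse id, successful calls
+ # return 204, and unknown edges or objects return 404.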
+ def create_vip(self, edge_id, vip_new):
+ if not self._fake_virtualservers_dict.get(edge_id):
+ self._fake_virtualservers_dict[edge_id] = {}
+ vip_vseid = uuidutils.generate_uuid()
+ self._fake_virtualservers_dict[edge_id][vip_vseid] = vip_new
+ header = {
+ 'status': 204,
+ 'location': "https://host/api/4.0/edges/%s"
+ "/loadbalancer/config/%s" % (edge_id, vip_vseid)}
+ response = ""
+ return self.return_helper(header, response)
+
+ def get_vip(self, edge_id, vip_vseid):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_virtualservers_dict.get(edge_id) or (
+ not self._fake_virtualservers_dict[edge_id].get(vip_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ response = self._fake_virtualservers_dict[edge_id][vip_vseid]
+ return self.return_helper(header, response)
+
+ def update_vip(self, edge_id, vip_vseid, vip_new):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_virtualservers_dict.get(edge_id) or (
+ not self._fake_virtualservers_dict[edge_id].get(vip_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ self._fake_virtualservers_dict[edge_id][vip_vseid].update(
+ vip_new)
+ return self.return_helper(header, response)
+
+ def delete_vip(self, edge_id, vip_vseid):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_virtualservers_dict.get(edge_id) or (
+ not self._fake_virtualservers_dict[edge_id].get(vip_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ del self._fake_virtualservers_dict[edge_id][vip_vseid]
+ return self.return_helper(header, response)
+
+ def create_pool(self, edge_id, pool_new):
+ if not self._fake_pools_dict.get(edge_id):
+ self._fake_pools_dict[edge_id] = {}
+ pool_vseid = uuidutils.generate_uuid()
+ self._fake_pools_dict[edge_id][pool_vseid] = pool_new
+ header = {
+ 'status': 204,
+ 'location': "https://host/api/4.0/edges/%s"
+ "/loadbalancer/config/%s" % (edge_id, pool_vseid)}
+ response = ""
+ return self.return_helper(header, response)
+
+ def get_pool(self, edge_id, pool_vseid):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_pools_dict.get(edge_id) or (
+ not self._fake_pools_dict[edge_id].get(pool_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ response = self._fake_pools_dict[edge_id][pool_vseid]
+ return self.return_helper(header, response)
+
+ def update_pool(self, edge_id, pool_vseid, pool_new):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_pools_dict.get(edge_id) or (
+ not self._fake_pools_dict[edge_id].get(pool_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ self._fake_pools_dict[edge_id][pool_vseid].update(
+ pool_new)
+ return self.return_helper(header, response)
+
+ def delete_pool(self, edge_id, pool_vseid):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_pools_dict.get(edge_id) or (
+ not self._fake_pools_dict[edge_id].get(pool_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ del self._fake_pools_dict[edge_id][pool_vseid]
+ return self.return_helper(header, response)
+
+ def create_health_monitor(self, edge_id, monitor_new):
+ if not self._fake_monitors_dict.get(edge_id):
+ self._fake_monitors_dict[edge_id] = {}
+ monitor_vseid = uuidutils.generate_uuid()
+ self._fake_monitors_dict[edge_id][monitor_vseid] = monitor_new
+ header = {
+ 'status': 204,
+ 'location': "https://host/api/4.0/edges/%s"
+ "/loadbalancer/config/%s" % (edge_id, monitor_vseid)}
+ response = ""
+ return self.return_helper(header, response)
+
+ def get_health_monitor(self, edge_id, monitor_vseid):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_monitors_dict.get(edge_id) or (
+ not self._fake_monitors_dict[edge_id].get(monitor_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ response = self._fake_monitors_dict[edge_id][monitor_vseid]
+ return self.return_helper(header, response)
+
+ def update_health_monitor(self, edge_id, monitor_vseid, monitor_new):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_monitors_dict.get(edge_id) or (
+ not self._fake_monitors_dict[edge_id].get(monitor_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ self._fake_monitors_dict[edge_id][monitor_vseid].update(
+ monitor_new)
+ return self.return_helper(header, response)
+
+ def delete_health_monitor(self, edge_id, monitor_vseid):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_monitors_dict.get(edge_id) or (
+ not self._fake_monitors_dict[edge_id].get(monitor_vseid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ del self._fake_monitors_dict[edge_id][monitor_vseid]
+ return self.return_helper(header, response)
+
+ def create_app_profile(self, edge_id, app_profile):
+ if not self._fake_app_profiles_dict.get(edge_id):
+ self._fake_app_profiles_dict[edge_id] = {}
+ app_profileid = uuidutils.generate_uuid()
+ self._fake_app_profiles_dict[edge_id][app_profileid] = app_profile
+ header = {
+ 'status': 204,
+ 'location': "https://host/api/4.0/edges/%s"
+ "/loadbalancer/config/%s" % (edge_id, app_profileid)}
+ response = ""
+ return self.return_helper(header, response)
+
+ def delete_app_profile(self, edge_id, app_profileid):
+ header = {'status': 404}
+ response = ""
+ if not self._fake_app_profiles_dict.get(edge_id) or (
+ not self._fake_app_profiles_dict[edge_id].get(app_profileid)):
+ return self.return_helper(header, response)
+ header = {'status': 204}
+ del self._fake_app_profiles_dict[edge_id][app_profileid]
+ return self.return_helper(header, response)
+
+ def get_loadbalancer_config(self, edge_id):
+ header = {'status': 204}
+ response = {'config': False}
+ if self._fake_loadbalancer_config.get(edge_id):
+ response['config'] = self._fake_loadbalancer_config[edge_id]
+ return self.return_helper(header, response)
+
+ def enable_service_loadbalancer(self, edge_id, config):
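+ # The fake only records that load balancing was enabled on the
+ # edge; the supplied config blob is not inspected.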
+ header = {'status': 204}
+ response = ""
+ self._fake_loadbalancer_config[edge_id] = True
+ return self.return_helper(header, response)
+
def return_helper(self, header, response):
status = int(header['status'])
if 200 <= status <= 300:
self._edges.clear()
self._lswitches.clear()
self.fake_firewall_dict = {}
+ self._fake_virtualservers_dict = {}
+ self._fake_pools_dict = {}
+ self._fake_monitors_dict = {}
+ self._fake_app_profiles_dict = {}
+ self._fake_loadbalancer_config = {}
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 VMware, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: linb, VMware
+
+import mock
+
+from neutron.common import config as n_config
+from neutron import context
+from neutron.openstack.common import uuidutils
+from neutron.plugins.nicira.dbexts import vcns_db
+from neutron.plugins.nicira.vshield import vcns_driver
+from neutron.plugins.nicira.vshield.common import exceptions as vcns_exc
+from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
+from neutron.tests.unit.nicira import get_fake_conf
+from neutron.tests.unit.nicira import VCNS_NAME
+from neutron.tests.unit.nicira.vshield import fake_vcns
+
+_uuid = uuidutils.generate_uuid
+
+VSE_ID = 'edge-1'
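+# Binding row mapping a neutron pool onto the fake edge; pool_id is
+# filled in by each test once the pool exists.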
+POOL_MAP_INFO = {
+ 'pool_id': None,
+ 'edge_id': VSE_ID,
+ 'pool_vseid': 'pool-1'}
+
+VCNS_CONFIG_FILE = get_fake_conf("vcns.ini.test")
+
+
+class VcnsDriverTestCase(test_db_loadbalancer.LoadBalancerPluginDbTestCase):
+
+ def vcns_loadbalancer_patch(self):
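+ # Route every vcns API call the LB driver makes to the matching
+ # FakeVcns method, so the tests run without a real VSM backend.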
+ instance = self.mock_vcns.start()
+ instance.return_value.create_vip.side_effect = (
+ self.fc2.create_vip)
+ instance.return_value.get_vip.side_effect = (
+ self.fc2.get_vip)
+ instance.return_value.update_vip.side_effect = (
+ self.fc2.update_vip)
+ instance.return_value.delete_vip.side_effect = (
+ self.fc2.delete_vip)
+ instance.return_value.create_pool.side_effect = (
+ self.fc2.create_pool)
+ instance.return_value.get_pool.side_effect = (
+ self.fc2.get_pool)
+ instance.return_value.update_pool.side_effect = (
+ self.fc2.update_pool)
+ instance.return_value.delete_pool.side_effect = (
+ self.fc2.delete_pool)
+ instance.return_value.create_health_monitor.side_effect = (
+ self.fc2.create_health_monitor)
+ instance.return_value.get_health_monitor.side_effect = (
+ self.fc2.get_health_monitor)
+ instance.return_value.update_health_monitor.side_effect = (
+ self.fc2.update_health_monitor)
+ instance.return_value.delete_health_monitor.side_effect = (
+ self.fc2.delete_health_monitor)
+ instance.return_value.create_app_profile.side_effect = (
+ self.fc2.create_app_profile)
+ instance.return_value.delete_app_profile.side_effect = (
+ self.fc2.delete_app_profile)
+ self.pool_id = None
+ self.vip_id = None
+
+ def setUp(self):
+
+ n_config.parse(['--config-file', VCNS_CONFIG_FILE])
+ # mock vcns
+ self.fc2 = fake_vcns.FakeVcns(unique_router_name=False)
+ self.mock_vcns = mock.patch(VCNS_NAME, autospec=True)
+ self.vcns_loadbalancer_patch()
+
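+ # The plugin-side callback is not exercised by these driver
+ # tests, so a bare Mock is enough.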
+ self.nvp_service_plugin_callback = mock.Mock()
+ self.driver = vcns_driver.VcnsDriver(self.nvp_service_plugin_callback)
+
+ super(VcnsDriverTestCase, self).setUp()
+ self.addCleanup(self.fc2.reset_all)
+ self.addCleanup(self.mock_vcns.stop)
+
+
+class TestEdgeLbDriver(VcnsDriverTestCase):
+
+ def test_create_and_get_vip(self):
+ ctx = context.get_admin_context()
+ with self.pool(no_delete=True) as pool:
+ self.pool_id = pool['pool']['id']
+ POOL_MAP_INFO['pool_id'] = pool['pool']['id']
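+ # Seed the binding so the driver can resolve the pool's vse id
+ # on this edge.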
+ vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO)
+ with self.vip(pool=pool) as res:
+ vip_create = res['vip']
+ self.driver.create_vip(ctx, VSE_ID, vip_create)
+ vip_get = self.driver.get_vip(ctx, vip_create['id'])
+ for k, v in vip_get.iteritems():
+ self.assertEqual(vip_create[k], v)
+
+ def test_update_vip(self):
+ ctx = context.get_admin_context()
+ with self.pool(no_delete=True) as pool:
+ self.pool_id = pool['pool']['id']
+ POOL_MAP_INFO['pool_id'] = pool['pool']['id']
+ vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO)
+ with self.vip(pool=pool) as res:
+ vip_create = res['vip']
+ self.driver.create_vip(ctx, VSE_ID, vip_create)
+ vip_update = {'id': vip_create['id'],
+ 'pool_id': pool['pool']['id'],
+ 'name': 'update_name',
+ 'description': 'description',
+ 'address': 'update_address',
+ 'port_id': 'update_port_id',
+ 'protocol_port': 'protocol_port',
+ 'protocol': 'update_protocol'}
+ self.driver.update_vip(ctx, vip_update)
+ vip_get = self.driver.get_vip(ctx, vip_create['id'])
+ for k, v in vip_get.iteritems():
+ if k in vip_update:
+ self.assertEqual(vip_update[k], v)
+
+ def test_delete_vip(self):
+ ctx = context.get_admin_context()
+ with self.pool(no_delete=True) as pool:
+ self.pool_id = pool['pool']['id']
+ POOL_MAP_INFO['pool_id'] = pool['pool']['id']
+ vcns_db.add_vcns_edge_pool_binding(ctx.session, POOL_MAP_INFO)
+ with self.vip(pool=pool) as res:
+ vip_create = res['vip']
+ self.driver.create_vip(ctx, VSE_ID, vip_create)
+ self.driver.delete_vip(ctx, vip_create['id'])
+ self.assertRaises(vcns_exc.VcnsNotFound,
+ self.driver.get_vip,
+ ctx,
+ vip_create['id'])
+
+ # Test pool operations
+ def test_create_and_get_pool(self):
+ ctx = context.get_admin_context()
+ with self.pool(no_delete=True) as p:
+ self.pool_id = p['pool']['id']
+ pool_create = p['pool']
+ self.driver.create_pool(ctx, VSE_ID, pool_create, [])
+ pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID)
+ for k, v in pool_get.iteritems():
+ self.assertEqual(pool_create[k], v)
+
+ def test_update_pool(self):
+ ctx = context.get_admin_context()
+ with self.pool(no_delete=True) as p:
+ self.pool_id = p['pool']['id']
+ pool_create = p['pool']
+ self.driver.create_pool(ctx, VSE_ID, pool_create, [])
+ pool_update = {'id': pool_create['id'],
+ 'lb_method': 'lb_method',
+ 'name': 'update_name',
+ 'members': [],
+ 'health_monitors': []}
+ self.driver.update_pool(ctx, VSE_ID, pool_update, [])
+ pool_get = self.driver.get_pool(ctx, pool_create['id'], VSE_ID)
+ for k, v in pool_get.iteritems():
+ if k in pool_update:
+ self.assertEqual(pool_update[k], v)
+
+ def test_delete_pool(self):
+ ctx = context.get_admin_context()
+ with self.pool(no_delete=True) as p:
+ self.pool_id = p['pool']['id']
+ pool_create = p['pool']
+ self.driver.create_pool(ctx, VSE_ID, pool_create, [])
+ self.driver.delete_pool(ctx, pool_create['id'], VSE_ID)
+ self.assertRaises(vcns_exc.VcnsNotFound,
+ self.driver.get_pool,
+ ctx,
+ pool_create['id'],
+ VSE_ID)
+
+ def test_create_and_get_monitor(self):
+ ctx = context.get_admin_context()
+ with self.health_monitor(no_delete=True) as m:
+ monitor_create = m['health_monitor']
+ self.driver.create_health_monitor(ctx, VSE_ID, monitor_create)
+ monitor_get = self.driver.get_health_monitor(
+ ctx, monitor_create['id'], VSE_ID)
+ for k, v in monitor_get.iteritems():
+ self.assertEqual(monitor_create[k], v)
+
+ def test_update_health_monitor(self):
+ ctx = context.get_admin_context()
+ with self.health_monitor(no_delete=True) as m:
+ monitor_create = m['health_monitor']
+ self.driver.create_health_monitor(
+ ctx, VSE_ID, monitor_create)
+ monitor_update = {'id': monitor_create['id'],
+ 'delay': 'new_delay',
+ 'timeout': "new_timeout",
+ 'type': 'type',
+ 'max_retries': "max_retries"}
+ self.driver.update_health_monitor(
+ ctx, VSE_ID, monitor_create, monitor_update)
+ monitor_get = self.driver.get_health_monitor(
+ ctx, monitor_create['id'], VSE_ID)
+ for k, v in monitor_get.iteritems():
+ if k in monitor_update:
+ self.assertEqual(monitor_update[k], v)
+
+ def test_delete_health_monitor(self):
+ ctx = context.get_admin_context()
+ with self.health_monitor(no_delete=True) as m:
+ monitor_create = m['health_monitor']
+ self.driver.create_health_monitor(ctx, VSE_ID, monitor_create)
+ self.driver.delete_health_monitor(
+ ctx, monitor_create['id'], VSE_ID)
+ self.assertRaises(vcns_exc.VcnsNotFound,
+ self.driver.get_health_monitor,
+ ctx,
+ monitor_create['id'],
+ VSE_ID)