#
# @author: Mark McClain, DreamHost
+import functools
+
+from alembic import context
from alembic import op
import sqlalchemy as sa
CISCO_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2'
+def skip_if_offline(func):
+ """Decorator for skipping migrations in offline mode."""
+ @functools.wraps(func)
+ def decorator(*args, **kwargs):
+ if context.is_offline_mode():
+ return
+ return func(*args, **kwargs)
+
+ return decorator
+
+
+def raise_if_offline(func):
+ """Decorator for raising if a function is called in offline mode."""
+ @functools.wraps(func)
+ def decorator(*args, **kwargs):
+ if context.is_offline_mode():
+ raise RuntimeError(_("%s cannot be called while in offline mode") %
+ func.__name__)
+ return func(*args, **kwargs)
+
+ return decorator
+
+
+@raise_if_offline
+def schema_has_table(table_name):
+ """Check whether the specified table exists in the current schema.
+
+ This method cannot be executed in offline mode.
+ """
+ bind = op.get_bind()
+ insp = sa.engine.reflection.Inspector.from_engine(bind)
+ return table_name in insp.get_table_names()
+
+
+@raise_if_offline
+def schema_has_column(table_name, column_name):
+ """Check whether the specified column exists in the current schema.
+
+ This method cannot be executed in offline mode.
+ """
+ bind = op.get_bind()
+ insp = sa.engine.reflection.Inspector.from_engine(bind)
+ # first check that the table exists
+ if not schema_has_table(table_name):
+ return
+ # check whether column_name exists in table columns
+ return column_name in [column['name'] for column in
+ insp.get_columns(table_name)]
+
+
+@raise_if_offline
+def alter_column_if_exists(table_name, column_name, **kwargs):
+ """Alter a column only if it exists in the schema."""
+ if schema_has_column(table_name, column_name):
+ op.alter_column(table_name, column_name, **kwargs)
+
+
+@raise_if_offline
+def drop_table_if_exists(table_name):
+ if schema_has_table(table_name):
+ op.drop_table(table_name)
+
+
+@raise_if_offline
+def rename_table_if_exists(old_table_name, new_table_name):
+ if schema_has_table(old_table_name):
+ op.rename_table(old_table_name, new_table_name)
+
+
def should_run(active_plugins, migrate_plugins):
if '*' in migrate_plugins:
return True
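# Illustrative usage sketch (hypothetical, with made-up table and column
# names): with the decorators and conditional helpers above, a
# plugin-specific migration no longer needs a migration_for_plugins
# filter; it simply skips itself in offline mode and guards each
# operation on the live schema:
#
#     @migration.skip_if_offline
#     def upgrade(active_plugins=None, options=None):
#         migration.alter_column_if_exists(
#             'example_table', 'example_column',
#             nullable=False,
#             existing_type=sa.Integer)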
revision = '10cd28e692e9'
down_revision = '1b837a7125a9'
-# Change to ['*'] if this migration applies to all plugins
-
-migration_for_plugins = [
- 'neutron.plugins.nuage.plugin.NuagePlugin'
-]
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
op.create_table(
'routerroutes_mapping',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
)
- op.create_table(
- 'routerroutes',
- sa.Column('destination', sa.String(length=64), nullable=False),
- sa.Column('nexthop', sa.String(length=64), nullable=False),
- sa.Column('router_id', sa.String(length=36), nullable=False),
- sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
- ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('destination', 'nexthop',
- 'router_id'),
- )
+    # This table might already exist; it could have been created by a
+    # different plugin that was configured before the nuage one.
+ if op.get_bind().engine.dialect.name == 'postgresql':
+ migration.create_table_if_not_exist_psql(
+ 'routerroutes',
+ ("(destination VARCHAR(64) NOT NULL,"
+ "nexthop VARCHAR(64) NOT NULL,"
+ "router_id VARCHAR(36) NOT NULL,"
+ "PRIMARY KEY (destination, nexthop, router_id),"
+ "FOREIGN KEY (router_id) REFERENCES routers (id) "
+ "ON DELETE CASCADE ON UPDATE CASCADE)"))
+ else:
+ op.execute("CREATE TABLE IF NOT EXISTS routerroutes( "
+ "destination VARCHAR(64) NOT NULL,"
+ "nexthop VARCHAR(64) NOT NULL,"
+ "router_id VARCHAR(36) NOT NULL,"
+ "PRIMARY KEY (destination, nexthop, router_id),"
+ "FOREIGN KEY (router_id) REFERENCES routers (id) "
+ "ON DELETE CASCADE ON UPDATE CASCADE)")
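# The dialect split above exists because "CREATE TABLE IF NOT EXISTS" is only
# natively available starting with PostgreSQL 9.1. A minimal sketch of such a
# helper, assuming PostgreSQL 9.0+ DO blocks and not necessarily matching the
# actual neutron.db.migration implementation:
#
#     def create_table_if_not_exist_psql(table_name, values):
#         # Emit a plpgsql block that creates the table only when it is
#         # missing from pg_class; safe to run repeatedly.
#         op.execute(
#             "DO $$ BEGIN "
#             "IF NOT EXISTS (SELECT 1 FROM pg_class "
#             "WHERE relname = '%(name)s') THEN "
#             "CREATE TABLE %(name)s %(columns)s; "
#             "END IF; END $$;" % {'name': table_name, 'columns': values})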
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.drop_table('routerroutes')
- op.drop_table('routerroutes_mapping')
+    # The routerroutes table should not be dropped, as it may be in use by
+    # other plugins.
+ op.execute('DROP TABLE IF EXISTS routerroutes_mapping')
revision = '1b837a7125a9'
down_revision = '6be312499f9'
-migration_for_plugins = [
- 'neutron.plugins.ml2.plugin.Ml2Plugin'
-]
from alembic import op
import sqlalchemy as sa
-from neutron.db import migration
-
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
op.create_table(
'cisco_ml2_apic_epgs',
sa.Column('network_id', sa.String(length=255), nullable=False),
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
op.drop_table('cisco_ml2_apic_contracts')
op.drop_table('cisco_ml2_apic_port_profiles')
op.drop_table('cisco_ml2_apic_epgs')
# Change to ['*'] if this migration applies to all plugins
-migration_for_plugins = [
- 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin'
-]
+# This migration will be executed only if the neutron DB schema
+# contains the tables for the load balancing service plugin.
+# This migration will be skipped when executed in offline mode.
-from alembic import op
-import sqlalchemy as sa
+import sqlalchemy as sa
from neutron.db import migration
+@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('poolstatisticss', 'bytes_in', nullable=False,
- existing_type=sa.BigInteger())
- op.alter_column('poolstatisticss', 'bytes_out', nullable=False,
- existing_type=sa.BigInteger())
- op.alter_column('poolstatisticss', 'active_connections', nullable=False,
- existing_type=sa.BigInteger())
- op.alter_column('poolstatisticss', 'total_connections', nullable=False,
- existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'bytes_in',
+ nullable=False,
+ existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'bytes_out',
+ nullable=False,
+ existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'active_connections',
+ nullable=False,
+ existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'total_connections',
+ nullable=False,
+ existing_type=sa.BigInteger())
+@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('poolstatisticss', 'bytes_in', nullable=True,
- existing_type=sa.BigInteger())
- op.alter_column('poolstatisticss', 'bytes_out', nullable=True,
- existing_type=sa.BigInteger())
- op.alter_column('poolstatisticss', 'active_connections', nullable=True,
- existing_type=sa.BigInteger())
- op.alter_column('poolstatisticss', 'total_connections', nullable=True,
- existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'bytes_in',
+ nullable=True,
+ existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'bytes_out',
+ nullable=True,
+ existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'active_connections',
+ nullable=True,
+ existing_type=sa.BigInteger())
+ migration.alter_column_if_exists(
+ 'poolstatisticss', 'total_connections',
+ nullable=True,
+ existing_type=sa.BigInteger())
revision = '2db5203cb7a9'
down_revision = '10cd28e692e9'
-migration_for_plugins = [
- 'neutron.plugins.nuage.plugin.NuagePlugin'
-]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
+# This migration will be executed only if the neutron DB schema contains
+# the tables for the nuage plugin.
+# This migration will be skipped when executed in offline mode.
-def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
+@migration.skip_if_offline
+def upgrade(active_plugins=None, options=None):
+ # These tables will be created even if the nuage plugin is not enabled.
+ # This is fine as they would be created anyway by the healing migration.
op.create_table(
'nuage_floatingip_pool_mapping',
sa.Column('fip_pool_id', sa.String(length=36), nullable=False),
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('fip_id'),
)
- op.rename_table('net_partitions', 'nuage_net_partitions')
- op.rename_table('net_partition_router_mapping',
- 'nuage_net_partition_router_mapping')
- op.rename_table('router_zone_mapping', 'nuage_router_zone_mapping')
- op.rename_table('subnet_l2dom_mapping', 'nuage_subnet_l2dom_mapping')
- op.rename_table('port_mapping', 'nuage_port_mapping')
- op.rename_table('routerroutes_mapping', 'nuage_routerroutes_mapping')
+ migration.rename_table_if_exists('net_partitions',
+ 'nuage_net_partitions')
+ migration.rename_table_if_exists('net_partition_router_mapping',
+ 'nuage_net_partition_router_mapping')
+ migration.rename_table_if_exists('router_zone_mapping',
+ 'nuage_router_zone_mapping')
+ migration.rename_table_if_exists('subnet_l2dom_mapping',
+ 'nuage_subnet_l2dom_mapping')
+ migration.rename_table_if_exists('port_mapping',
+ 'nuage_port_mapping')
+ migration.rename_table_if_exists('routerroutes_mapping',
+ 'nuage_routerroutes_mapping')
+@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.drop_table('nuage_floatingip_mapping')
- op.drop_table('nuage_floatingip_pool_mapping')
- op.rename_table('nuage_net_partitions', 'net_partitions')
- op.rename_table('nuage_net_partition_router_mapping',
- 'net_partition_router_mapping')
- op.rename_table('nuage_router_zone_mapping', 'router_zone_mapping')
- op.rename_table('nuage_subnet_l2dom_mapping', 'subnet_l2dom_mapping')
- op.rename_table('nuage_port_mapping', 'port_mapping')
- op.rename_table('nuage_routerroutes_mapping', 'routerroutes_mapping')
+ migration.drop_table_if_exists('nuage_floatingip_mapping')
+ migration.drop_table_if_exists('nuage_floatingip_pool_mapping')
+ migration.rename_table_if_exists('nuage_net_partitions', 'net_partitions')
+ migration.rename_table_if_exists('nuage_net_partition_router_mapping',
+ 'net_partition_router_mapping')
+ migration.rename_table_if_exists('nuage_router_zone_mapping',
+ 'router_zone_mapping')
+ migration.rename_table_if_exists('nuage_subnet_l2dom_mapping',
+ 'subnet_l2dom_mapping')
+ migration.rename_table_if_exists('nuage_port_mapping', 'port_mapping')
+ migration.rename_table_if_exists('nuage_routerroutes_mapping',
+ 'routerroutes_mapping')
revision = '33c3db036fe4'
down_revision = 'b65aa907aec'
-# Change to ['*'] if this migration applies to all plugins
-
-migration_for_plugins = [
- 'neutron.services.metering.metering_plugin.MeteringPlugin'
-]
-
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
if op.get_bind().engine.dialect.name == 'postgresql':
migration.create_table_if_not_exist_psql(
'meteringlabels',
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
pass
revision = '4eca4a84f08a'
down_revision = '33c3db036fe4'
-# Change to ['*'] if this migration applies to all plugins
-
-migration_for_plugins = [
- 'neutron.plugins.ml2.plugin.Ml2Plugin'
-]
from alembic import op
-import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.drop_table('cisco_ml2_credentials')
+ op.execute('DROP TABLE IF EXISTS cisco_ml2_credentials')
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.create_table(
- 'cisco_ml2_credentials',
- sa.Column('credential_id', sa.String(length=255), nullable=True),
- sa.Column('tenant_id', sa.String(length=255), nullable=False),
- sa.Column('credential_name', sa.String(length=255), nullable=False),
- sa.Column('user_name', sa.String(length=255), nullable=True),
- sa.Column('password', sa.String(length=255), nullable=True),
- sa.PrimaryKeyConstraint('tenant_id', 'credential_name')
- )
+ if op.get_bind().engine.dialect.name == 'postgresql':
+ migration.create_table_if_not_exist_psql(
+ 'cisco_ml2_credentials',
+ ("(credential_id VARCHAR(255) NULL,"
+ "tenant_id VARCHAR(255) NOT NULL,"
+ "credential_name VARCHAR(255) NOT NULL,"
+ "user_name VARCHAR(255) NULL,"
+ "password VARCHAR(255) NULL,"
+ "PRIMARY KEY (tenant_id, credential_name))"))
+ else:
+ op.execute('CREATE TABLE IF NOT EXISTS cisco_ml2_credentials( '
+ 'credential_id VARCHAR(255) NULL,'
+ 'tenant_id VARCHAR(255) NOT NULL,'
+ 'credential_name VARCHAR(255) NOT NULL,'
+ 'user_name VARCHAR(255) NULL,'
+ 'password VARCHAR(255) NULL,'
+ 'PRIMARY KEY (tenant_id, credential_name))')
down_revision = '2db5203cb7a9'
-from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql
+from neutron.db import migration
from neutron.plugins.cisco.common import cisco_constants
-PLUGINS = {
- 'brocade': 'neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2',
- 'cisco': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'ml2': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'mlnx': 'neutron.plugins.mlnx.mlnx_plugin.MellanoxEswitchPlugin',
- 'vmware': [
- 'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
- 'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
- 'neutron.plugins.vmware.plugin.NsxPlugin',
- 'neutron.plugins.vmware.plugin.NsxServicePlugin',
- ],
- 'agents': [
- 'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
- 'neutron.plugins.nec.nec_plugin.NECPluginV2',
- 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
- 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
- 'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
- 'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
- ],
-}
+# This migration will be executed only if the neutron DB schema contains
+# the tables for the selected plugins and agents.
+# The required tables and columns are:
+# brocadeports.port_id
+# segmentation_id_allocation.allocated
+# cisco_n1kv_profile_bindings.tenant_id
+# cisco_network_profiles.multicast_ip_index
+# cisco_n1kv_vlan_allocations.allocated
+# nsxrouterextattributess.service_router
+# nsxrouterextattributess.distributed
+# qosqueues.default
+# agents.admin_state_up
+# ml2_gre_allocations.allocated
+# ml2_vxlan_allocations.allocated
+# This migration will be skipped when executed in offline mode.
def upgrade(active_plugins=None, options=None):
run(active_plugins, None)
+@migration.skip_if_offline
def run(active_plugins, default):
- if PLUGINS['ml2'] in active_plugins:
- set_default_ml2(default)
- if PLUGINS['mlnx'] in active_plugins:
- set_default_agents(default)
- set_default_mlnx(default)
- if PLUGINS['brocade'] in active_plugins:
- set_default_agents(default)
- set_default_brocade(default)
- if PLUGINS['cisco'] in active_plugins:
- set_default_cisco(default)
- if set(PLUGINS['vmware']) & set(active_plugins):
- set_default_vmware(default)
- set_default_agents(default)
- if set(PLUGINS['agents']) & set(active_plugins):
- set_default_agents(default)
+ set_default_ml2(default)
+ set_default_mlnx(default)
+ set_default_brocade(default)
+ set_default_cisco(default)
+ set_default_vmware(default)
+ set_default_agents(default)
def set_default_brocade(default):
if default:
default = ''
- op.alter_column('brocadeports', 'port_id',
- server_default=default, existing_type=sa.String(36))
+ migration.alter_column_if_exists(
+ 'brocadeports', 'port_id',
+ server_default=default,
+ existing_type=sa.String(36))
def set_default_mlnx(default):
if default:
default = sqlalchemy.sql.false()
- op.alter_column('segmentation_id_allocation', 'allocated',
- server_default=default, existing_nullable=False,
- existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'segmentation_id_allocation', 'allocated',
+ server_default=default,
+ existing_nullable=False,
+ existing_type=sa.Boolean)
def set_default_cisco(default):
profile_default = '0' if default else None
if default:
default = sqlalchemy.sql.false()
- op.alter_column('cisco_n1kv_profile_bindings', 'tenant_id',
- existing_type=sa.String(length=36),
- server_default=profile_binding_default,
- existing_nullable=False)
- op.alter_column('cisco_network_profiles', 'multicast_ip_index',
- server_default=profile_default, existing_type=sa.Integer)
- op.alter_column('cisco_n1kv_vlan_allocations', 'allocated',
- existing_type=sa.Boolean,
- server_default=default, existing_nullable=False)
- op.alter_column('cisco_n1kv_vxlan_allocations', 'allocated',
- existing_type=sa.Boolean,
- server_default=default, existing_nullable=False)
+ migration.alter_column_if_exists(
+ 'cisco_n1kv_profile_bindings', 'tenant_id',
+ existing_type=sa.String(length=36),
+ server_default=profile_binding_default,
+ existing_nullable=False)
+ migration.alter_column_if_exists(
+ 'cisco_network_profiles', 'multicast_ip_index',
+ server_default=profile_default,
+ existing_type=sa.Integer)
+ migration.alter_column_if_exists(
+ 'cisco_n1kv_vlan_allocations', 'allocated',
+ existing_type=sa.Boolean,
+ server_default=default,
+ existing_nullable=False)
def set_default_vmware(default=None):
if default:
default = sqlalchemy.sql.false()
- op.alter_column('nsxrouterextattributess', 'service_router',
- server_default=default, existing_nullable=False,
- existing_type=sa.Boolean)
- op.alter_column('nsxrouterextattributess', 'distributed',
- server_default=default, existing_nullable=False,
- existing_type=sa.Boolean)
- op.alter_column('qosqueues', 'default',
- server_default=default, existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'nsxrouterextattributess', 'service_router',
+ server_default=default,
+ existing_nullable=False,
+ existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'nsxrouterextattributess', 'distributed',
+ server_default=default,
+ existing_nullable=False,
+ existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'qosqueues', 'default',
+ server_default=default,
+ existing_type=sa.Boolean)
def set_default_agents(default=None):
if default:
default = sqlalchemy.sql.true()
- op.alter_column('agents', 'admin_state_up',
- server_default=default, existing_nullable=False,
- existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'agents', 'admin_state_up',
+ server_default=default,
+ existing_nullable=False,
+ existing_type=sa.Boolean)
def set_default_ml2(default=None):
if default:
default = sqlalchemy.sql.false()
- op.alter_column('ml2_gre_allocations', 'allocated',
- server_default=default, existing_nullable=False,
- existing_type=sa.Boolean)
- op.alter_column('ml2_vxlan_allocations', 'allocated',
- server_default=default, existing_nullable=False,
- existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'ml2_gre_allocations', 'allocated',
+ server_default=default,
+ existing_nullable=False,
+ existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'ml2_vxlan_allocations', 'allocated',
+ server_default=default,
+ existing_nullable=False,
+ existing_type=sa.Boolean)
revision = '54f7549a0e5f'
down_revision = 'icehouse'
-# Change to ['*'] if this migration applies to all plugins
+# This migration will be executed only if the neutron DB schema
+# contains the tables for the VPN service plugin.
+# This migration will be skipped when executed in offline mode.
-migration_for_plugins = [
- 'neutron.services.vpn.plugin.VPNDriverPlugin'
-]
-from alembic import op
import sqlalchemy as sa
-
from neutron.db import migration
+@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('ipsec_site_connections', 'peer_address',
- existing_type=sa.String(255), nullable=False)
+ migration.alter_column_if_exists(
+ 'ipsec_site_connections', 'peer_address',
+ existing_type=sa.String(255),
+ nullable=False)
+@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('ipsec_site_connections', 'peer_address', nullable=True,
- existing_type=sa.String(255))
+ migration.alter_column_if_exists(
+ 'ipsec_site_connections', 'peer_address',
+ nullable=True,
+ existing_type=sa.String(255))
revision = '6be312499f9'
down_revision = 'd06e871c0d5'
-# Change to ['*'] if this migration applies to all plugins
+# This migration will be executed only if the neutron DB schema
+# contains the tables for the cisco plugin.
+# This migration will be skipped when executed in offline mode.
-migration_for_plugins = [
- 'neutron.plugins.cisco.network_plugin.PluginV2'
-]
-
-from alembic import op
import sqlalchemy as sa
from neutron.db import migration
+@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=False,
- existing_type=sa.Integer)
+ migration.alter_column_if_exists(
+ 'cisco_nexusport_bindings', 'vlan_id',
+ nullable=False,
+ existing_type=sa.Integer)
+@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('cisco_nexusport_bindings', 'vlan_id', nullable=True,
- existing_type=sa.Integer)
+ migration.alter_column_if_exists(
+ 'cisco_nexusport_bindings', 'vlan_id',
+ nullable=True,
+ existing_type=sa.Integer)
"""set_length_of_protocol_field
Revision ID: b65aa907aec
-Revises: 2447ad0e9585
+Revises: 1e5dd1d09b22
Create Date: 2014-03-21 16:30:10.626649
"""
revision = 'b65aa907aec'
down_revision = '1e5dd1d09b22'
-# Change to ['*'] if this migration applies to all plugins
+# This migration will be executed only if the neutron DB schema contains
+# the tables for the firewall service plugin.
+# This migration will be skipped when executed in offline mode.
-migration_for_plugins = [
- 'neutron.services.firewall.fwaas_plugin.FirewallPlugin'
-]
-
-from alembic import op
import sqlalchemy as sa
from neutron.db import migration
+@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('firewall_rules', 'protocol', type_=sa.String(40),
- existing_nullable=True)
+ migration.alter_column_if_exists(
+ 'firewall_rules', 'protocol',
+ type_=sa.String(40),
+ existing_nullable=True)
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
pass
revision = 'd06e871c0d5'
down_revision = '4eca4a84f08a'
-# Change to ['*'] if this migration applies to all plugins
+# This migration will be executed only if the neutron DB schema
+# contains the tables for the ML2 plugin's Brocade driver.
+# This migration will be skipped when executed in offline mode.
-migration_for_plugins = [
- 'neutron.plugins.ml2.plugin.Ml2Plugin'
-]
-from alembic import op
import sqlalchemy as sa
from neutron.db import migration
+@migration.skip_if_offline
def upgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=False,
- existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'ml2_brocadeports', 'admin_state_up',
+ nullable=False,
+ existing_type=sa.Boolean)
+@migration.skip_if_offline
def downgrade(active_plugins=None, options=None):
- if not migration.should_run(active_plugins, migration_for_plugins):
- return
-
- op.alter_column('ml2_brocadeports', 'admin_state_up', nullable=True,
- existing_type=sa.Boolean)
+ migration.alter_column_if_exists(
+ 'ml2_brocadeports', 'admin_state_up',
+ nullable=True,
+ existing_type=sa.Boolean)
class TestDbMigration(base.BaseTestCase):
+
+ def setUp(self):
+ super(TestDbMigration, self).setUp()
+ mock.patch('alembic.op.get_bind').start()
+ self.mock_alembic_is_offline = mock.patch(
+ 'alembic.context.is_offline_mode', return_value=False).start()
+ self.mock_alembic_is_offline.return_value = False
+ self.mock_sa_inspector = mock.patch(
+ 'sqlalchemy.engine.reflection.Inspector').start()
+
def test_should_run_plugin_in_list(self):
self.assertTrue(migration.should_run(['foo'], ['foo', 'bar']))
self.assertFalse(migration.should_run(['foo'], ['bar']))
def test_should_run_plugin_wildcard(self):
self.assertTrue(migration.should_run(['foo'], ['*']))
+ def _prepare_mocked_sqlalchemy_inspector(self):
+ mock_inspector = mock.MagicMock()
+ mock_inspector.get_table_names.return_value = ['foo', 'bar']
+ mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
+ {'name': 'bar_column'}]
+ self.mock_sa_inspector.from_engine.return_value = mock_inspector
+
+ def test_schema_has_table(self):
+ self._prepare_mocked_sqlalchemy_inspector()
+ self.assertTrue(migration.schema_has_table('foo'))
+
+ def test_schema_has_table_raises_if_offline(self):
+ self.mock_alembic_is_offline.return_value = True
+ self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
+
+ def test_schema_has_column_missing_table(self):
+ self._prepare_mocked_sqlalchemy_inspector()
+ self.assertFalse(migration.schema_has_column('meh', 'meh'))
+
+ def test_schema_has_column(self):
+ self._prepare_mocked_sqlalchemy_inspector()
+ self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
+
+ def test_schema_has_column_raises_if_offline(self):
+ self.mock_alembic_is_offline.return_value = True
+ self.assertRaises(RuntimeError, migration.schema_has_column,
+ 'foo', 'foo_col')
+
+ def test_schema_has_column_missing_column(self):
+ self._prepare_mocked_sqlalchemy_inspector()
+ self.assertFalse(migration.schema_has_column(
+ 'foo', column_name='meh'))
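+
+    def test_alter_column_if_exists_skips_missing_column(self):
+        # Hypothetical extra check (not in the original change): when the
+        # column is absent, the conditional helper should not invoke
+        # op.alter_column at all.
+        self._prepare_mocked_sqlalchemy_inspector()
+        with mock.patch('alembic.op.alter_column') as alter_column:
+            migration.alter_column_if_exists('foo', 'meh', nullable=True)
+            self.assertFalse(alter_column.called)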
+
class TestCli(base.BaseTestCase):
def setUp(self):