Forwarded: not-needed
Last-Update: 2014-10-10
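Description: make the Juno alembic migrations runnable on SQLite
 SQLite's ALTER TABLE only supports renaming a table and adding a column, so
 a number of these migrations fail when run against an SQLite database.
 Where a migration alters, drops or renames a column, changes a server
 default, or adds a constraint, an SQLite branch either skips the operation
 or emulates it: the rows are copied into a temporary backup table, the table
 is recreated with the desired definition, and the rows are copied back.
 In the NEC mapping migrations the SQLAlchemy INSERT ... SELECT constructs
 are replaced with equivalent plain SQL using the || concatenation operator.
 A few places are still stubbed out and marked "Fix this for SQLite".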
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
-@@ -46,12 +46,15 @@ def _migrate_data(old_table, new_table):
- "WHERE new_t.router_id = old_t.router_id)") %
- {'new_table': new_table, 'old_table': old_table})
- else:
-- op.execute(("UPDATE %(new_table)s new_t "
-- "INNER JOIN %(old_table)s as old_t "
-- "ON new_t.router_id = old_t.router_id "
-- "SET new_t.distributed = old_t.distributed, "
-- "new_t.service_router = old_t.service_router") %
-- {'new_table': new_table, 'old_table': old_table})
-+ if op.get_bind().engine.name == 'sqlite':
-+ print("Fix this for SQLite")
-+ else:
-+ op.execute(("UPDATE %(new_table)s new_t "
-+ "INNER JOIN %(old_table)s as old_t "
-+ "ON new_t.router_id = old_t.router_id "
-+ "SET new_t.distributed = old_t.distributed, "
-+ "new_t.service_router = old_t.service_router") %
-+ {'new_table': new_table, 'old_table': old_table})
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
+@@ -120,52 +120,78 @@ def upgrade():
+ sa_expr.column('network_id'))
+
+ # ofctenants -> ofctenantmappings
+- select_obj = sa.select([ofctenants.c.quantum_id,
+- op.inline_literal('/tenants/') + ofctenants.c.id])
+- stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
+- ofctenantmappings.c.ofc_id],
+- select_obj)
+- op.execute(stmt)
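++ # On SQLite, run the equivalent INSERT ... SELECT as plain SQL, using ||
++ # to build the ofc_id strings.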
++ if op.get_bind().engine.name == 'sqlite':
++ op.execute("INSERT INTO ofctenantmappings (quantum_id, ofc_id) SELECT ofctenants.quantum_id, '/tenants/' || ofctenants.id AS anon_1 FROM ofctenants")
++ else:
++ select_obj = sa.select([ofctenants.c.quantum_id,
++ op.inline_literal('/tenants/') + ofctenants.c.id])
++ stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
++ ofctenantmappings.c.ofc_id],
++ select_obj)
++ op.execute(stmt)
+
+ # ofcnetworks -> ofcnetworkmappings
+- select_obj = ofcnetworks.join(
+- networks,
+- ofcnetworks.c.quantum_id == networks.c.id)
+- select_obj = select_obj.join(
+- ofctenantmappings,
+- ofctenantmappings.c.quantum_id == networks.c.tenant_id)
+- select_obj = sa.select(
+- [ofcnetworks.c.quantum_id,
+- (ofctenantmappings.c.ofc_id +
+- op.inline_literal('/networks/') + ofcnetworks.c.id)],
+- from_obj=select_obj)
+- stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
+- ofcnetworkmappings.c.ofc_id],
+- select_obj)
+- op.execute(stmt)
++ if op.get_bind().engine.name == 'sqlite':
++ op.execute("INSERT INTO ofcnetworkmappings (quantum_id, ofc_id) "
++ "SELECT ofcnetworks.quantum_id, "
++ "ofctenantmappings.ofc_id || '/networks/' || ofcnetworks.id "
++ "AS anon_1 FROM ofcnetworks "
++ "JOIN networks ON ofcnetworks.quantum_id = networks.id "
++ "JOIN ofctenantmappings "
++ "ON ofctenantmappings.quantum_id = networks.tenant_id")
++ else:
++ select_obj = ofcnetworks.join(
++ networks,
++ ofcnetworks.c.quantum_id == networks.c.id)
++ select_obj = select_obj.join(
++ ofctenantmappings,
++ ofctenantmappings.c.quantum_id == networks.c.tenant_id)
++ select_obj = sa.select(
++ [ofcnetworks.c.quantum_id,
++ (ofctenantmappings.c.ofc_id +
++ op.inline_literal('/networks/') + ofcnetworks.c.id)],
++ from_obj=select_obj)
++ stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
++ ofcnetworkmappings.c.ofc_id],
++ select_obj)
++ op.execute(stmt)
+
+ # ofcports -> ofcportmappings
+- select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id)
+- select_obj = select_obj.join(
+- ofcnetworkmappings,
+- ofcnetworkmappings.c.quantum_id == ports.c.network_id)
+- select_obj = sa.select(
+- [ofcports.c.quantum_id,
+- (ofcnetworkmappings.c.ofc_id +
+- op.inline_literal('/ports/') + ofcports.c.id)],
+- from_obj=select_obj)
+- stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
+- ofcportmappings.c.ofc_id],
+- select_obj)
+- op.execute(stmt)
++ if op.get_bind().engine.name == 'sqlite':
++ op.execute("INSERT INTO ofcportmappings (quantum_id, ofc_id) "
++ "SELECT ofcports.quantum_id, "
++ "ofcnetworkmappings.ofc_id || '/ports/' || ofcports.id "
++ "AS anon_1 FROM ofcports "
++ "JOIN ports ON ofcports.quantum_id = ports.id "
++ "JOIN ofcnetworkmappings "
++ "ON ofcnetworkmappings.quantum_id = ports.network_id")
++ else:
++ select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id)
++ select_obj = select_obj.join(
++ ofcnetworkmappings,
++ ofcnetworkmappings.c.quantum_id == ports.c.network_id)
++ select_obj = sa.select(
++ [ofcports.c.quantum_id,
++ (ofcnetworkmappings.c.ofc_id +
++ op.inline_literal('/ports/') + ofcports.c.id)],
++ from_obj=select_obj)
++ stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
++ ofcportmappings.c.ofc_id],
++ select_obj)
++ op.execute(stmt)
+
+ # ofcfilters -> ofcfiltermappings
+- select_obj = sa.select([ofcfilters.c.quantum_id,
+- op.inline_literal('/filters/') + ofcfilters.c.id])
+- stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
+- ofcfiltermappings.c.ofc_id],
+- select_obj)
+- op.execute(stmt)
++ if op.get_bind().engine.name == 'sqlite':
++ op.execute("INSERT INTO ofcfiltermappings (quantum_id, ofc_id) "
++ "SELECT ofcfilters.quantum_id, '/filters/' || ofcfilters.id "
++ "AS anon_1 FROM ofcfilters")
++ else:
++ select_obj = sa.select([ofcfilters.c.quantum_id,
++ op.inline_literal('/filters/') + ofcfilters.c.id])
++ stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
++ ofcfiltermappings.c.ofc_id],
++ select_obj)
++ op.execute(stmt)
+
+ # drop old mapping tables
+ op.drop_table('ofctenants')
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
+@@ -25,11 +25,16 @@ Create Date: 2014-05-29 10:52:43.898980
+ revision = 'db_healing'
+ down_revision = '5446f2a45467'
+
++
++from alembic import op
++
++
+ from neutron.db.migration.alembic_migrations import heal_script
def upgrade():
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
-@@ -26,6 +26,7 @@ revision = '5446f2a45467'
- down_revision = '2db5203cb7a9'
+- heal_script.heal()
++ if op.get_bind().engine.name != 'sqlite':
++ heal_script.heal()
+ def downgrade():
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
+@@ -25,6 +25,7 @@ Create Date: 2014-03-17 11:00:35.370618
+ revision = '1e5dd1d09b22'
+ down_revision = '54f7549a0e5f'
+
+from alembic import op
import sqlalchemy as sa
- import sqlalchemy.sql
-@@ -70,20 +71,78 @@ def run(default=None):
- def set_default_brocade(default):
- if default:
- default = ''
+ from neutron.db import migration
+@@ -32,22 +33,23 @@ from neutron.db import migration
+
+ @migration.skip_if_offline
+ def upgrade():
- migration.alter_column_if_exists(
-- 'brocadeports', 'port_id',
-- server_default=default,
-- existing_type=sa.String(36))
+- 'poolstatisticss', 'bytes_in',
+- nullable=False,
+- existing_type=sa.BigInteger())
+- migration.alter_column_if_exists(
+- 'poolstatisticss', 'bytes_out',
+- nullable=False,
+- existing_type=sa.BigInteger())
+- migration.alter_column_if_exists(
+- 'poolstatisticss', 'active_connections',
+- nullable=False,
+- existing_type=sa.BigInteger())
+- migration.alter_column_if_exists(
+- 'poolstatisticss', 'total_connections',
+- nullable=False,
+- existing_type=sa.BigInteger())
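++ # SQLite cannot change an existing column to NOT NULL in place, so these
++ # ALTERs are skipped there.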
++ if op.get_bind().engine.name != 'sqlite':
++ migration.alter_column_if_exists(
++ 'poolstatisticss', 'bytes_in',
++ nullable=False,
++ existing_type=sa.BigInteger())
++ migration.alter_column_if_exists(
++ 'poolstatisticss', 'bytes_out',
++ nullable=False,
++ existing_type=sa.BigInteger())
++ migration.alter_column_if_exists(
++ 'poolstatisticss', 'active_connections',
++ nullable=False,
++ existing_type=sa.BigInteger())
++ migration.alter_column_if_exists(
++ 'poolstatisticss', 'total_connections',
++ nullable=False,
++ existing_type=sa.BigInteger())
+
+
+ @migration.skip_if_offline
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py
+@@ -41,11 +41,15 @@ def upgrade():
+ # configured plugin did not create the agents table.
+ return
+
+- op.create_unique_constraint(
+- name=UC_NAME,
+- source=TABLE_NAME,
+- local_cols=['agent_type', 'host']
+- )
+ if op.get_bind().engine.name == 'sqlite':
-+ op.execute("CREATE TEMPORARY TABLE brocadeports_backup ( "
-+ "port_id VARCHAR(36) NOT NULL DEFAULT '', "
-+ "network_id VARCHAR(36) NOT NULL, "
-+ "admin_state_up BOOLEAN NOT NULL, "
-+ "physical_interface VARCHAR(36), "
-+ "vlan_id VARCHAR(36), "
-+ "tenant_id VARCHAR(36),"
-+ "PRIMARY KEY (port_id), "
-+ "FOREIGN KEY(network_id) REFERENCES brocadenetworks (id), "
-+ "CHECK (admin_state_up IN (0, 1)))")
-+ op.execute("INSERT INTO brocadeports_backup "
-+ "(port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id) "
-+ "SELECT port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id "
-+ "FROM brocadeports")
-+ op.execute("DROP TABLE brocadeports")
-+ op.execute("CREATE TABLE brocadeports ( "
-+ "port_id VARCHAR(36) NOT NULL DEFAULT '', "
-+ "network_id VARCHAR(36) NOT NULL, "
-+ "admin_state_up BOOLEAN NOT NULL, "
-+ "physical_interface VARCHAR(36), "
-+ "vlan_id VARCHAR(36), "
-+ "tenant_id VARCHAR(36),"
-+ "PRIMARY KEY (port_id), "
-+ "FOREIGN KEY(network_id) REFERENCES brocadenetworks (id), "
-+ "CHECK (admin_state_up IN (0, 1)))")
-+ op.execute("INSERT INTO brocadeports "
-+ "(port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id) "
-+ "SELECT port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id "
-+ "FROM brocadeports_backup")
-+ op.execute("DROP TABLE brocadeports_backup")
++ op.execute("CREATE UNIQUE INDEX uniq_agents0agent_type0host "
++ "on agents (agent_type,host);")
+ else:
-+ migration.alter_column_if_exists(
-+ 'brocadeports', 'port_id',
-+ server_default=default,
-+ existing_type=sa.String(36))
++ op.create_unique_constraint(
++ name=UC_NAME,
++ source=TABLE_NAME,
++ local_cols=['agent_type', 'host']
++ )
- def set_default_mlnx(default):
- if default:
- default = sqlalchemy.sql.false()
-- migration.alter_column_if_exists(
-- 'segmentation_id_allocation', 'allocated',
-- server_default=default,
-- existing_nullable=False,
-- existing_type=sa.Boolean)
-+
-+ if op.get_bind().engine.name == 'sqlite':
-+ op.execute("CREATE TEMPORARY TABLE segmentation_id_allocation_backup ( "
-+ "physical_network VARCHAR(64) NOT NULL, "
-+ "segmentation_id INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "PRIMARY KEY (physical_network, segmentation_id), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO segmentation_id_allocation_backup "
-+ "(physical_network, segmentation_id, allocated) "
-+ "SELECT physical_network, segmentation_id, allocated "
-+ "FROM segmentation_id_allocation")
-+ op.execute("DROP TABLE segmentation_id_allocation")
-+ op.execute("CREATE TABLE segmentation_id_allocation ( "
-+ "physical_network VARCHAR(64) NOT NULL, "
-+ "segmentation_id INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "PRIMARY KEY (physical_network, segmentation_id), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO segmentation_id_allocation "
-+ "(physical_network, segmentation_id, allocated) "
-+ "SELECT physical_network, segmentation_id, allocated "
-+ "FROM segmentation_id_allocation_backup")
-+ op.execute("DROP TABLE segmentation_id_allocation_backup")
-+
-+ else:
-+ migration.alter_column_if_exists(
-+ 'segmentation_id_allocation', 'allocated',
-+ server_default=default,
-+ existing_nullable=False,
-+ existing_type=sa.Boolean)
-
-
- def set_default_cisco(default):
-@@ -92,61 +151,299 @@ def set_default_cisco(default):
- profile_default = '0' if default else None
- if default:
- default = sqlalchemy.sql.false()
-- migration.alter_column_if_exists(
-- 'cisco_n1kv_profile_bindings', 'tenant_id',
-- existing_type=sa.String(length=36),
-- server_default=profile_binding_default,
-- existing_nullable=False)
-- migration.alter_column_if_exists(
-- 'cisco_network_profiles', 'multicast_ip_index',
-- server_default=profile_default,
-- existing_type=sa.Integer)
-- migration.alter_column_if_exists(
-- 'cisco_n1kv_vlan_allocations', 'allocated',
-- existing_type=sa.Boolean,
-- server_default=default,
-- existing_nullable=False)
-+ if op.get_bind().engine.name == 'sqlite':
-+ # cisco_n1kv_profile_bindings_backup
-+ op.execute("CREATE TEMPORARY TABLE cisco_n1kv_profile_bindings_backup ( "
-+ "profile_type VARCHAR(7), "
-+ "tenant_id VARCHAR(36) NOT NULL DEFAULT 'TENANT_ID_NOT_SET', "
-+ "profile_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (tenant_id, profile_id), "
-+ "CONSTRAINT profile_type CHECK (profile_type IN ('network', 'policy')))")
-+ op.execute("INSERT INTO cisco_n1kv_profile_bindings_backup "
-+ "(profile_type, tenant_id, profile_id) "
-+ "SELECT profile_type, tenant_id, profile_id "
-+ "FROM cisco_n1kv_profile_bindings")
-+ op.execute("DROP TABLE cisco_n1kv_profile_bindings")
-+ op.execute("CREATE TABLE cisco_n1kv_profile_bindings ( "
-+ "profile_type VARCHAR(7), "
-+ "tenant_id VARCHAR(36) NOT NULL DEFAULT 'TENANT_ID_NOT_SET', "
-+ "profile_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (tenant_id, profile_id), "
-+ "CONSTRAINT profile_type CHECK (profile_type IN ('network', 'policy')))")
-+ op.execute("INSERT INTO cisco_n1kv_profile_bindings "
-+ "(profile_type, tenant_id, profile_id) "
-+ "SELECT profile_type, tenant_id, profile_id "
-+ "FROM cisco_n1kv_profile_bindings_backup")
-+ op.execute("DROP TABLE cisco_n1kv_profile_bindings_backup")
-+
-+ # cisco_network_profiles
-+ op.execute("CREATE TEMPORARY TABLE cisco_network_profiles_backup ( "
-+ "id VARCHAR(36) NOT NULL, "
-+ "name VARCHAR(255), "
-+ "segment_type VARCHAR(13) NOT NULL, "
-+ "sub_type VARCHAR(255), "
-+ "segment_range VARCHAR(255), "
-+ "multicast_ip_index INTEGER DEFAULT '0', "
-+ "multicast_ip_range VARCHAR(255), "
-+ "physical_network VARCHAR(255), "
-+ "PRIMARY KEY (id), "
-+ "CONSTRAINT segment_type CHECK (segment_type IN ('vlan', 'overlay', 'trunk', 'multi-segment')))")
-+ op.execute("INSERT INTO cisco_network_profiles_backup "
-+ "(id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network) "
-+ "SELECT id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network "
-+ "FROM cisco_network_profiles")
-+ op.execute("DROP TABLE cisco_network_profiles")
-+ op.execute("CREATE TABLE cisco_network_profiles ( "
-+ "id VARCHAR(36) NOT NULL, "
-+ "name VARCHAR(255), "
-+ "segment_type VARCHAR(13) NOT NULL, "
-+ "sub_type VARCHAR(255), "
-+ "segment_range VARCHAR(255), "
-+ "multicast_ip_index INTEGER DEFAULT '0', "
-+ "multicast_ip_range VARCHAR(255), "
-+ "physical_network VARCHAR(255), "
-+ "PRIMARY KEY (id), "
-+ "CONSTRAINT segment_type CHECK (segment_type IN ('vlan', 'overlay', 'trunk', 'multi-segment')))")
-+ op.execute("INSERT INTO cisco_network_profiles "
-+ "(id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network) "
-+ "SELECT id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network "
-+ "FROM cisco_network_profiles_backup")
-+ op.execute("DROP TABLE cisco_network_profiles_backup")
-+
-+ # cisco_n1kv_vlan_allocations
-+ op.execute("CREATE TEMPORARY TABLE zigo_backup ( "
-+ "physical_network VARCHAR(64) NOT NULL, "
-+ "vlan_id INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "network_profile_id VARCHAR(36), "
-+ "PRIMARY KEY (physical_network, vlan_id), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO zigo_backup "
-+ "(physical_network, vlan_id, allocated, allocated) "
-+ "SELECT physical_network, vlan_id, allocated, allocated "
-+ "FROM cisco_n1kv_vlan_allocations")
-+ op.execute("DROP TABLE cisco_n1kv_vlan_allocations")
-+ op.execute("CREATE TABLE cisco_n1kv_vlan_allocations ( "
-+ "physical_network VARCHAR(64) NOT NULL, "
-+ "vlan_id INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "network_profile_id VARCHAR(36), "
-+ "PRIMARY KEY (physical_network, vlan_id), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO cisco_n1kv_vlan_allocations "
-+ "(physical_network, vlan_id, allocated, allocated) "
-+ "SELECT physical_network, vlan_id, allocated, allocated "
-+ "FROM zigo_backup")
-+ op.execute("DROP TABLE zigo_backup")
-+
-+ else:
-+ migration.alter_column_if_exists(
-+ 'cisco_n1kv_profile_bindings', 'tenant_id',
-+ existing_type=sa.String(length=36),
-+ server_default=profile_binding_default,
-+ existing_nullable=False)
-+ migration.alter_column_if_exists(
-+ 'cisco_network_profiles', 'multicast_ip_index',
-+ server_default=profile_default,
-+ existing_type=sa.Integer)
-+ migration.alter_column_if_exists(
-+ 'cisco_n1kv_vlan_allocations', 'allocated',
-+ existing_type=sa.Boolean,
-+ server_default=default,
-+ existing_nullable=False)
-
-
- def set_default_vmware(default=None):
- if default:
- default = sqlalchemy.sql.false()
-- migration.alter_column_if_exists(
-- 'nsxrouterextattributess', 'service_router',
-- server_default=default,
-- existing_nullable=False,
-- existing_type=sa.Boolean)
-- migration.alter_column_if_exists(
-- 'nsxrouterextattributess', 'distributed',
-- server_default=default,
-- existing_nullable=False,
-- existing_type=sa.Boolean)
-- migration.alter_column_if_exists(
-- 'qosqueues', 'default',
-- server_default=default,
-- existing_type=sa.Boolean)
-+ if op.get_bind().engine.name == 'sqlite':
-+ # nsxrouterextattributess
-+ op.execute("CREATE TEMPORARY TABLE nsxrouterextattributess_backup ( "
-+ "router_id VARCHAR(36) NOT NULL, "
-+ "distributed BOOLEAN NOT NULL, "
-+ "service_router BOOLEAN DEFAULT '0' NOT NULL, "
-+ "PRIMARY KEY (router_id), "
-+ "FOREIGN KEY(router_id) REFERENCES routers (id) ON DELETE CASCADE, "
-+ "CHECK (distributed IN (0, 1)), "
-+ "CHECK (service_router IN (0, 1)))")
-+ op.execute("INSERT INTO nsxrouterextattributess_backup "
-+ "(router_id, distributed, service_router) "
-+ "SELECT router_id, distributed, service_router "
-+ "FROM nsxrouterextattributess")
-+ op.execute("DROP TABLE nsxrouterextattributess")
-+ op.execute("CREATE TABLE nsxrouterextattributess ( "
-+ "router_id VARCHAR(36) NOT NULL, "
-+ "distributed BOOLEAN NOT NULL DEFAULT 0, "
-+ "service_router BOOLEAN DEFAULT '0' NOT NULL, "
-+ "PRIMARY KEY (router_id), "
-+ "FOREIGN KEY(router_id) REFERENCES routers (id) ON DELETE CASCADE, "
-+ "CHECK (distributed IN (0, 1)), "
-+ "CHECK (service_router IN (0, 1)))")
-+ op.execute("INSERT INTO nsxrouterextattributess "
-+ "(router_id, distributed, service_router) "
-+ "SELECT router_id, distributed, service_router "
-+ "FROM nsxrouterextattributess_backup")
-+ op.execute("DROP TABLE nsxrouterextattributess_backup")
-+
-+ op.execute("CREATE TEMPORARY TABLE qosqueues_backup ("
-+ "tenant_id VARCHAR(255), "
-+ "id VARCHAR(36) NOT NULL, "
-+ "name VARCHAR(255), "
-+ "\"default\" BOOLEAN, "
-+ "min INTEGER NOT NULL, "
-+ "max INTEGER, "
-+ "qos_marking VARCHAR(9), "
-+ "dscp INTEGER, "
-+ "PRIMARY KEY (id), "
-+ "CHECK (\"default\" IN (0, 1)), "
-+ "CONSTRAINT qosqueues_qos_marking CHECK (qos_marking IN ('untrusted', 'trusted')))")
-+ op.execute("INSERT INTO qosqueues_backup "
-+ "(tenant_id, id, name, \"default\", min, max, qos_marking, dscp) "
-+ "SELECT tenant_id, id, name, \"default\", min, max, qos_marking, dscp "
-+ "FROM qosqueues")
-+ op.execute("DROP TABLE qosqueues")
-+ op.execute("CREATE TABLE qosqueues ("
-+ "tenant_id VARCHAR(255), "
-+ "id VARCHAR(36) NOT NULL, "
-+ "name VARCHAR(255), "
-+ "\"default\" BOOLEAN, "
-+ "min INTEGER NOT NULL, "
-+ "max INTEGER, "
-+ "qos_marking VARCHAR(9), "
-+ "dscp INTEGER, "
-+ "PRIMARY KEY (id), "
-+ "CHECK (\"default\" IN (0, 1)), "
-+ "CONSTRAINT qosqueues_qos_marking CHECK (qos_marking IN ('untrusted', 'trusted')))")
-+ op.execute("INSERT INTO qosqueues "
-+ "(tenant_id, id, name, \"default\", min, max, qos_marking, dscp) "
-+ "SELECT tenant_id, id, name, \"default\", min, max, qos_marking, dscp "
-+ "FROM qosqueues_backup")
-+ op.execute("DROP TABLE qosqueues_backup")
-+
-+ else:
-+ migration.alter_column_if_exists(
-+ 'nsxrouterextattributess', 'service_router',
-+ server_default=default,
-+ existing_nullable=False,
-+ existing_type=sa.Boolean)
-+ migration.alter_column_if_exists(
-+ 'nsxrouterextattributess', 'distributed',
-+ server_default=default,
-+ existing_nullable=False,
-+ existing_type=sa.Boolean)
-+ migration.alter_column_if_exists(
-+ 'qosqueues', 'default',
-+ server_default=default,
-+ existing_type=sa.Boolean)
-
-
- def set_default_agents(default=None):
- if default:
- default = sqlalchemy.sql.true()
-- migration.alter_column_if_exists(
-- 'agents', 'admin_state_up',
-- server_default=default,
-- existing_nullable=False,
-- existing_type=sa.Boolean)
-+ if op.get_bind().engine.name == 'sqlite':
-+ op.execute("CREATE TEMPORARY TABLE agents_backup ( "
-+ "id VARCHAR(36) NOT NULL, "
-+ "agent_type VARCHAR(255) NOT NULL, "
-+ "binary VARCHAR(255) NOT NULL, "
-+ "topic VARCHAR(255) NOT NULL, "
-+ "host VARCHAR(255) NOT NULL, "
-+ "admin_state_up BOOLEAN NOT NULL DEFAULT 1, "
-+ "created_at DATETIME NOT NULL, "
-+ "started_at DATETIME NOT NULL, "
-+ "heartbeat_timestamp DATETIME NOT NULL, "
-+ "description VARCHAR(255), "
-+ "configurations VARCHAR(4095) NOT NULL, "
-+ "PRIMARY KEY (id), "
-+ "CHECK (admin_state_up IN (0, 1)))")
-+ op.execute("INSERT INTO agents_backup "
-+ "(id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations) "
-+ "SELECT id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations "
-+ "FROM agents")
-+ op.execute("DROP TABLE agents")
-+ op.execute("CREATE TABLE agents ( "
-+ "id VARCHAR(36) NOT NULL, "
-+ "agent_type VARCHAR(255) NOT NULL, "
-+ "binary VARCHAR(255) NOT NULL, "
-+ "topic VARCHAR(255) NOT NULL, "
-+ "host VARCHAR(255) NOT NULL, "
-+ "admin_state_up BOOLEAN NOT NULL DEFAULT 1, "
-+ "created_at DATETIME NOT NULL, "
-+ "started_at DATETIME NOT NULL, "
-+ "heartbeat_timestamp DATETIME NOT NULL, "
-+ "description VARCHAR(255), "
-+ "configurations VARCHAR(4095) NOT NULL, "
-+ "PRIMARY KEY (id), "
-+ "CHECK (admin_state_up IN (0, 1)))")
-+ op.execute("INSERT INTO agents "
-+ "(id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations) "
-+ "SELECT id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations "
-+ "FROM agents_backup")
-+ op.execute("DROP TABLE agents_backup")
-+
-+ else:
-+ migration.alter_column_if_exists(
-+ 'agents', 'admin_state_up',
-+ server_default=default,
-+ existing_nullable=False,
-+ existing_type=sa.Boolean)
-
-
- def set_default_ml2(default=None):
- if default:
- default = sqlalchemy.sql.false()
-- migration.alter_column_if_exists(
-- 'ml2_gre_allocations', 'allocated',
-- server_default=default,
-- existing_nullable=False,
-- existing_type=sa.Boolean)
-- migration.alter_column_if_exists(
-- 'ml2_vxlan_allocations', 'allocated',
-- server_default=default,
-- existing_nullable=False,
-- existing_type=sa.Boolean)
-+ if op.get_bind().engine.name == 'sqlite':
-+ # ml2_gre_allocations
-+ op.execute("CREATE TEMPORARY TABLE ml2_gre_allocations_backup ( "
-+ "gre_id INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "PRIMARY KEY (gre_id), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO ml2_gre_allocations_backup "
-+ "(gre_id, allocated) "
-+ "SELECT gre_id, allocated "
-+ "FROM ml2_gre_allocations")
-+ op.execute("DROP TABLE ml2_gre_allocations")
-+ op.execute("CREATE TABLE ml2_gre_allocations ( "
-+ "gre_id INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "PRIMARY KEY (gre_id), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO ml2_gre_allocations "
-+ "(gre_id, allocated) "
-+ "SELECT gre_id, allocated "
-+ "FROM ml2_gre_allocations_backup")
-+ op.execute("DROP TABLE ml2_gre_allocations_backup")
-+
-+ # ml2_vxlan_allocations
-+ op.execute("CREATE TABLE ml2_vxlan_allocations_backup ( "
-+ "vxlan_vni INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "PRIMARY KEY (vxlan_vni), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO ml2_vxlan_allocations_backup "
-+ "(vxlan_vni, allocated) "
-+ "SELECT vxlan_vni, allocated "
-+ "FROM ml2_vxlan_allocations")
-+ op.execute("DROP TABLE ml2_vxlan_allocations")
-+ op.execute("CREATE TABLE ml2_vxlan_allocations ( "
-+ "vxlan_vni INTEGER NOT NULL, "
-+ "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+ "PRIMARY KEY (vxlan_vni), "
-+ "CHECK (allocated IN (0, 1)))")
-+ op.execute("INSERT INTO ml2_vxlan_allocations "
-+ "(vxlan_vni, allocated) "
-+ "SELECT vxlan_vni, allocated "
-+ "FROM ml2_vxlan_allocations_backup")
-+ op.execute("DROP TABLE ml2_vxlan_allocations_backup")
-+
-+ else:
-+ migration.alter_column_if_exists(
-+ 'ml2_gre_allocations', 'allocated',
-+ server_default=default,
-+ existing_nullable=False,
-+ existing_type=sa.Boolean)
-+ migration.alter_column_if_exists(
-+ 'ml2_vxlan_allocations', 'allocated',
-+ server_default=default,
-+ existing_nullable=False,
-+ existing_type=sa.Boolean)
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
-@@ -45,9 +45,14 @@ def upgrade():
- "SELECT id as router_id, "
- "0 as distributed from routers")
+ def downgrade():
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
+@@ -58,13 +58,22 @@ def upgrade():
+ 'ON %(table)s.id = temp.id WHERE temp.id is NULL);'
+ % {'table': TABLE_NAME})
else:
-- op.execute("INSERT INTO router_extra_attributes "
-- "SELECT id as router_id, "
-- "False as distributed from routers")
+- op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
+- '(SELECT MIN(id) as id, router_id, l3_agent_id '
+- ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
+- 'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
+- % {'table': TABLE_NAME})
+ if op.get_bind().engine.name == 'sqlite':
-+ op.execute("INSERT INTO router_extra_attributes "
-+ "SELECT id AS router_id, "
-+ "0 AS distributed FROM routers")
++ # TODO: Fix this for SQLITE
++ print("Fix this for SQLITE")
+ else:
-+ op.execute("INSERT INTO router_extra_attributes "
-+ "SELECT id AS router_id, "
-+ "False AS distributed FROM routers")
-
-
- def downgrade():
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
-@@ -42,28 +42,30 @@ def upgrade():
- 'cisco_n1kv_vlan_allocations',
- sa.Column('network_profile_id',
- sa.String(length=36),
-- nullable=False)
-- )
-- op.create_foreign_key(
-- 'cisco_n1kv_vlan_allocations_ibfk_1',
-- source='cisco_n1kv_vlan_allocations',
-- referent='cisco_network_profiles',
-- local_cols=['network_profile_id'], remote_cols=['id'],
-- ondelete='CASCADE'
-+ nullable='False')
- )
-+ if op.get_bind().engine.name != 'sqlite':
-+ op.create_foreign_key(
-+ 'cisco_n1kv_vlan_allocations_ibfk_1',
-+ source='cisco_n1kv_vlan_allocations',
-+ referent='cisco_network_profiles',
-+ local_cols=['network_profile_id'], remote_cols=['id'],
-+ ondelete='CASCADE'
-+ )
- op.add_column(
- 'cisco_n1kv_vxlan_allocations',
- sa.Column('network_profile_id',
- sa.String(length=36),
-- nullable=False)
-- )
-- op.create_foreign_key(
-- 'cisco_n1kv_vxlan_allocations_ibfk_1',
-- source='cisco_n1kv_vxlan_allocations',
-- referent='cisco_network_profiles',
-- local_cols=['network_profile_id'], remote_cols=['id'],
-- ondelete='CASCADE'
-+ nullable='False')
- )
-+ if op.get_bind().engine.name != 'sqlite':
-+ op.create_foreign_key(
-+ 'cisco_n1kv_vxlan_allocations_ibfk_1',
-+ source='cisco_n1kv_vxlan_allocations',
-+ referent='cisco_network_profiles',
-+ local_cols=['network_profile_id'], remote_cols=['id'],
-+ ondelete='CASCADE'
-+ )
-
-
- def downgrade():
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
-@@ -25,6 +25,7 @@ Create Date: 2014-03-17 11:00:35.370618
- revision = '1e5dd1d09b22'
- down_revision = '54f7549a0e5f'
-
-+from alembic import op
- import sqlalchemy as sa
-
- from neutron.db import migration
-@@ -32,22 +33,23 @@ from neutron.db import migration
++ op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
++ '(SELECT MIN(id) as id, router_id, l3_agent_id '
++ ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
++ 'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
++ % {'table': TABLE_NAME})
- @migration.skip_if_offline
- def upgrade():
-- migration.alter_column_if_exists(
-- 'poolstatisticss', 'bytes_in',
-- nullable=False,
-- existing_type=sa.BigInteger())
-- migration.alter_column_if_exists(
-- 'poolstatisticss', 'bytes_out',
-- nullable=False,
-- existing_type=sa.BigInteger())
-- migration.alter_column_if_exists(
-- 'poolstatisticss', 'active_connections',
-- nullable=False,
-- existing_type=sa.BigInteger())
-- migration.alter_column_if_exists(
-- 'poolstatisticss', 'total_connections',
-- nullable=False,
-- existing_type=sa.BigInteger())
-+ if op.get_bind().engine.name != 'sqlite':
-+ migration.alter_column_if_exists(
-+ 'poolstatisticss', 'bytes_in',
-+ nullable=False,
-+ existing_type=sa.BigInteger())
-+ migration.alter_column_if_exists(
-+ 'poolstatisticss', 'bytes_out',
-+ nullable=False,
-+ existing_type=sa.BigInteger())
-+ migration.alter_column_if_exists(
-+ 'poolstatisticss', 'active_connections',
-+ nullable=False,
-+ existing_type=sa.BigInteger())
-+ migration.alter_column_if_exists(
-+ 'poolstatisticss', 'total_connections',
-+ nullable=False,
-+ existing_type=sa.BigInteger())
+- op.drop_column(TABLE_NAME, 'id')
++ if op.get_bind().engine.name == 'sqlite':
++ # TODO: Fix this for SQLITE
++ print("Fix this for SQLITE")
++ return
++ else:
++ op.drop_column(TABLE_NAME, 'id')
+ # DB2 doesn't support nullable column in primary key
+ if context.bind.dialect.name == 'ibm_db_sa':
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
+@@ -34,8 +34,9 @@ from neutron.db.migration.alembic_migrat
- @migration.skip_if_offline
+ def upgrade():
+ if migration.schema_has_table('meteringlabels'):
+- op.alter_column('meteringlabels', 'description', type_=sa.String(1024),
+- existing_nullable=True)
++ if op.get_bind().engine.name != 'sqlite':
++ op.alter_column('meteringlabels', 'description', type_=sa.String(1024),
++ existing_nullable=True)
+ else:
+ metering_init_ops.create_meteringlabels()
+
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
+@@ -45,9 +45,14 @@ def upgrade():
+ "SELECT id as router_id, "
+ "0 as distributed from routers")
+ else:
+- op.execute("INSERT INTO router_extra_attributes "
+- "SELECT id as router_id, "
+- "False as distributed from routers")
++ if op.get_bind().engine.name == 'sqlite':
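++ # SQLite does not understand the False keyword, so insert 0 instead.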
++ op.execute("INSERT INTO router_extra_attributes "
++ "SELECT id AS router_id, "
++ "0 AS distributed FROM routers")
++ else:
++ op.execute("INSERT INTO router_extra_attributes "
++ "SELECT id AS router_id, "
++ "False AS distributed FROM routers")
+
+
+ def downgrade():
--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py
+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py
@@ -34,8 +34,11 @@ PK_NAME = 'ml2_vxlan_endpoints_pkey'
def downgrade():
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
-@@ -120,52 +120,78 @@ def upgrade():
- sa_expr.column('network_id'))
-
- # ofctenants -> ofctenantmappings
-- select_obj = sa.select([ofctenants.c.quantum_id,
-- op.inline_literal('/tenants/') + ofctenants.c.id])
-- stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
-- ofctenantmappings.c.ofc_id],
-- select_obj)
-- op.execute(stmt)
-+ if op.get_bind().engine.name == 'sqlite':
-+ op.execute("INSERT INTO ofctenantmappings (quantum_id, ofc_id) SELECT ofctenants.quantum_id, '/tenants/' || ofctenants.id AS anon_1 FROM ofctenants")
-+ else:
-+ select_obj = sa.select([ofctenants.c.quantum_id,
-+ op.inline_literal('/tenants/') + ofctenants.c.id])
-+ stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
-+ ofctenantmappings.c.ofc_id],
-+ select_obj)
-+ op.execute(stmt)
-
- # ofcnetworks -> ofcnetworkmappings
-- select_obj = ofcnetworks.join(
-- networks,
-- ofcnetworks.c.quantum_id == networks.c.id)
-- select_obj = select_obj.join(
-- ofctenantmappings,
-- ofctenantmappings.c.quantum_id == networks.c.tenant_id)
-- select_obj = sa.select(
-- [ofcnetworks.c.quantum_id,
-- (ofctenantmappings.c.ofc_id +
-- op.inline_literal('/networks/') + ofcnetworks.c.id)],
-- from_obj=select_obj)
-- stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
-- ofcnetworkmappings.c.ofc_id],
-- select_obj)
-- op.execute(stmt)
-+ if op.get_bind().engine.name == 'sqlite':
-+ op.execute("INSERT INTO ofcnetworkmappings (quantum_id, ofc_id) "
-+ "SELECT ofcnetworks.quantum_id, "
-+ "ofctenantmappings.ofc_id || '/networks/' || ofcnetworks.id "
-+ "AS anon_1 FROM ofcnetworks "
-+ "JOIN networks ON ofcnetworks.quantum_id = networks.id "
-+ "JOIN ofctenantmappings "
-+ "ON ofctenantmappings.quantum_id = networks.tenant_id")
-+ else:
-+ select_obj = ofcnetworks.join(
-+ networks,
-+ ofcnetworks.c.quantum_id == networks.c.id)
-+ select_obj = select_obj.join(
-+ ofctenantmappings,
-+ ofctenantmappings.c.quantum_id == networks.c.tenant_id)
-+ select_obj = sa.select(
-+ [ofcnetworks.c.quantum_id,
-+ (ofctenantmappings.c.ofc_id +
-+ op.inline_literal('/networks/') + ofcnetworks.c.id)],
-+ from_obj=select_obj)
-+ stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
-+ ofcnetworkmappings.c.ofc_id],
-+ select_obj)
-+ op.execute(stmt)
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py
+@@ -50,6 +50,45 @@ def upgrade():
+ "UPDATE ml2_port_bindings SET"
+ " vif_details = '{\"port_filter\": false}'"
+ " WHERE cap_port_filter = 0")
++ op.drop_column('ml2_port_bindings', 'cap_port_filter')
++ elif op.get_bind().engine.name == 'sqlite':
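++ # SQLite cannot drop a column: fold cap_port_filter into vif_details in a
++ # temporary backup copy, then recreate ml2_port_bindings without it.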
++ op.execute("CREATE TEMPORARY TABLE ml2_port_bindings_backup ( "
++ "port_id VARCHAR(36) NOT NULL, "
++ "host VARCHAR(255) NOT NULL, "
++ "vif_type VARCHAR(64) NOT NULL, "
++ "cap_port_filter BOOLEAN NOT NULL, "
++ "driver VARCHAR(64), "
++ "segment VARCHAR(36), "
++ "vnic_type VARCHAR(64) DEFAULT 'normal' NOT NULL, "
++ "vif_details VARCHAR(4095) DEFAULT '' NOT NULL, "
++ "PRIMARY KEY (port_id), "
++ "FOREIGN KEY(port_id) REFERENCES ports (id) ON DELETE CASCADE, "
++ "FOREIGN KEY(segment) REFERENCES ml2_network_segments (id) ON DELETE SET NULL, "
++ "CHECK (cap_port_filter IN (0, 1)));")
++ op.execute("INSERT INTO ml2_port_bindings_backup "
++ "(port_id,host,vif_type,cap_port_filter,driver,segment,vnic_type) "
++ "SELECT port_id,host,vif_type,cap_port_filter,driver,segment,vnic_type "
++ "FROM ml2_port_bindings;")
++ # cap_port_filter was copied over as 0/1, so match on the integer value,
++ # not on the 'true'/'false' strings.
++ for value, flag in (('true', 1), ('false', 0)):
++ op.execute("UPDATE ml2_port_bindings_backup SET"
++ " vif_details = '{\"port_filter\": %(value)s}'"
++ " WHERE cap_port_filter = %(flag)d"
++ % {'value': value, 'flag': flag})
++ op.execute("DROP TABLE ml2_port_bindings")
++ op.execute("CREATE TABLE ml2_port_bindings ( "
++ "port_id VARCHAR(36) NOT NULL, "
++ "host VARCHAR(255) NOT NULL, "
++ "vif_type VARCHAR(64) NOT NULL, "
++ "driver VARCHAR(64), "
++ "segment VARCHAR(36), "
++ "vnic_type VARCHAR(64) DEFAULT 'normal' NOT NULL, "
++ "vif_details VARCHAR(4095) DEFAULT '' NOT NULL, "
++ "PRIMARY KEY (port_id), "
++ "FOREIGN KEY(port_id) REFERENCES ports (id) ON DELETE CASCADE, "
++ "FOREIGN KEY(segment) REFERENCES ml2_network_segments (id) ON DELETE SET NULL);")
++ op.execute("INSERT INTO ml2_port_bindings "
++ "SELECT port_id,host,vif_type,driver,segment,vnic_type,vif_details "
++ "FROM ml2_port_bindings_backup;")
++ op.execute("DROP TABLE ml2_port_bindings_backup")
+ else:
+ op.execute(
+ "UPDATE ml2_port_bindings SET"
+@@ -59,7 +98,7 @@ def upgrade():
+ "UPDATE ml2_port_bindings SET"
+ " vif_details = '{\"port_filter\": false}'"
+ " WHERE cap_port_filter = false")
+- op.drop_column('ml2_port_bindings', 'cap_port_filter')
++ op.drop_column('ml2_port_bindings', 'cap_port_filter')
+ if op.get_bind().engine.name == 'ibm_db_sa':
+ op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')")
- # ofcports -> ofcportmappings
-- select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id)
-- select_obj = select_obj.join(
-- ofcnetworkmappings,
-- ofcnetworkmappings.c.quantum_id == ports.c.network_id)
-- select_obj = sa.select(
-- [ofcports.c.quantum_id,
-- (ofcnetworkmappings.c.ofc_id +
-- op.inline_literal('/ports/') + ofcports.c.id)],
-- from_obj=select_obj)
-- stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
-- ofcportmappings.c.ofc_id],
-- select_obj)
-- op.execute(stmt)
-+ if op.get_bind().engine.name == 'sqlite':
-+ op.execute("INSERT INTO ofcportmappings (quantum_id, ofc_id) "
-+ "SELECT ofcports.quantum_id, "
-+ "ofcnetworkmappings.ofc_id || '/ports/' || ofcports.id "
-+ "AS anon_1 FROM ofcports "
-+ "JOIN ports ON ofcports.quantum_id = ports.id "
-+ "JOIN ofcnetworkmappings "
-+ "ON ofcnetworkmappings.quantum_id = ports.network_id")
-+ else:
-+ select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id)
-+ select_obj = select_obj.join(
-+ ofcnetworkmappings,
-+ ofcnetworkmappings.c.quantum_id == ports.c.network_id)
-+ select_obj = sa.select(
-+ [ofcports.c.quantum_id,
-+ (ofcnetworkmappings.c.ofc_id +
-+ op.inline_literal('/ports/') + ofcports.c.id)],
-+ from_obj=select_obj)
-+ stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
-+ ofcportmappings.c.ofc_id],
-+ select_obj)
-+ op.execute(stmt)
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
+@@ -38,14 +38,126 @@ def upgrade():
+ # configured plugin did not create any ofc tables.
+ return
- # ofcfilters -> ofcfiltermappings
-- select_obj = sa.select([ofcfilters.c.quantum_id,
-- op.inline_literal('/filters/') + ofcfilters.c.id])
-- stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
-- ofcfiltermappings.c.ofc_id],
-- select_obj)
-- op.execute(stmt)
+- for table in ['ofctenantmappings', 'ofcnetworkmappings',
+- 'ofcportmappings', 'ofcfiltermappings',
+- 'ofcroutermappings',
+- ]:
+- op.alter_column(table, 'quantum_id',
+- new_column_name='neutron_id',
+- existing_type=sa.String(length=36),
+- existing_nullable=False)
+ if op.get_bind().engine.name == 'sqlite':
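++ # SQLite cannot rename a column; rebuild each mapping table with
++ # quantum_id renamed to neutron_id, copying the rows through a backup.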
-+ op.execute("INSERT INTO ofcfiltermappings (quantum_id, ofc_id) "
-+ "SELECT ofcfilters.quantum_id, '/filters/' || ofcfilters.id "
-+ "AS anon_1 FROM ofcfilters")
-+ else:
-+ select_obj = sa.select([ofcfilters.c.quantum_id,
-+ op.inline_literal('/filters/') + ofcfilters.c.id])
-+ stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
-+ ofcfiltermappings.c.ofc_id],
-+ select_obj)
-+ op.execute(stmt)
-
- # drop old mapping tables
- op.drop_table('ofctenants')
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
-@@ -25,11 +25,16 @@ Create Date: 2014-05-29 10:52:43.898980
- revision = 'db_healing'
- down_revision = '5446f2a45467'
-
++ # ofctenantmappings
++ op.execute("CREATE TEMPORARY TABLE ofctenantmappings_backup ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "quantum_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (quantum_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofctenantmappings_backup "
++ "(ofc_id, quantum_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofctenantmappings")
++ op.execute("DROP TABLE ofctenantmappings")
++ op.execute("CREATE TABLE ofctenantmappings ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "neutron_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (neutron_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofctenantmappings "
++ "(ofc_id, neutron_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofctenantmappings_backup")
++ op.execute("DROP TABLE ofctenantmappings_backup")
+
-+from alembic import op
++ # ofcnetworkmappings
++ op.execute("CREATE TEMPORARY TABLE ofcnetworkmappings_backup ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "quantum_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (quantum_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcnetworkmappings_backup "
++ "(ofc_id, quantum_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcnetworkmappings")
++ op.execute("DROP TABLE ofcnetworkmappings")
++ op.execute("CREATE TABLE ofcnetworkmappings ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "neutron_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (neutron_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcnetworkmappings "
++ "(ofc_id, neutron_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcnetworkmappings_backup")
++ op.execute("DROP TABLE ofcnetworkmappings_backup")
++
++ # ofcportmappings
++ op.execute("CREATE TEMPORARY TABLE ofcportmappings_backup ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "quantum_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (quantum_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcportmappings_backup "
++ "(ofc_id, quantum_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcportmappings")
++ op.execute("DROP TABLE ofcportmappings")
++ op.execute("CREATE TABLE ofcportmappings ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "neutron_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (neutron_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcportmappings "
++ "(ofc_id, neutron_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcportmappings_backup")
++ op.execute("DROP TABLE ofcportmappings_backup")
++
++ # ofcfiltermappings
++ op.execute("CREATE TEMPORARY TABLE ofcfiltermappings_backup ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "quantum_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (quantum_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcfiltermappings_backup "
++ "(ofc_id, quantum_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcfiltermappings")
++ op.execute("DROP TABLE ofcfiltermappings")
++ op.execute("CREATE TABLE ofcfiltermappings ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "neutron_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (neutron_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcfiltermappings "
++ "(ofc_id, neutron_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcfiltermappings_backup")
++ op.execute("DROP TABLE ofcfiltermappings_backup")
+
++ # ofcroutermappings
++ op.execute("CREATE TEMPORARY TABLE ofcroutermappings_backup ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "quantum_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (quantum_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcroutermappings_backup "
++ "(ofc_id, quantum_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcroutermappings")
++ op.execute("DROP TABLE ofcroutermappings")
++ op.execute("CREATE TABLE ofcroutermappings ( "
++ "ofc_id VARCHAR(255) NOT NULL, "
++ "neutron_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (neutron_id), "
++ "UNIQUE (ofc_id))")
++ op.execute("INSERT INTO ofcroutermappings "
++ "(ofc_id, neutron_id) "
++ "SELECT ofc_id, quantum_id "
++ "FROM ofcroutermappings_backup")
++ op.execute("DROP TABLE ofcroutermappings_backup")
+
- from neutron.db.migration.alembic_migrations import heal_script
-
-
- def upgrade():
-- heal_script.heal()
-+ if op.get_bind().engine.name != 'sqlite':
-+ heal_script.heal()
++ else:
++ for table in ['ofctenantmappings', 'ofcnetworkmappings',
++ 'ofcportmappings', 'ofcfiltermappings',
++ 'ofcroutermappings',
++ ]:
++ op.alter_column(table, 'quantum_id',
++ new_column_name='neutron_id',
++ existing_type=sa.String(length=36),
++ existing_nullable=False)
def downgrade():
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
-@@ -30,6 +30,7 @@ down_revision = 'icehouse'
- # This migration will be skipped when executed in offline mode.
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
+@@ -26,6 +26,7 @@ revision = '5446f2a45467'
+ down_revision = '2db5203cb7a9'
+from alembic import op
import sqlalchemy as sa
+ import sqlalchemy.sql
- from neutron.db import migration
-@@ -37,10 +38,11 @@ from neutron.db import migration
+@@ -70,20 +71,78 @@ def run(default=None):
+ def set_default_brocade(default):
+ if default:
+ default = ''
+- migration.alter_column_if_exists(
+- 'brocadeports', 'port_id',
+- server_default=default,
+- existing_type=sa.String(36))
++ if op.get_bind().engine.name == 'sqlite':
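++ # SQLite cannot change a column's server default in place; rebuild the
++ # table through a temporary backup copy (the other drivers below follow
++ # the same pattern).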
++ op.execute("CREATE TEMPORARY TABLE brocadeports_backup ( "
++ "port_id VARCHAR(36) NOT NULL DEFAULT '', "
++ "network_id VARCHAR(36) NOT NULL, "
++ "admin_state_up BOOLEAN NOT NULL, "
++ "physical_interface VARCHAR(36), "
++ "vlan_id VARCHAR(36), "
++ "tenant_id VARCHAR(36),"
++ "PRIMARY KEY (port_id), "
++ "FOREIGN KEY(network_id) REFERENCES brocadenetworks (id), "
++ "CHECK (admin_state_up IN (0, 1)))")
++ op.execute("INSERT INTO brocadeports_backup "
++ "(port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id) "
++ "SELECT port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id "
++ "FROM brocadeports")
++ op.execute("DROP TABLE brocadeports")
++ op.execute("CREATE TABLE brocadeports ( "
++ "port_id VARCHAR(36) NOT NULL DEFAULT '', "
++ "network_id VARCHAR(36) NOT NULL, "
++ "admin_state_up BOOLEAN NOT NULL, "
++ "physical_interface VARCHAR(36), "
++ "vlan_id VARCHAR(36), "
++ "tenant_id VARCHAR(36),"
++ "PRIMARY KEY (port_id), "
++ "FOREIGN KEY(network_id) REFERENCES brocadenetworks (id), "
++ "CHECK (admin_state_up IN (0, 1)))")
++ op.execute("INSERT INTO brocadeports "
++ "(port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id) "
++ "SELECT port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id "
++ "FROM brocadeports_backup")
++ op.execute("DROP TABLE brocadeports_backup")
++ else:
++ migration.alter_column_if_exists(
++ 'brocadeports', 'port_id',
++ server_default=default,
++ existing_type=sa.String(36))
- @migration.skip_if_offline
- def upgrade():
+
+ def set_default_mlnx(default):
+ if default:
+ default = sqlalchemy.sql.false()
- migration.alter_column_if_exists(
-- 'ipsec_site_connections', 'peer_address',
-- existing_type=sa.String(255),
-- nullable=False)
-+ if op.get_bind().engine.name != 'sqlite':
+- 'segmentation_id_allocation', 'allocated',
+- server_default=default,
+- existing_nullable=False,
+- existing_type=sa.Boolean)
++
++ if op.get_bind().engine.name == 'sqlite':
++ op.execute("CREATE TEMPORARY TABLE segmentation_id_allocation_backup ( "
++ "physical_network VARCHAR(64) NOT NULL, "
++ "segmentation_id INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "PRIMARY KEY (physical_network, segmentation_id), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO segmentation_id_allocation_backup "
++ "(physical_network, segmentation_id, allocated) "
++ "SELECT physical_network, segmentation_id, allocated "
++ "FROM segmentation_id_allocation")
++ op.execute("DROP TABLE segmentation_id_allocation")
++ op.execute("CREATE TABLE segmentation_id_allocation ( "
++ "physical_network VARCHAR(64) NOT NULL, "
++ "segmentation_id INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "PRIMARY KEY (physical_network, segmentation_id), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO segmentation_id_allocation "
++ "(physical_network, segmentation_id, allocated) "
++ "SELECT physical_network, segmentation_id, allocated "
++ "FROM segmentation_id_allocation_backup")
++ op.execute("DROP TABLE segmentation_id_allocation_backup")
++
++ else:
++ migration.alter_column_if_exists(
++ 'segmentation_id_allocation', 'allocated',
++ server_default=default,
++ existing_nullable=False,
++ existing_type=sa.Boolean)
+
+
+ def set_default_cisco(default):
+@@ -92,61 +151,299 @@ def set_default_cisco(default):
+ profile_default = '0' if default else None
+ if default:
+ default = sqlalchemy.sql.false()
+- migration.alter_column_if_exists(
+- 'cisco_n1kv_profile_bindings', 'tenant_id',
+- existing_type=sa.String(length=36),
+- server_default=profile_binding_default,
+- existing_nullable=False)
+- migration.alter_column_if_exists(
+- 'cisco_network_profiles', 'multicast_ip_index',
+- server_default=profile_default,
+- existing_type=sa.Integer)
+- migration.alter_column_if_exists(
+- 'cisco_n1kv_vlan_allocations', 'allocated',
+- existing_type=sa.Boolean,
+- server_default=default,
+- existing_nullable=False)
++ if op.get_bind().engine.name == 'sqlite':
++ # cisco_n1kv_profile_bindings_backup
++ op.execute("CREATE TEMPORARY TABLE cisco_n1kv_profile_bindings_backup ( "
++ "profile_type VARCHAR(7), "
++ "tenant_id VARCHAR(36) NOT NULL DEFAULT 'TENANT_ID_NOT_SET', "
++ "profile_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (tenant_id, profile_id), "
++ "CONSTRAINT profile_type CHECK (profile_type IN ('network', 'policy')))")
++ op.execute("INSERT INTO cisco_n1kv_profile_bindings_backup "
++ "(profile_type, tenant_id, profile_id) "
++ "SELECT profile_type, tenant_id, profile_id "
++ "FROM cisco_n1kv_profile_bindings")
++ op.execute("DROP TABLE cisco_n1kv_profile_bindings")
++ op.execute("CREATE TABLE cisco_n1kv_profile_bindings ( "
++ "profile_type VARCHAR(7), "
++ "tenant_id VARCHAR(36) NOT NULL DEFAULT 'TENANT_ID_NOT_SET', "
++ "profile_id VARCHAR(36) NOT NULL, "
++ "PRIMARY KEY (tenant_id, profile_id), "
++ "CONSTRAINT profile_type CHECK (profile_type IN ('network', 'policy')))")
++ op.execute("INSERT INTO cisco_n1kv_profile_bindings "
++ "(profile_type, tenant_id, profile_id) "
++ "SELECT profile_type, tenant_id, profile_id "
++ "FROM cisco_n1kv_profile_bindings_backup")
++ op.execute("DROP TABLE cisco_n1kv_profile_bindings_backup")
++
++ # cisco_network_profiles
++ op.execute("CREATE TEMPORARY TABLE cisco_network_profiles_backup ( "
++ "id VARCHAR(36) NOT NULL, "
++ "name VARCHAR(255), "
++ "segment_type VARCHAR(13) NOT NULL, "
++ "sub_type VARCHAR(255), "
++ "segment_range VARCHAR(255), "
++ "multicast_ip_index INTEGER DEFAULT '0', "
++ "multicast_ip_range VARCHAR(255), "
++ "physical_network VARCHAR(255), "
++ "PRIMARY KEY (id), "
++ "CONSTRAINT segment_type CHECK (segment_type IN ('vlan', 'overlay', 'trunk', 'multi-segment')))")
++ op.execute("INSERT INTO cisco_network_profiles_backup "
++ "(id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network) "
++ "SELECT id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network "
++ "FROM cisco_network_profiles")
++ op.execute("DROP TABLE cisco_network_profiles")
++ op.execute("CREATE TABLE cisco_network_profiles ( "
++ "id VARCHAR(36) NOT NULL, "
++ "name VARCHAR(255), "
++ "segment_type VARCHAR(13) NOT NULL, "
++ "sub_type VARCHAR(255), "
++ "segment_range VARCHAR(255), "
++ "multicast_ip_index INTEGER DEFAULT '0', "
++ "multicast_ip_range VARCHAR(255), "
++ "physical_network VARCHAR(255), "
++ "PRIMARY KEY (id), "
++ "CONSTRAINT segment_type CHECK (segment_type IN ('vlan', 'overlay', 'trunk', 'multi-segment')))")
++ op.execute("INSERT INTO cisco_network_profiles "
++ "(id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network) "
++ "SELECT id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network "
++ "FROM cisco_network_profiles_backup")
++ op.execute("DROP TABLE cisco_network_profiles_backup")
++
++ # cisco_n1kv_vlan_allocations
++ op.execute("CREATE TEMPORARY TABLE zigo_backup ( "
++ "physical_network VARCHAR(64) NOT NULL, "
++ "vlan_id INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "network_profile_id VARCHAR(36), "
++ "PRIMARY KEY (physical_network, vlan_id), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO zigo_backup "
++ "(physical_network, vlan_id, allocated, allocated) "
++ "SELECT physical_network, vlan_id, allocated, allocated "
++ "FROM cisco_n1kv_vlan_allocations")
++ op.execute("DROP TABLE cisco_n1kv_vlan_allocations")
++ op.execute("CREATE TABLE cisco_n1kv_vlan_allocations ( "
++ "physical_network VARCHAR(64) NOT NULL, "
++ "vlan_id INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "network_profile_id VARCHAR(36), "
++ "PRIMARY KEY (physical_network, vlan_id), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO cisco_n1kv_vlan_allocations "
++ "(physical_network, vlan_id, allocated, allocated) "
++ "SELECT physical_network, vlan_id, allocated, allocated "
++ "FROM zigo_backup")
++ op.execute("DROP TABLE zigo_backup")
++
++ else:
++ migration.alter_column_if_exists(
++ 'cisco_n1kv_profile_bindings', 'tenant_id',
++ existing_type=sa.String(length=36),
++ server_default=profile_binding_default,
++ existing_nullable=False)
++ migration.alter_column_if_exists(
++ 'cisco_network_profiles', 'multicast_ip_index',
++ server_default=profile_default,
++ existing_type=sa.Integer)
+ migration.alter_column_if_exists(
-+ 'ipsec_site_connections', 'peer_address',
-+ existing_type=sa.String(255),
-+ nullable=False)
-
++ 'cisco_n1kv_vlan_allocations', 'allocated',
++ existing_type=sa.Boolean,
++ server_default=default,
++ existing_nullable=False)
- @migration.skip_if_offline
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
-@@ -58,13 +58,22 @@ def upgrade():
- 'ON %(table)s.id = temp.id WHERE temp.id is NULL);'
- % {'table': TABLE_NAME})
- else:
-- op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
-- '(SELECT MIN(id) as id, router_id, l3_agent_id '
-- ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
-- 'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
-- % {'table': TABLE_NAME})
-+ if op.get_bind().engine.name == 'sqlite':
-+ # TODO: Fix this for SQLITE
-+ print("Fix this for SQLITE")
-+ else:
-+ op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
-+ '(SELECT MIN(id) as id, router_id, l3_agent_id '
-+ ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
-+ 'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
-+ % {'table': TABLE_NAME})
-- op.drop_column(TABLE_NAME, 'id')
+ def set_default_vmware(default=None):
+ if default:
+ default = sqlalchemy.sql.false()
+- migration.alter_column_if_exists(
+- 'nsxrouterextattributess', 'service_router',
+- server_default=default,
+- existing_nullable=False,
+- existing_type=sa.Boolean)
+- migration.alter_column_if_exists(
+- 'nsxrouterextattributess', 'distributed',
+- server_default=default,
+- existing_nullable=False,
+- existing_type=sa.Boolean)
+- migration.alter_column_if_exists(
+- 'qosqueues', 'default',
+- server_default=default,
+- existing_type=sa.Boolean)
+ if op.get_bind().engine.name == 'sqlite':
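++ # SQLite's ALTER TABLE cannot change an existing column's default, so rebuild each table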
-+ # TODO: Fix this for SQLITE
-+ print("Fix this for SQLITE")
-+ return
++ # nsxrouterextattributess
++ op.execute("CREATE TEMPORARY TABLE nsxrouterextattributess_backup ( "
++ "router_id VARCHAR(36) NOT NULL, "
++ "distributed BOOLEAN NOT NULL, "
++ "service_router BOOLEAN DEFAULT '0' NOT NULL, "
++ "PRIMARY KEY (router_id), "
++ "FOREIGN KEY(router_id) REFERENCES routers (id) ON DELETE CASCADE, "
++ "CHECK (distributed IN (0, 1)), "
++ "CHECK (service_router IN (0, 1)))")
++ op.execute("INSERT INTO nsxrouterextattributess_backup "
++ "(router_id, distributed, service_router) "
++ "SELECT router_id, distributed, service_router "
++ "FROM nsxrouterextattributess")
++ op.execute("DROP TABLE nsxrouterextattributess")
++ op.execute("CREATE TABLE nsxrouterextattributess ( "
++ "router_id VARCHAR(36) NOT NULL, "
++ "distributed BOOLEAN NOT NULL DEFAULT 0, "
++ "service_router BOOLEAN DEFAULT '0' NOT NULL, "
++ "PRIMARY KEY (router_id), "
++ "FOREIGN KEY(router_id) REFERENCES routers (id) ON DELETE CASCADE, "
++ "CHECK (distributed IN (0, 1)), "
++ "CHECK (service_router IN (0, 1)))")
++ op.execute("INSERT INTO nsxrouterextattributess "
++ "(router_id, distributed, service_router) "
++ "SELECT router_id, distributed, service_router "
++ "FROM nsxrouterextattributess_backup")
++ op.execute("DROP TABLE nsxrouterextattributess_backup")
++
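++ # qosqueues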
++ op.execute("CREATE TEMPORARY TABLE qosqueues_backup ("
++ "tenant_id VARCHAR(255), "
++ "id VARCHAR(36) NOT NULL, "
++ "name VARCHAR(255), "
++ "\"default\" BOOLEAN, "
++ "min INTEGER NOT NULL, "
++ "max INTEGER, "
++ "qos_marking VARCHAR(9), "
++ "dscp INTEGER, "
++ "PRIMARY KEY (id), "
++ "CHECK (\"default\" IN (0, 1)), "
++ "CONSTRAINT qosqueues_qos_marking CHECK (qos_marking IN ('untrusted', 'trusted')))")
++ op.execute("INSERT INTO qosqueues_backup "
++ "(tenant_id, id, name, \"default\", min, max, qos_marking, dscp) "
++ "SELECT tenant_id, id, name, \"default\", min, max, qos_marking, dscp "
++ "FROM qosqueues")
++ op.execute("DROP TABLE qosqueues")
++ op.execute("CREATE TABLE qosqueues ("
++ "tenant_id VARCHAR(255), "
++ "id VARCHAR(36) NOT NULL, "
++ "name VARCHAR(255), "
++ "\"default\" BOOLEAN, "
++ "min INTEGER NOT NULL, "
++ "max INTEGER, "
++ "qos_marking VARCHAR(9), "
++ "dscp INTEGER, "
++ "PRIMARY KEY (id), "
++ "CHECK (\"default\" IN (0, 1)), "
++ "CONSTRAINT qosqueues_qos_marking CHECK (qos_marking IN ('untrusted', 'trusted')))")
++ op.execute("INSERT INTO qosqueues "
++ "(tenant_id, id, name, \"default\", min, max, qos_marking, dscp) "
++ "SELECT tenant_id, id, name, \"default\", min, max, qos_marking, dscp "
++ "FROM qosqueues_backup")
++ op.execute("DROP TABLE qosqueues_backup")
++
+ else:
-+ op.drop_column(TABLE_NAME, 'id')
++ migration.alter_column_if_exists(
++ 'nsxrouterextattributess', 'service_router',
++ server_default=default,
++ existing_nullable=False,
++ existing_type=sa.Boolean)
++ migration.alter_column_if_exists(
++ 'nsxrouterextattributess', 'distributed',
++ server_default=default,
++ existing_nullable=False,
++ existing_type=sa.Boolean)
++ migration.alter_column_if_exists(
++ 'qosqueues', 'default',
++ server_default=default,
++ existing_type=sa.Boolean)
- # DB2 doesn't support nullable column in primary key
- if context.bind.dialect.name == 'ibm_db_sa':
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
-@@ -38,14 +38,126 @@ def upgrade():
- # configured plugin did not create any ofc tables.
- return
-- for table in ['ofctenantmappings', 'ofcnetworkmappings',
-- 'ofcportmappings', 'ofcfiltermappings',
-- 'ofcroutermappings',
-- ]:
-- op.alter_column(table, 'quantum_id',
-- new_column_name='neutron_id',
-- existing_type=sa.String(length=36),
-- existing_nullable=False)
+ def set_default_agents(default=None):
+ if default:
+ default = sqlalchemy.sql.true()
+- migration.alter_column_if_exists(
+- 'agents', 'admin_state_up',
+- server_default=default,
+- existing_nullable=False,
+- existing_type=sa.Boolean)
+ if op.get_bind().engine.name == 'sqlite':
-+ # ofctenantmappings
-+ op.execute("CREATE TEMPORARY TABLE ofctenantmappings_backup ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "quantum_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (quantum_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofctenantmappings_backup "
-+ "(ofc_id, quantum_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofctenantmappings")
-+ op.execute("DROP TABLE ofctenantmappings")
-+ op.execute("CREATE TABLE ofctenantmappings ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "neutron_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (neutron_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofctenantmappings "
-+ "(ofc_id, neutron_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofctenantmappings_backup")
-+ op.execute("DROP TABLE ofctenantmappings_backup")
-+
-+ # ofcnetworkmappings
-+ op.execute("CREATE TEMPORARY TABLE ofcnetworkmappings_backup ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "quantum_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (quantum_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcnetworkmappings_backup "
-+ "(ofc_id, quantum_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcnetworkmappings")
-+ op.execute("DROP TABLE ofcnetworkmappings")
-+ op.execute("CREATE TABLE ofcnetworkmappings ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "neutron_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (neutron_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcnetworkmappings "
-+ "(ofc_id, neutron_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcnetworkmappings_backup")
-+ op.execute("DROP TABLE ofcnetworkmappings_backup")
-+
-+ # ofcportmappings
-+ op.execute("CREATE TEMPORARY TABLE ofcportmappings_backup ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "quantum_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (quantum_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcportmappings_backup "
-+ "(ofc_id, quantum_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcportmappings")
-+ op.execute("DROP TABLE ofcportmappings")
-+ op.execute("CREATE TABLE ofcportmappings ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "neutron_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (neutron_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcportmappings "
-+ "(ofc_id, neutron_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcportmappings_backup")
-+ op.execute("DROP TABLE ofcportmappings_backup")
++ op.execute("CREATE TEMPORARY TABLE agents_backup ( "
++ "id VARCHAR(36) NOT NULL, "
++ "agent_type VARCHAR(255) NOT NULL, "
++ "binary VARCHAR(255) NOT NULL, "
++ "topic VARCHAR(255) NOT NULL, "
++ "host VARCHAR(255) NOT NULL, "
++ "admin_state_up BOOLEAN NOT NULL DEFAULT 1, "
++ "created_at DATETIME NOT NULL, "
++ "started_at DATETIME NOT NULL, "
++ "heartbeat_timestamp DATETIME NOT NULL, "
++ "description VARCHAR(255), "
++ "configurations VARCHAR(4095) NOT NULL, "
++ "PRIMARY KEY (id), "
++ "CHECK (admin_state_up IN (0, 1)))")
++ op.execute("INSERT INTO agents_backup "
++ "(id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations) "
++ "SELECT id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations "
++ "FROM agents")
++ op.execute("DROP TABLE agents")
++ op.execute("CREATE TABLE agents ( "
++ "id VARCHAR(36) NOT NULL, "
++ "agent_type VARCHAR(255) NOT NULL, "
++ "binary VARCHAR(255) NOT NULL, "
++ "topic VARCHAR(255) NOT NULL, "
++ "host VARCHAR(255) NOT NULL, "
++ "admin_state_up BOOLEAN NOT NULL DEFAULT 1, "
++ "created_at DATETIME NOT NULL, "
++ "started_at DATETIME NOT NULL, "
++ "heartbeat_timestamp DATETIME NOT NULL, "
++ "description VARCHAR(255), "
++ "configurations VARCHAR(4095) NOT NULL, "
++ "PRIMARY KEY (id), "
++ "CHECK (admin_state_up IN (0, 1)))")
++ op.execute("INSERT INTO agents "
++ "(id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations) "
++ "SELECT id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations "
++ "FROM agents_backup")
++ op.execute("DROP TABLE agents_backup")
+
-+ # ofcfiltermappings
-+ op.execute("CREATE TEMPORARY TABLE ofcfiltermappings_backup ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "quantum_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (quantum_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcfiltermappings_backup "
-+ "(ofc_id, quantum_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcfiltermappings")
-+ op.execute("DROP TABLE ofcfiltermappings")
-+ op.execute("CREATE TABLE ofcfiltermappings ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "neutron_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (neutron_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcfiltermappings "
-+ "(ofc_id, neutron_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcfiltermappings_backup")
-+ op.execute("DROP TABLE ofcfiltermappings_backup")
++ else:
++ migration.alter_column_if_exists(
++ 'agents', 'admin_state_up',
++ server_default=default,
++ existing_nullable=False,
++ existing_type=sa.Boolean)
+
+
+ def set_default_ml2(default=None):
+ if default:
+ default = sqlalchemy.sql.false()
+- migration.alter_column_if_exists(
+- 'ml2_gre_allocations', 'allocated',
+- server_default=default,
+- existing_nullable=False,
+- existing_type=sa.Boolean)
+- migration.alter_column_if_exists(
+- 'ml2_vxlan_allocations', 'allocated',
+- server_default=default,
+- existing_nullable=False,
+- existing_type=sa.Boolean)
++ if op.get_bind().engine.name == 'sqlite':
++ # ml2_gre_allocations
++ op.execute("CREATE TEMPORARY TABLE ml2_gre_allocations_backup ( "
++ "gre_id INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "PRIMARY KEY (gre_id), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO ml2_gre_allocations_backup "
++ "(gre_id, allocated) "
++ "SELECT gre_id, allocated "
++ "FROM ml2_gre_allocations")
++ op.execute("DROP TABLE ml2_gre_allocations")
++ op.execute("CREATE TABLE ml2_gre_allocations ( "
++ "gre_id INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "PRIMARY KEY (gre_id), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO ml2_gre_allocations "
++ "(gre_id, allocated) "
++ "SELECT gre_id, allocated "
++ "FROM ml2_gre_allocations_backup")
++ op.execute("DROP TABLE ml2_gre_allocations_backup")
+
-+ # ofcroutermappings
-+ op.execute("CREATE TEMPORARY TABLE ofcroutermappings_backup ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "quantum_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (quantum_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcroutermappings_backup "
-+ "(ofc_id, quantum_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcroutermappings")
-+ op.execute("DROP TABLE ofcroutermappings")
-+ op.execute("CREATE TABLE ofcroutermappings ( "
-+ "ofc_id VARCHAR(255) NOT NULL, "
-+ "neutron_id VARCHAR(36) NOT NULL, "
-+ "PRIMARY KEY (neutron_id), "
-+ "UNIQUE (ofc_id))")
-+ op.execute("INSERT INTO ofcroutermappings "
-+ "(ofc_id, neutron_id) "
-+ "SELECT ofc_id, quantum_id "
-+ "FROM ofcroutermappings_backup")
-+ op.execute("DROP TABLE ofcroutermappings_backup")
++ # ml2_vxlan_allocations
++ op.execute("CREATE TABLE ml2_vxlan_allocations_backup ( "
++ "vxlan_vni INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "PRIMARY KEY (vxlan_vni), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO ml2_vxlan_allocations_backup "
++ "(vxlan_vni, allocated) "
++ "SELECT vxlan_vni, allocated "
++ "FROM ml2_vxlan_allocations")
++ op.execute("DROP TABLE ml2_vxlan_allocations")
++ op.execute("CREATE TABLE ml2_vxlan_allocations ( "
++ "vxlan_vni INTEGER NOT NULL, "
++ "allocated BOOLEAN NOT NULL DEFAULT 0, "
++ "PRIMARY KEY (vxlan_vni), "
++ "CHECK (allocated IN (0, 1)))")
++ op.execute("INSERT INTO ml2_vxlan_allocations "
++ "(vxlan_vni, allocated) "
++ "SELECT vxlan_vni, allocated "
++ "FROM ml2_vxlan_allocations_backup")
++ op.execute("DROP TABLE ml2_vxlan_allocations_backup")
+
+ else:
-+ for table in ['ofctenantmappings', 'ofcnetworkmappings',
-+ 'ofcportmappings', 'ofcfiltermappings',
-+ 'ofcroutermappings',
-+ ]:
-+ op.alter_column(table, 'quantum_id',
-+ new_column_name='neutron_id',
-+ existing_type=sa.String(length=36),
-+ existing_nullable=False)
++ migration.alter_column_if_exists(
++ 'ml2_gre_allocations', 'allocated',
++ server_default=default,
++ existing_nullable=False,
++ existing_type=sa.Boolean)
++ migration.alter_column_if_exists(
++ 'ml2_vxlan_allocations', 'allocated',
++ server_default=default,
++ existing_nullable=False,
++ existing_type=sa.Boolean)
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
+@@ -30,6 +30,7 @@ down_revision = 'icehouse'
+ # This migration will be skipped when executed in offline mode.
+
+
++from alembic import op
+ import sqlalchemy as sa
+
+ from neutron.db import migration
+@@ -37,10 +38,11 @@ from neutron.db import migration
+
+ @migration.skip_if_offline
+ def upgrade():
+- migration.alter_column_if_exists(
+- 'ipsec_site_connections', 'peer_address',
+- existing_type=sa.String(255),
+- nullable=False)
++ if op.get_bind().engine.name != 'sqlite':
++ migration.alter_column_if_exists(
++ 'ipsec_site_connections', 'peer_address',
++ existing_type=sa.String(255),
++ nullable=False)
+
+
+ @migration.skip_if_offline
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
+@@ -42,28 +42,30 @@ def upgrade():
+ 'cisco_n1kv_vlan_allocations',
+ sa.Column('network_profile_id',
+ sa.String(length=36),
+- nullable=False)
+- )
+- op.create_foreign_key(
+- 'cisco_n1kv_vlan_allocations_ibfk_1',
+- source='cisco_n1kv_vlan_allocations',
+- referent='cisco_network_profiles',
+- local_cols=['network_profile_id'], remote_cols=['id'],
+- ondelete='CASCADE'
++ nullable=True)
+ )
++ if op.get_bind().engine.name != 'sqlite':
++ op.create_foreign_key(
++ 'cisco_n1kv_vlan_allocations_ibfk_1',
++ source='cisco_n1kv_vlan_allocations',
++ referent='cisco_network_profiles',
++ local_cols=['network_profile_id'], remote_cols=['id'],
++ ondelete='CASCADE'
++ )
+ op.add_column(
+ 'cisco_n1kv_vxlan_allocations',
+ sa.Column('network_profile_id',
+ sa.String(length=36),
+- nullable=False)
+- )
+- op.create_foreign_key(
+- 'cisco_n1kv_vxlan_allocations_ibfk_1',
+- source='cisco_n1kv_vxlan_allocations',
+- referent='cisco_network_profiles',
+- local_cols=['network_profile_id'], remote_cols=['id'],
+- ondelete='CASCADE'
++ nullable=True)
+ )
++ if op.get_bind().engine.name != 'sqlite':
++ op.create_foreign_key(
++ 'cisco_n1kv_vxlan_allocations_ibfk_1',
++ source='cisco_n1kv_vxlan_allocations',
++ referent='cisco_network_profiles',
++ local_cols=['network_profile_id'], remote_cols=['id'],
++ ondelete='CASCADE'
++ )
def downgrade():
+ 'cisco_nexusport_bindings', 'vlan_id',
+ nullable=True,
+ existing_type=sa.Integer)
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
+@@ -46,12 +46,15 @@ def _migrate_data(old_table, new_table):
+ "WHERE new_t.router_id = old_t.router_id)") %
+ {'new_table': new_table, 'old_table': old_table})
+ else:
+- op.execute(("UPDATE %(new_table)s new_t "
+- "INNER JOIN %(old_table)s as old_t "
+- "ON new_t.router_id = old_t.router_id "
+- "SET new_t.distributed = old_t.distributed, "
+- "new_t.service_router = old_t.service_router") %
+- {'new_table': new_table, 'old_table': old_table})
++ if op.get_bind().engine.name == 'sqlite':
++ print("Fix this for SQLite")
++ else:
++ op.execute(("UPDATE %(new_table)s new_t "
++ "INNER JOIN %(old_table)s as old_t "
++ "ON new_t.router_id = old_t.router_id "
++ "SET new_t.distributed = old_t.distributed, "
++ "new_t.service_router = old_t.service_router") %
++ {'new_table': new_table, 'old_table': old_table})
+
+
+ def upgrade():
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py
+@@ -34,14 +34,15 @@ from neutron.db import migration
+
+ def upgrade():
+ if migration.schema_has_table('poolstatisticss'):
+- op.alter_column('poolstatisticss', 'bytes_in',
+- type_=sa.BigInteger(), existing_type=sa.Integer())
+- op.alter_column('poolstatisticss', 'bytes_out',
+- type_=sa.BigInteger(), existing_type=sa.Integer())
+- op.alter_column('poolstatisticss', 'active_connections',
+- type_=sa.BigInteger(), existing_type=sa.Integer())
+- op.alter_column('poolstatisticss', 'total_connections',
+- type_=sa.BigInteger(), existing_type=sa.Integer())
++ if op.get_bind().engine.name != 'sqlite':
++ op.alter_column('poolstatisticss', 'bytes_in',
++ type_=sa.BigInteger(), existing_type=sa.Integer())
++ op.alter_column('poolstatisticss', 'bytes_out',
++ type_=sa.BigInteger(), existing_type=sa.Integer())
++ op.alter_column('poolstatisticss', 'active_connections',
++ type_=sa.BigInteger(), existing_type=sa.Integer())
++ op.alter_column('poolstatisticss', 'total_connections',
++ type_=sa.BigInteger(), existing_type=sa.Integer())
+
+
+ def downgrade():
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py
+@@ -29,6 +29,7 @@ down_revision = '1e5dd1d09b22'
+ # the firewall service plugin
+ # This migration will not be executed in offline mode
+
++from alembic import op
+ import sqlalchemy as sa
+
+ from neutron.db import migration
+@@ -36,10 +37,13 @@ from neutron.db import migration
+
+ @migration.skip_if_offline
+ def upgrade():
+- migration.alter_column_if_exists(
+- 'firewall_rules', 'protocol',
+- type_=sa.String(40),
+- existing_nullable=True)
++ if op.get_bind().engine.name == 'sqlite':
++ print("Nothing seems needed for SQLite here.")
++ else:
++ migration.alter_column_if_exists(
++ 'firewall_rules', 'protocol',
++ type_=sa.String(40),
++ existing_nullable=True)
+
+
+ def downgrade():
--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py
+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py
@@ -30,6 +30,7 @@ down_revision = '4eca4a84f08a'
+ 'ml2_brocadeports', 'admin_state_up',
+ nullable=True,
+ existing_type=sa.Boolean)
---- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
-+++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
-@@ -34,8 +34,9 @@ from neutron.db.migration.alembic_migrat
+--- neutron-2014.2~rc1.orig/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py
++++ neutron-2014.2~rc1/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py
+@@ -36,11 +36,15 @@ TABLE_NAME = 'members'
def upgrade():
- if migration.schema_has_table('meteringlabels'):
-- op.alter_column('meteringlabels', 'description', type_=sa.String(1024),
-- existing_nullable=True)
-+ if op.get_bind().engine.name != 'sqlite':
-+ op.alter_column('meteringlabels', 'description', type_=sa.String(1024),
-+ existing_nullable=True)
- else:
- metering_init_ops.create_meteringlabels()
+ if migration.schema_has_table(TABLE_NAME):
+- op.create_unique_constraint(
+- name=CONSTRAINT_NAME,
+- source=TABLE_NAME,
+- local_cols=['pool_id', 'address', 'protocol_port']
+- )
++ if op.get_bind().engine.name == 'sqlite':
++ op.execute("CREATE UNIQUE INDEX uniq_member0pool_id0address0port "
++ "on members (pool_id,address,protocol_port);")
++ else:
++ op.create_unique_constraint(
++ name=CONSTRAINT_NAME,
++ source=TABLE_NAME,
++ local_cols=['pool_id', 'address', 'protocol_port']
++ )
+
+ def downgrade():