]> review.fuel-infra Code Review - openstack-build/neutron-build.git/commitdiff
Delete obsolete patches.
authorThomas Goirand <zigo@debian.org>
Wed, 15 Apr 2015 14:04:00 +0000 (16:04 +0200)
committerThomas Goirand <zigo@debian.org>
Wed, 15 Apr 2015 14:04:00 +0000 (16:04 +0200)
Rewritten-From: b4e3a4524ef9cffda6d5af180a6a26fea5c10834

xenial/debian/patches/fix-alembic-migrations-with-sqlite.patch [deleted file]
xenial/debian/patches/series
xenial/debian/patches/tests_dont_rely_on_configuration_files_outside_tests_directory.patch [deleted file]

diff --git a/xenial/debian/patches/fix-alembic-migrations-with-sqlite.patch b/xenial/debian/patches/fix-alembic-migrations-with-sqlite.patch
deleted file mode 100644 (file)
index 1fc5fe1..0000000
+++ /dev/null
@@ -1,1228 +0,0 @@
-Description: Fixes Alembic migrations with SQLite
- There's a number of ALTER commands that SQLite doesn't understand, and which
- Alembic / SQLAlchemy is doing, which breaks the install of Neutron if using
- SQLite. This patch fixes this. Note that this is still a WIP, and that it
- should not be considered production ready. The goal is only to make it work
- so that we can do piuparts runs, *not* to make it fully work. Upstream does
- not support SQLite anyway, and they will refuse to upstream such a patch as they
- already did in the past.
-Author: Thomas Goirand <zigo@debian.org>
-Forwarded: not-needed
-Last-Update: 2014-10-10
-
-Index: neutron/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
-@@ -120,52 +120,78 @@ def upgrade():
-         sa_expr.column('network_id'))
-     # ofctenants -> ofctenantmappings
--    select_obj = sa.select([ofctenants.c.quantum_id,
--                            op.inline_literal('/tenants/') + ofctenants.c.id])
--    stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
--                             ofctenantmappings.c.ofc_id],
--                            select_obj)
--    op.execute(stmt)
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("INSERT INTO ofctenantmappings (quantum_id, ofc_id) SELECT ofctenants.quantum_id, '/tenants/' || ofctenants.id AS anon_1  FROM ofctenants")
-+    else:
-+        select_obj = sa.select([ofctenants.c.quantum_id,
-+                                op.inline_literal('/tenants/') + ofctenants.c.id])
-+        stmt = InsertFromSelect([ofctenantmappings.c.quantum_id,
-+                                 ofctenantmappings.c.ofc_id],
-+                                select_obj)
-+        op.execute(stmt)
-     # ofcnetworks -> ofcnetworkmappings
--    select_obj = ofcnetworks.join(
--        networks,
--        ofcnetworks.c.quantum_id == networks.c.id)
--    select_obj = select_obj.join(
--        ofctenantmappings,
--        ofctenantmappings.c.quantum_id == networks.c.tenant_id)
--    select_obj = sa.select(
--        [ofcnetworks.c.quantum_id,
--         (ofctenantmappings.c.ofc_id +
--          op.inline_literal('/networks/') + ofcnetworks.c.id)],
--        from_obj=select_obj)
--    stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
--                             ofcnetworkmappings.c.ofc_id],
--                            select_obj)
--    op.execute(stmt)
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("INSERT INTO ofcnetworkmappings (quantum_id, ofc_id) "
-+                   "SELECT ofcnetworks.quantum_id, "
-+                   "ofctenantmappings.ofc_id || '/networks/' || ofcnetworks.id "
-+                   "AS anon_1 FROM ofcnetworks "
-+                   "JOIN networks ON ofcnetworks.quantum_id = networks.id "
-+                   "JOIN ofctenantmappings "
-+                   "ON ofctenantmappings.quantum_id = networks.tenant_id")
-+    else:
-+        select_obj = ofcnetworks.join(
-+            networks,
-+            ofcnetworks.c.quantum_id == networks.c.id)
-+        select_obj = select_obj.join(
-+            ofctenantmappings,
-+            ofctenantmappings.c.quantum_id == networks.c.tenant_id)
-+        select_obj = sa.select(
-+            [ofcnetworks.c.quantum_id,
-+             (ofctenantmappings.c.ofc_id +
-+              op.inline_literal('/networks/') + ofcnetworks.c.id)],
-+            from_obj=select_obj)
-+        stmt = InsertFromSelect([ofcnetworkmappings.c.quantum_id,
-+                                 ofcnetworkmappings.c.ofc_id],
-+                                select_obj)
-+        op.execute(stmt)
-     # ofcports -> ofcportmappings
--    select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id)
--    select_obj = select_obj.join(
--        ofcnetworkmappings,
--        ofcnetworkmappings.c.quantum_id == ports.c.network_id)
--    select_obj = sa.select(
--        [ofcports.c.quantum_id,
--         (ofcnetworkmappings.c.ofc_id +
--          op.inline_literal('/ports/') + ofcports.c.id)],
--        from_obj=select_obj)
--    stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
--                             ofcportmappings.c.ofc_id],
--                            select_obj)
--    op.execute(stmt)
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("INSERT INTO ofcportmappings (quantum_id, ofc_id) "
-+                   "SELECT ofcports.quantum_id, "
-+                   "ofcnetworkmappings.ofc_id || '/ports/' || ofcports.id "
-+                   "AS anon_1 FROM ofcports "
-+                   "JOIN ports ON ofcports.quantum_id = ports.id "
-+                   "JOIN ofcnetworkmappings "
-+                   "ON ofcnetworkmappings.quantum_id = ports.network_id")
-+    else:
-+        select_obj = ofcports.join(ports, ofcports.c.quantum_id == ports.c.id)
-+        select_obj = select_obj.join(
-+            ofcnetworkmappings,
-+            ofcnetworkmappings.c.quantum_id == ports.c.network_id)
-+        select_obj = sa.select(
-+            [ofcports.c.quantum_id,
-+             (ofcnetworkmappings.c.ofc_id +
-+              op.inline_literal('/ports/') + ofcports.c.id)],
-+            from_obj=select_obj)
-+        stmt = InsertFromSelect([ofcportmappings.c.quantum_id,
-+                                 ofcportmappings.c.ofc_id],
-+                                select_obj)
-+        op.execute(stmt)
-     # ofcfilters -> ofcfiltermappings
--    select_obj = sa.select([ofcfilters.c.quantum_id,
--                            op.inline_literal('/filters/') + ofcfilters.c.id])
--    stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
--                             ofcfiltermappings.c.ofc_id],
--                            select_obj)
--    op.execute(stmt)
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("INSERT INTO ofcfiltermappings (quantum_id, ofc_id) "
-+                   "SELECT ofcfilters.quantum_id, '/filters/' || ofcfilters.id "
-+                   "AS anon_1 FROM ofcfilters")
-+    else:
-+        select_obj = sa.select([ofcfilters.c.quantum_id,
-+                                op.inline_literal('/filters/') + ofcfilters.c.id])
-+        stmt = InsertFromSelect([ofcfiltermappings.c.quantum_id,
-+                                 ofcfiltermappings.c.ofc_id],
-+                                select_obj)
-+        op.execute(stmt)
-     # drop old mapping tables
-     op.drop_table('ofctenants')
-Index: neutron/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
-@@ -25,11 +25,16 @@ Create Date: 2014-05-29 10:52:43.898980
- revision = 'db_healing'
- down_revision = '5446f2a45467'
-+
-+from alembic import op
-+
-+
- from neutron.db.migration.alembic_migrations import heal_script
- def upgrade():
--    heal_script.heal()
-+    if op.get_bind().engine.name != 'sqlite':
-+        heal_script.heal()
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
-@@ -25,6 +25,7 @@ Create Date: 2014-03-17 11:00:35.370618
- revision = '1e5dd1d09b22'
- down_revision = '54f7549a0e5f'
-+from alembic import op
- import sqlalchemy as sa
- from neutron.db import migration
-@@ -32,22 +33,23 @@ from neutron.db import migration
- @migration.skip_if_offline
- def upgrade():
--    migration.alter_column_if_exists(
--        'poolstatisticss', 'bytes_in',
--        nullable=False,
--        existing_type=sa.BigInteger())
--    migration.alter_column_if_exists(
--        'poolstatisticss', 'bytes_out',
--        nullable=False,
--        existing_type=sa.BigInteger())
--    migration.alter_column_if_exists(
--        'poolstatisticss', 'active_connections',
--        nullable=False,
--        existing_type=sa.BigInteger())
--    migration.alter_column_if_exists(
--        'poolstatisticss', 'total_connections',
--        nullable=False,
--        existing_type=sa.BigInteger())
-+    if op.get_bind().engine.name != 'sqlite':
-+        migration.alter_column_if_exists(
-+            'poolstatisticss', 'bytes_in',
-+            nullable=False,
-+            existing_type=sa.BigInteger())
-+        migration.alter_column_if_exists(
-+            'poolstatisticss', 'bytes_out',
-+            nullable=False,
-+            existing_type=sa.BigInteger())
-+        migration.alter_column_if_exists(
-+            'poolstatisticss', 'active_connections',
-+            nullable=False,
-+            existing_type=sa.BigInteger())
-+        migration.alter_column_if_exists(
-+            'poolstatisticss', 'total_connections',
-+            nullable=False,
-+            existing_type=sa.BigInteger())
- @migration.skip_if_offline
-Index: neutron/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py
-@@ -41,11 +41,15 @@ def upgrade():
-         # configured plugin did not create the agents table.
-         return
--    op.create_unique_constraint(
--        name=UC_NAME,
--        source=TABLE_NAME,
--        local_cols=['agent_type', 'host']
--    )
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("CREATE UNIQUE INDEX uniq_agents0agent_type0host "
-+                   "on agents (agent_type,host);")
-+    else:
-+        op.create_unique_constraint(
-+            name=UC_NAME,
-+            source=TABLE_NAME,
-+            local_cols=['agent_type', 'host']
-+        )
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
-@@ -48,13 +48,22 @@ def upgrade():
-                    'ON %(table)s.id = temp.id WHERE temp.id is NULL);'
-                    % {'table': TABLE_NAME})
-     else:
--        op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
--                   '(SELECT MIN(id) as id, router_id, l3_agent_id '
--                   ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
--                   'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
--                   % {'table': TABLE_NAME})
-+        if op.get_bind().engine.name == 'sqlite':
-+            # TODO: Fix this for SQLITE
-+            print("Fix this for SQLITE")
-+        else:
-+            op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
-+                       '(SELECT MIN(id) as id, router_id, l3_agent_id '
-+                       ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
-+                       'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
-+                       % {'table': TABLE_NAME})
--    op.drop_column(TABLE_NAME, 'id')
-+    if op.get_bind().engine.name == 'sqlite':
-+        # TODO: Fix this for SQLITE
-+        print("Fix this for SQLITE")
-+        return
-+    else:
-+        op.drop_column(TABLE_NAME, 'id')
-     with migration.remove_fks_from_table(TABLE_NAME):
-         # DB2 doesn't support nullable column in primary key
-Index: neutron/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
-@@ -34,8 +34,9 @@ from neutron.db.migration.alembic_migrat
- def upgrade():
-     if migration.schema_has_table('meteringlabels'):
--        op.alter_column('meteringlabels', 'description', type_=sa.String(1024),
--                        existing_nullable=True)
-+        if op.get_bind().engine.name != 'sqlite':
-+            op.alter_column('meteringlabels', 'description', type_=sa.String(1024),
-+                            existing_nullable=True)
-     else:
-         metering_init_ops.create_meteringlabels()
-Index: neutron/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
-@@ -45,9 +45,14 @@ def upgrade():
-               "SELECT id as router_id, "
-               "0 as distributed from routers")
-     else:
--        op.execute("INSERT INTO router_extra_attributes "
--              "SELECT id as router_id, "
--              "False as distributed from routers")
-+        if op.get_bind().engine.name == 'sqlite':
-+            op.execute("INSERT INTO router_extra_attributes "
-+                  "SELECT id AS router_id, "
-+                  "0 AS distributed FROM routers")
-+        else:
-+            op.execute("INSERT INTO router_extra_attributes "
-+                  "SELECT id AS router_id, "
-+                  "False AS distributed FROM routers")
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py
-@@ -34,8 +34,11 @@ PK_NAME = 'ml2_vxlan_endpoints_pkey'
- def upgrade():
--    op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
--    op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address'])
-+    if op.get_bind().engine.name == 'sqlite':
-+        print("Fix this for SQLite")
-+    else:
-+        op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
-+        op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address'])
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py
-@@ -50,6 +50,45 @@ def upgrade():
-             "UPDATE ml2_port_bindings SET"
-             " vif_details = '{\"port_filter\": false}'"
-             " WHERE cap_port_filter = 0")
-+        op.drop_column('ml2_port_bindings', 'cap_port_filter')
-+    elif op.get_bind().engine.name == 'sqlite':
-+        op.execute("CREATE TEMPORARY TABLE ml2_port_bindings_backup ( "
-+            "port_id VARCHAR(36) NOT NULL, "    
-+            "host VARCHAR(255) NOT NULL, "      
-+            "vif_type VARCHAR(64) NOT NULL, "   
-+            "cap_port_filter BOOLEAN NOT NULL, "
-+            "driver VARCHAR(64), " 
-+            "segment VARCHAR(36), "
-+            "vnic_type VARCHAR(64) DEFAULT 'normal' NOT NULL, "
-+            "vif_details VARCHAR(4095) DEFAULT '' NOT NULL, "
-+            "PRIMARY KEY (port_id), "
-+            "FOREIGN KEY(port_id) REFERENCES ports (id) ON DELETE CASCADE, "
-+            "FOREIGN KEY(segment) REFERENCES ml2_network_segments (id) ON DELETE SET NULL, "
-+            "CHECK (cap_port_filter IN (0, 1)));") 
-+        op.execute("INSERT INTO ml2_port_bindings_backup "
-+            "(port_id,host,vif_type,cap_port_filter,driver,segment,vnic_type) "
-+            "SELECT port_id,host,vif_type,cap_port_filter,driver,segment,vnic_type "
-+            "FROM ml2_port_bindings;")
-+        for value in ('true', 'false'):
-+            op.execute("UPDATE ml2_port_bindings_backup SET"
-+                       " vif_details = '{\"port_filter\": %(value)s}'"
-+                       " WHERE cap_port_filter = '%(value)s'" % {'value': value})
-+        op.execute("DROP TABLE ml2_port_bindings")
-+        op.execute("CREATE TABLE ml2_port_bindings ( "
-+            "port_id VARCHAR(36) NOT NULL, " 
-+            "host VARCHAR(255) NOT NULL, "   
-+            "vif_type VARCHAR(64) NOT NULL, "
-+            "driver VARCHAR(64), " 
-+            "segment VARCHAR(36), "
-+            "vnic_type VARCHAR(64) DEFAULT 'normal' NOT NULL, "
-+            "vif_details VARCHAR(4095) DEFAULT '' NOT NULL, "
-+            "PRIMARY KEY (port_id), "
-+            "FOREIGN KEY(port_id) REFERENCES ports (id) ON DELETE CASCADE, "
-+            "FOREIGN KEY(segment) REFERENCES ml2_network_segments (id) ON DELETE SET NULL);")
-+        op.execute("INSERT INTO ml2_port_bindings "
-+            "SELECT port_id,host,vif_type,driver,segment,vnic_type,vif_details "
-+            "FROM ml2_port_bindings_backup;")
-+        op.execute("DROP TABLE ml2_port_bindings_backup")
-     else:
-         op.execute(
-             "UPDATE ml2_port_bindings SET"
-@@ -59,7 +98,7 @@ def upgrade():
-             "UPDATE ml2_port_bindings SET"
-             " vif_details = '{\"port_filter\": false}'"
-             " WHERE cap_port_filter = false")
--    op.drop_column('ml2_port_bindings', 'cap_port_filter')
-+        op.drop_column('ml2_port_bindings', 'cap_port_filter')
-     if op.get_bind().engine.name == 'ibm_db_sa':
-         op.execute("CALL SYSPROC.ADMIN_CMD('REORG TABLE ml2_port_bindings')")
-Index: neutron/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
-@@ -38,14 +38,126 @@ def upgrade():
-         # configured plugin did not create any ofc tables.
-         return
--    for table in ['ofctenantmappings', 'ofcnetworkmappings',
--                  'ofcportmappings', 'ofcfiltermappings',
--                  'ofcroutermappings',
--                  ]:
--        op.alter_column(table, 'quantum_id',
--                        new_column_name='neutron_id',
--                        existing_type=sa.String(length=36),
--                        existing_nullable=False)
-+    if op.get_bind().engine.name == 'sqlite':
-+        # ofctenantmappings
-+        op.execute("CREATE TEMPORARY TABLE ofctenantmappings_backup ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "quantum_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (quantum_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofctenantmappings_backup "
-+                   "(ofc_id, quantum_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofctenantmappings")
-+        op.execute("DROP TABLE ofctenantmappings")
-+        op.execute("CREATE TABLE ofctenantmappings ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "neutron_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (neutron_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofctenantmappings "
-+                   "(ofc_id, neutron_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofctenantmappings_backup")
-+        op.execute("DROP TABLE ofctenantmappings_backup")
-+
-+        # ofcnetworkmappings
-+        op.execute("CREATE TEMPORARY TABLE ofcnetworkmappings_backup ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "quantum_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (quantum_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcnetworkmappings_backup "
-+                   "(ofc_id, quantum_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcnetworkmappings")
-+        op.execute("DROP TABLE ofcnetworkmappings")
-+        op.execute("CREATE TABLE ofcnetworkmappings ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "neutron_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (neutron_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcnetworkmappings "
-+                   "(ofc_id, neutron_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcnetworkmappings_backup")
-+        op.execute("DROP TABLE ofcnetworkmappings_backup")
-+
-+        # ofcportmappings
-+        op.execute("CREATE TEMPORARY TABLE ofcportmappings_backup ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "quantum_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (quantum_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcportmappings_backup "
-+                   "(ofc_id, quantum_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcportmappings")
-+        op.execute("DROP TABLE ofcportmappings")
-+        op.execute("CREATE TABLE ofcportmappings ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "neutron_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (neutron_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcportmappings "
-+                   "(ofc_id, neutron_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcportmappings_backup")
-+        op.execute("DROP TABLE ofcportmappings_backup")
-+
-+        # ofcfiltermappings
-+        op.execute("CREATE TEMPORARY TABLE ofcfiltermappings_backup ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "quantum_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (quantum_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcfiltermappings_backup "
-+                   "(ofc_id, quantum_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcfiltermappings")
-+        op.execute("DROP TABLE ofcfiltermappings")
-+        op.execute("CREATE TABLE ofcfiltermappings ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "neutron_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (neutron_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcfiltermappings "
-+                   "(ofc_id, neutron_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcfiltermappings_backup")
-+        op.execute("DROP TABLE ofcfiltermappings_backup")
-+
-+      # ofcroutermappings
-+        op.execute("CREATE TEMPORARY TABLE ofcroutermappings_backup ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "quantum_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (quantum_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcroutermappings_backup "
-+                   "(ofc_id, quantum_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcroutermappings")
-+        op.execute("DROP TABLE ofcroutermappings")
-+        op.execute("CREATE TABLE ofcroutermappings ( "
-+                   "ofc_id VARCHAR(255) NOT NULL, "
-+                   "neutron_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (neutron_id), "
-+                   "UNIQUE (ofc_id))")
-+        op.execute("INSERT INTO ofcroutermappings "
-+                   "(ofc_id, neutron_id) "
-+                   "SELECT ofc_id, quantum_id "
-+                   "FROM ofcroutermappings_backup")
-+        op.execute("DROP TABLE ofcroutermappings_backup")
-+
-+    else:
-+        for table in ['ofctenantmappings', 'ofcnetworkmappings',
-+                      'ofcportmappings', 'ofcfiltermappings',
-+                      'ofcroutermappings',
-+                      ]:
-+            op.alter_column(table, 'quantum_id',
-+                            new_column_name='neutron_id',
-+                            existing_type=sa.String(length=36),
-+                            existing_nullable=False)
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
-@@ -26,6 +26,7 @@ revision = '5446f2a45467'
- down_revision = '2db5203cb7a9'
-+from alembic import op
- import sqlalchemy as sa
- import sqlalchemy.sql
-@@ -70,20 +71,78 @@ def run(default=None):
- def set_default_brocade(default):
-     if default:
-         default = ''
--    migration.alter_column_if_exists(
--        'brocadeports', 'port_id',
--        server_default=default,
--        existing_type=sa.String(36))
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("CREATE TEMPORARY TABLE brocadeports_backup ( "
-+                   "port_id VARCHAR(36) NOT NULL DEFAULT '', "
-+                   "network_id VARCHAR(36) NOT NULL, "
-+                   "admin_state_up BOOLEAN NOT NULL, "
-+                   "physical_interface VARCHAR(36), "
-+                   "vlan_id VARCHAR(36), "
-+                   "tenant_id VARCHAR(36)," 
-+                   "PRIMARY KEY (port_id), "
-+                   "FOREIGN KEY(network_id) REFERENCES brocadenetworks (id), "
-+                   "CHECK (admin_state_up IN (0, 1)))")
-+        op.execute("INSERT INTO brocadeports_backup "
-+                   "(port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id) "
-+                   "SELECT port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id "
-+                   "FROM brocadeports")
-+        op.execute("DROP TABLE brocadeports")
-+        op.execute("CREATE TABLE brocadeports ( "
-+                   "port_id VARCHAR(36) NOT NULL DEFAULT '', "
-+                   "network_id VARCHAR(36) NOT NULL, "
-+                   "admin_state_up BOOLEAN NOT NULL, "
-+                   "physical_interface VARCHAR(36), "
-+                   "vlan_id VARCHAR(36), "
-+                   "tenant_id VARCHAR(36)," 
-+                   "PRIMARY KEY (port_id), "
-+                   "FOREIGN KEY(network_id) REFERENCES brocadenetworks (id), "
-+                   "CHECK (admin_state_up IN (0, 1)))")
-+        op.execute("INSERT INTO brocadeports "
-+                   "(port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id) "
-+                   "SELECT port_id, network_id, admin_state_up, physical_interface, vlan_id, tenant_id "
-+                   "FROM brocadeports_backup")
-+        op.execute("DROP TABLE brocadeports_backup")
-+    else:
-+        migration.alter_column_if_exists(
-+            'brocadeports', 'port_id',
-+            server_default=default,
-+            existing_type=sa.String(36))
- def set_default_mlnx(default):
-     if default:
-         default = sqlalchemy.sql.false()
--    migration.alter_column_if_exists(
--        'segmentation_id_allocation', 'allocated',
--        server_default=default,
--        existing_nullable=False,
--        existing_type=sa.Boolean)
-+
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("CREATE TEMPORARY TABLE segmentation_id_allocation_backup ( "
-+                   "physical_network VARCHAR(64) NOT NULL, "
-+                   "segmentation_id INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "PRIMARY KEY (physical_network, segmentation_id), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO segmentation_id_allocation_backup "
-+                   "(physical_network, segmentation_id, allocated) "
-+                   "SELECT physical_network, segmentation_id, allocated "
-+                   "FROM segmentation_id_allocation")
-+        op.execute("DROP TABLE segmentation_id_allocation")
-+        op.execute("CREATE TABLE segmentation_id_allocation ( "
-+                   "physical_network VARCHAR(64) NOT NULL, "
-+                   "segmentation_id INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "PRIMARY KEY (physical_network, segmentation_id), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO segmentation_id_allocation "
-+                   "(physical_network, segmentation_id, allocated) "
-+                   "SELECT physical_network, segmentation_id, allocated "
-+                   "FROM segmentation_id_allocation_backup")
-+        op.execute("DROP TABLE segmentation_id_allocation_backup")
-+
-+    else:
-+        migration.alter_column_if_exists(
-+            'segmentation_id_allocation', 'allocated',
-+            server_default=default,
-+            existing_nullable=False,
-+            existing_type=sa.Boolean)
- def set_default_cisco(default):
-@@ -92,61 +151,299 @@ def set_default_cisco(default):
-     profile_default = '0' if default else None
-     if default:
-         default = sqlalchemy.sql.false()
--    migration.alter_column_if_exists(
--        'cisco_n1kv_profile_bindings', 'tenant_id',
--        existing_type=sa.String(length=36),
--        server_default=profile_binding_default,
--        existing_nullable=False)
--    migration.alter_column_if_exists(
--        'cisco_network_profiles', 'multicast_ip_index',
--        server_default=profile_default,
--        existing_type=sa.Integer)
--    migration.alter_column_if_exists(
--        'cisco_n1kv_vlan_allocations', 'allocated',
--        existing_type=sa.Boolean,
--        server_default=default,
--        existing_nullable=False)
-+    if op.get_bind().engine.name == 'sqlite':
-+        # cisco_n1kv_profile_bindings_backup
-+        op.execute("CREATE TEMPORARY TABLE cisco_n1kv_profile_bindings_backup ( "
-+                   "profile_type VARCHAR(7), "
-+                   "tenant_id VARCHAR(36) NOT NULL DEFAULT 'TENANT_ID_NOT_SET', "
-+                   "profile_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (tenant_id, profile_id), "
-+                   "CONSTRAINT profile_type CHECK (profile_type IN ('network', 'policy')))")
-+        op.execute("INSERT INTO cisco_n1kv_profile_bindings_backup "
-+                   "(profile_type, tenant_id, profile_id) "
-+                   "SELECT profile_type, tenant_id, profile_id "
-+                   "FROM cisco_n1kv_profile_bindings")
-+        op.execute("DROP TABLE cisco_n1kv_profile_bindings")
-+        op.execute("CREATE TABLE cisco_n1kv_profile_bindings ( "
-+                   "profile_type VARCHAR(7), "
-+                   "tenant_id VARCHAR(36) NOT NULL DEFAULT 'TENANT_ID_NOT_SET', "
-+                   "profile_id VARCHAR(36) NOT NULL, "
-+                   "PRIMARY KEY (tenant_id, profile_id), "
-+                   "CONSTRAINT profile_type CHECK (profile_type IN ('network', 'policy')))")
-+        op.execute("INSERT INTO cisco_n1kv_profile_bindings "
-+                   "(profile_type, tenant_id, profile_id) "
-+                   "SELECT profile_type, tenant_id, profile_id "
-+                   "FROM cisco_n1kv_profile_bindings_backup")
-+        op.execute("DROP TABLE cisco_n1kv_profile_bindings_backup")
-+
-+        # cisco_network_profiles
-+        op.execute("CREATE TEMPORARY TABLE cisco_network_profiles_backup ( "
-+                   "id VARCHAR(36) NOT NULL, "
-+                   "name VARCHAR(255), "
-+                   "segment_type VARCHAR(13) NOT NULL, "
-+                   "sub_type VARCHAR(255), "
-+                   "segment_range VARCHAR(255), "
-+                   "multicast_ip_index INTEGER DEFAULT '0', "
-+                   "multicast_ip_range VARCHAR(255), "
-+                   "physical_network VARCHAR(255), "
-+                   "PRIMARY KEY (id), "
-+                   "CONSTRAINT segment_type CHECK (segment_type IN ('vlan', 'overlay', 'trunk', 'multi-segment')))")
-+        op.execute("INSERT INTO cisco_network_profiles_backup "
-+                   "(id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network) "
-+                   "SELECT id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network "
-+                   "FROM cisco_network_profiles")
-+        op.execute("DROP TABLE cisco_network_profiles")
-+        op.execute("CREATE TABLE cisco_network_profiles ( "
-+                   "id VARCHAR(36) NOT NULL, "
-+                   "name VARCHAR(255), "
-+                   "segment_type VARCHAR(13) NOT NULL, "
-+                   "sub_type VARCHAR(255), "
-+                   "segment_range VARCHAR(255), "
-+                   "multicast_ip_index INTEGER DEFAULT '0', "
-+                   "multicast_ip_range VARCHAR(255), "
-+                   "physical_network VARCHAR(255), "
-+                   "PRIMARY KEY (id), "
-+                   "CONSTRAINT segment_type CHECK (segment_type IN ('vlan', 'overlay', 'trunk', 'multi-segment')))")
-+        op.execute("INSERT INTO cisco_network_profiles "
-+                   "(id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network) "
-+                   "SELECT id, name, segment_type, sub_type, segment_range, multicast_ip_index, multicast_ip_range, physical_network "
-+                   "FROM cisco_network_profiles_backup")
-+        op.execute("DROP TABLE cisco_network_profiles_backup")
-+
-+        # cisco_n1kv_vlan_allocations
-+        op.execute("CREATE TEMPORARY TABLE zigo_backup ( "
-+                   "physical_network VARCHAR(64) NOT NULL, "
-+                   "vlan_id INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "network_profile_id VARCHAR(36), "
-+                   "PRIMARY KEY (physical_network, vlan_id), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO zigo_backup "
-+                   "(physical_network, vlan_id, allocated, allocated) "
-+                   "SELECT physical_network, vlan_id, allocated, allocated "
-+                   "FROM cisco_n1kv_vlan_allocations")
-+        op.execute("DROP TABLE cisco_n1kv_vlan_allocations")
-+        op.execute("CREATE TABLE cisco_n1kv_vlan_allocations ( "
-+                   "physical_network VARCHAR(64) NOT NULL, "
-+                   "vlan_id INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "network_profile_id VARCHAR(36), "
-+                   "PRIMARY KEY (physical_network, vlan_id), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO cisco_n1kv_vlan_allocations "
-+                   "(physical_network, vlan_id, allocated, allocated) "
-+                   "SELECT physical_network, vlan_id, allocated, allocated "
-+                   "FROM zigo_backup")
-+        op.execute("DROP TABLE zigo_backup")
-+
-+    else:
-+        migration.alter_column_if_exists(
-+            'cisco_n1kv_profile_bindings', 'tenant_id',
-+            existing_type=sa.String(length=36),
-+            server_default=profile_binding_default,
-+            existing_nullable=False)
-+        migration.alter_column_if_exists(
-+            'cisco_network_profiles', 'multicast_ip_index',
-+            server_default=profile_default,
-+            existing_type=sa.Integer)
-+        migration.alter_column_if_exists(
-+            'cisco_n1kv_vlan_allocations', 'allocated',
-+            existing_type=sa.Boolean,
-+            server_default=default,
-+            existing_nullable=False)
- def set_default_vmware(default=None):
-     if default:
-         default = sqlalchemy.sql.false()
--    migration.alter_column_if_exists(
--        'nsxrouterextattributess', 'service_router',
--        server_default=default,
--        existing_nullable=False,
--        existing_type=sa.Boolean)
--    migration.alter_column_if_exists(
--        'nsxrouterextattributess', 'distributed',
--        server_default=default,
--        existing_nullable=False,
--        existing_type=sa.Boolean)
--    migration.alter_column_if_exists(
--        'qosqueues', 'default',
--        server_default=default,
--        existing_type=sa.Boolean)
-+    if op.get_bind().engine.name == 'sqlite':
-+        # nsxrouterextattributess
-+        op.execute("CREATE TEMPORARY TABLE nsxrouterextattributess_backup ( "
-+                   "router_id VARCHAR(36) NOT NULL, "
-+                   "distributed BOOLEAN NOT NULL, "
-+                   "service_router BOOLEAN DEFAULT '0' NOT NULL, "
-+                   "PRIMARY KEY (router_id), "
-+                   "FOREIGN KEY(router_id) REFERENCES routers (id) ON DELETE CASCADE, "
-+                   "CHECK (distributed IN (0, 1)), "
-+                   "CHECK (service_router IN (0, 1)))")
-+        op.execute("INSERT INTO nsxrouterextattributess_backup "
-+                   "(router_id, distributed, service_router) "
-+                   "SELECT router_id, distributed, service_router "
-+                   "FROM nsxrouterextattributess")
-+        op.execute("DROP TABLE nsxrouterextattributess")
-+        op.execute("CREATE TABLE nsxrouterextattributess ( "
-+                   "router_id VARCHAR(36) NOT NULL, "
-+                   "distributed BOOLEAN NOT NULL DEFAULT 0, "
-+                   "service_router BOOLEAN DEFAULT '0' NOT NULL, "
-+                   "PRIMARY KEY (router_id), "
-+                   "FOREIGN KEY(router_id) REFERENCES routers (id) ON DELETE CASCADE, "
-+                   "CHECK (distributed IN (0, 1)), "
-+                   "CHECK (service_router IN (0, 1)))")
-+        op.execute("INSERT INTO nsxrouterextattributess "
-+                   "(router_id, distributed, service_router) "
-+                   "SELECT router_id, distributed, service_router "
-+                   "FROM nsxrouterextattributess_backup")
-+        op.execute("DROP TABLE nsxrouterextattributess_backup")
-+
-+        op.execute("CREATE TEMPORARY TABLE qosqueues_backup ("
-+                   "tenant_id VARCHAR(255), "
-+                   "id VARCHAR(36) NOT NULL, "
-+                   "name VARCHAR(255), "
-+                   "\"default\" BOOLEAN, "
-+                   "min INTEGER NOT NULL, "
-+                   "max INTEGER, "
-+                   "qos_marking VARCHAR(9), "
-+                   "dscp INTEGER, "
-+                   "PRIMARY KEY (id), "
-+                   "CHECK (\"default\" IN (0, 1)), "
-+                   "CONSTRAINT qosqueues_qos_marking CHECK (qos_marking IN ('untrusted', 'trusted')))")
-+        op.execute("INSERT INTO qosqueues_backup "
-+                   "(tenant_id, id, name, \"default\", min, max, qos_marking, dscp) "
-+                   "SELECT tenant_id, id, name, \"default\", min, max, qos_marking, dscp "
-+                   "FROM qosqueues")
-+        op.execute("DROP TABLE qosqueues")
-+        op.execute("CREATE TABLE qosqueues ("
-+                   "tenant_id VARCHAR(255), "
-+                   "id VARCHAR(36) NOT NULL, "
-+                   "name VARCHAR(255), "
-+                   "\"default\" BOOLEAN, "
-+                   "min INTEGER NOT NULL, "
-+                   "max INTEGER, "
-+                   "qos_marking VARCHAR(9), "
-+                   "dscp INTEGER, "
-+                   "PRIMARY KEY (id), "
-+                   "CHECK (\"default\" IN (0, 1)), "
-+                   "CONSTRAINT qosqueues_qos_marking CHECK (qos_marking IN ('untrusted', 'trusted')))")
-+        op.execute("INSERT INTO qosqueues "
-+                   "(tenant_id, id, name, \"default\", min, max, qos_marking, dscp) "
-+                   "SELECT tenant_id, id, name, \"default\", min, max, qos_marking, dscp "
-+                   "FROM qosqueues_backup")
-+        op.execute("DROP TABLE qosqueues_backup")
-+
-+    else:
-+        migration.alter_column_if_exists(
-+            'nsxrouterextattributess', 'service_router',
-+            server_default=default,
-+            existing_nullable=False,
-+            existing_type=sa.Boolean)
-+        migration.alter_column_if_exists(
-+            'nsxrouterextattributess', 'distributed',
-+            server_default=default,
-+            existing_nullable=False,
-+            existing_type=sa.Boolean)
-+        migration.alter_column_if_exists(
-+            'qosqueues', 'default',
-+            server_default=default,
-+            existing_type=sa.Boolean)
- def set_default_agents(default=None):
-     if default:
-         default = sqlalchemy.sql.true()
--    migration.alter_column_if_exists(
--        'agents', 'admin_state_up',
--        server_default=default,
--        existing_nullable=False,
--        existing_type=sa.Boolean)
-+    if op.get_bind().engine.name == 'sqlite':
-+        op.execute("CREATE TEMPORARY TABLE agents_backup ( "
-+                   "id VARCHAR(36) NOT NULL, "
-+                   "agent_type VARCHAR(255) NOT NULL, "
-+                   "binary VARCHAR(255) NOT NULL, "
-+                   "topic VARCHAR(255) NOT NULL, "
-+                   "host VARCHAR(255) NOT NULL, "
-+                   "admin_state_up BOOLEAN NOT NULL DEFAULT 1, "
-+                   "created_at DATETIME NOT NULL, "
-+                   "started_at DATETIME NOT NULL, "
-+                   "heartbeat_timestamp DATETIME NOT NULL, "
-+                   "description VARCHAR(255), "
-+                   "configurations VARCHAR(4095) NOT NULL, "
-+                   "PRIMARY KEY (id), "
-+                   "CHECK (admin_state_up IN (0, 1)))")
-+        op.execute("INSERT INTO agents_backup "
-+                   "(id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations) "
-+                   "SELECT id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations "
-+                   "FROM agents")
-+        op.execute("DROP TABLE agents")
-+        op.execute("CREATE TABLE agents ( "
-+                   "id VARCHAR(36) NOT NULL, "
-+                   "agent_type VARCHAR(255) NOT NULL, "
-+                   "binary VARCHAR(255) NOT NULL, "
-+                   "topic VARCHAR(255) NOT NULL, "
-+                   "host VARCHAR(255) NOT NULL, "
-+                   "admin_state_up BOOLEAN NOT NULL DEFAULT 1, "
-+                   "created_at DATETIME NOT NULL, "
-+                   "started_at DATETIME NOT NULL, "
-+                   "heartbeat_timestamp DATETIME NOT NULL, "
-+                   "description VARCHAR(255), "
-+                   "configurations VARCHAR(4095) NOT NULL, "
-+                   "PRIMARY KEY (id), "
-+                   "CHECK (admin_state_up IN (0, 1)))")
-+        op.execute("INSERT INTO agents "
-+                   "(id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations) "
-+                   "SELECT id, agent_type, binary, topic, host, admin_state_up, created_at, started_at, heartbeat_timestamp, description, configurations "
-+                   "FROM agents_backup")
-+        op.execute("DROP TABLE agents_backup")
-+
-+    else:
-+        migration.alter_column_if_exists(
-+            'agents', 'admin_state_up',
-+            server_default=default,
-+            existing_nullable=False,
-+            existing_type=sa.Boolean)
- def set_default_ml2(default=None):
-     if default:
-         default = sqlalchemy.sql.false()
--    migration.alter_column_if_exists(
--        'ml2_gre_allocations', 'allocated',
--        server_default=default,
--        existing_nullable=False,
--        existing_type=sa.Boolean)
--    migration.alter_column_if_exists(
--        'ml2_vxlan_allocations', 'allocated',
--        server_default=default,
--        existing_nullable=False,
--        existing_type=sa.Boolean)
-+    if op.get_bind().engine.name == 'sqlite':
-+        # ml2_gre_allocations
-+        op.execute("CREATE TEMPORARY TABLE ml2_gre_allocations_backup ( "
-+                   "gre_id INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "PRIMARY KEY (gre_id), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO ml2_gre_allocations_backup "
-+                   "(gre_id, allocated) "
-+                   "SELECT gre_id, allocated "
-+                   "FROM ml2_gre_allocations")
-+        op.execute("DROP TABLE ml2_gre_allocations")
-+        op.execute("CREATE TABLE ml2_gre_allocations ( "
-+                   "gre_id INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "PRIMARY KEY (gre_id), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO ml2_gre_allocations "
-+                   "(gre_id, allocated) "
-+                   "SELECT gre_id, allocated "
-+                   "FROM ml2_gre_allocations_backup")
-+        op.execute("DROP TABLE ml2_gre_allocations_backup")
-+
-+        # ml2_vxlan_allocations
-+        op.execute("CREATE TABLE ml2_vxlan_allocations_backup ( "
-+                   "vxlan_vni INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "PRIMARY KEY (vxlan_vni), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO ml2_vxlan_allocations_backup "
-+                   "(vxlan_vni, allocated) "
-+                   "SELECT vxlan_vni, allocated "
-+                   "FROM ml2_vxlan_allocations")
-+        op.execute("DROP TABLE ml2_vxlan_allocations")
-+        op.execute("CREATE TABLE ml2_vxlan_allocations ( "
-+                   "vxlan_vni INTEGER NOT NULL, "
-+                   "allocated BOOLEAN NOT NULL DEFAULT 0, "
-+                   "PRIMARY KEY (vxlan_vni), "
-+                   "CHECK (allocated IN (0, 1)))")
-+        op.execute("INSERT INTO ml2_vxlan_allocations "
-+                   "(vxlan_vni, allocated) "
-+                   "SELECT vxlan_vni, allocated "
-+                   "FROM ml2_vxlan_allocations_backup")
-+        op.execute("DROP TABLE ml2_vxlan_allocations_backup")
-+
-+    else:
-+        migration.alter_column_if_exists(
-+            'ml2_gre_allocations', 'allocated',
-+            server_default=default,
-+            existing_nullable=False,
-+            existing_type=sa.Boolean)
-+        migration.alter_column_if_exists(
-+            'ml2_vxlan_allocations', 'allocated',
-+            server_default=default,
-+            existing_nullable=False,
-+            existing_type=sa.Boolean)
-Index: neutron/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
-@@ -30,6 +30,7 @@ down_revision = 'icehouse'
- # This migration will be skipped when executed in offline mode.
-+from alembic import op
- import sqlalchemy as sa
- from neutron.db import migration
-@@ -37,10 +38,11 @@ from neutron.db import migration
- @migration.skip_if_offline
- def upgrade():
--    migration.alter_column_if_exists(
--        'ipsec_site_connections', 'peer_address',
--        existing_type=sa.String(255),
--        nullable=False)
-+    if op.get_bind().engine.name != 'sqlite':
-+        migration.alter_column_if_exists(
-+            'ipsec_site_connections', 'peer_address',
-+            existing_type=sa.String(255),
-+            nullable=False)
- @migration.skip_if_offline
-Index: neutron/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
-@@ -42,28 +42,30 @@ def upgrade():
-         'cisco_n1kv_vlan_allocations',
-         sa.Column('network_profile_id',
-                   sa.String(length=36),
--                  nullable=False)
--    )
--    op.create_foreign_key(
--        'cisco_n1kv_vlan_allocations_ibfk_1',
--        source='cisco_n1kv_vlan_allocations',
--        referent='cisco_network_profiles',
--        local_cols=['network_profile_id'], remote_cols=['id'],
--        ondelete='CASCADE'
-+                  nullable='False')
-     )
-+    if op.get_bind().engine.name != 'sqlite':
-+        op.create_foreign_key(
-+            'cisco_n1kv_vlan_allocations_ibfk_1',
-+            source='cisco_n1kv_vlan_allocations',
-+            referent='cisco_network_profiles',
-+            local_cols=['network_profile_id'], remote_cols=['id'],
-+            ondelete='CASCADE'
-+        )
-     op.add_column(
-         'cisco_n1kv_vxlan_allocations',
-         sa.Column('network_profile_id',
-                   sa.String(length=36),
--                  nullable=False)
--    )
--    op.create_foreign_key(
--        'cisco_n1kv_vxlan_allocations_ibfk_1',
--        source='cisco_n1kv_vxlan_allocations',
--        referent='cisco_network_profiles',
--        local_cols=['network_profile_id'], remote_cols=['id'],
--        ondelete='CASCADE'
-+                  nullable='False')
-     )
-+    if op.get_bind().engine.name != 'sqlite':
-+        op.create_foreign_key(
-+            'cisco_n1kv_vxlan_allocations_ibfk_1',
-+            source='cisco_n1kv_vxlan_allocations',
-+            referent='cisco_network_profiles',
-+            local_cols=['network_profile_id'], remote_cols=['id'],
-+            ondelete='CASCADE'
-+        )
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py
-@@ -29,6 +29,7 @@ down_revision = 'd06e871c0d5'
- # contains the tables for the cisco plugin.
- # This migration will be skipped when executed in offline mode.
-+from alembic import op
- import sqlalchemy as sa
- from neutron.db import migration
-@@ -36,15 +37,17 @@ from neutron.db import migration
- @migration.skip_if_offline
- def upgrade():
--    migration.alter_column_if_exists(
--        'cisco_nexusport_bindings', 'vlan_id',
--        nullable=False,
--        existing_type=sa.Integer)
-+    if op.get_bind().engine.name != 'sqlite':
-+        migration.alter_column_if_exists(
-+            'cisco_nexusport_bindings', 'vlan_id',
-+            nullable=False,
-+            existing_type=sa.Integer)
- @migration.skip_if_offline
- def downgrade():
--    migration.alter_column_if_exists(
--        'cisco_nexusport_bindings', 'vlan_id',
--        nullable=True,
--        existing_type=sa.Integer)
-+    if op.get_bind().engine.name != 'sqlite':
-+        migration.alter_column_if_exists(
-+            'cisco_nexusport_bindings', 'vlan_id',
-+            nullable=True,
-+            existing_type=sa.Integer)
-Index: neutron/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
-@@ -46,12 +46,15 @@ def _migrate_data(old_table, new_table):
-                     "WHERE new_t.router_id = old_t.router_id)") %
-                    {'new_table': new_table, 'old_table': old_table})
-     else:
--        op.execute(("UPDATE %(new_table)s new_t "
--                    "INNER JOIN %(old_table)s as old_t "
--                    "ON new_t.router_id = old_t.router_id "
--                    "SET new_t.distributed = old_t.distributed, "
--                    "new_t.service_router = old_t.service_router") %
--                   {'new_table': new_table, 'old_table': old_table})
-+        if op.get_bind().engine.name == 'sqlite':
-+            print("Fix this for SQLite")
-+        else:
-+            op.execute(("UPDATE %(new_table)s new_t "
-+                        "INNER JOIN %(old_table)s as old_t "
-+                        "ON new_t.router_id = old_t.router_id "
-+                        "SET new_t.distributed = old_t.distributed, "
-+                        "new_t.service_router = old_t.service_router") %
-+                       {'new_table': new_table, 'old_table': old_table})
- def upgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py
-@@ -34,14 +34,15 @@ from neutron.db import migration
- def upgrade():
-     if migration.schema_has_table('poolstatisticss'):
--        op.alter_column('poolstatisticss', 'bytes_in',
--                        type_=sa.BigInteger(), existing_type=sa.Integer())
--        op.alter_column('poolstatisticss', 'bytes_out',
--                        type_=sa.BigInteger(), existing_type=sa.Integer())
--        op.alter_column('poolstatisticss', 'active_connections',
--                        type_=sa.BigInteger(), existing_type=sa.Integer())
--        op.alter_column('poolstatisticss', 'total_connections',
--                        type_=sa.BigInteger(), existing_type=sa.Integer())
-+        if op.get_bind().engine.name != 'sqlite':
-+            op.alter_column('poolstatisticss', 'bytes_in',
-+                            type_=sa.BigInteger(), existing_type=sa.Integer())
-+            op.alter_column('poolstatisticss', 'bytes_out',
-+                            type_=sa.BigInteger(), existing_type=sa.Integer())
-+            op.alter_column('poolstatisticss', 'active_connections',
-+                            type_=sa.BigInteger(), existing_type=sa.Integer())
-+            op.alter_column('poolstatisticss', 'total_connections',
-+                            type_=sa.BigInteger(), existing_type=sa.Integer())
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py
-@@ -29,6 +29,7 @@ down_revision = '1e5dd1d09b22'
- # the firewall service plugin
- # This migration will not be executed in offline mode
-+from alembic import op
- import sqlalchemy as sa
- from neutron.db import migration
-@@ -36,10 +37,13 @@ from neutron.db import migration
- @migration.skip_if_offline
- def upgrade():
--    migration.alter_column_if_exists(
--        'firewall_rules', 'protocol',
--        type_=sa.String(40),
--        existing_nullable=True)
-+    if op.get_bind().engine.name == 'sqlite':
-+        print("Nothing seems needed for SQLite here.")
-+    else:
-+        migration.alter_column_if_exists(
-+            'firewall_rules', 'protocol',
-+            type_=sa.String(40),
-+            existing_nullable=True)
- def downgrade():
-Index: neutron/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py
-@@ -30,6 +30,7 @@ down_revision = '4eca4a84f08a'
- # This migration will be skipped when executed in offline mode.
-+from alembic import op
- import sqlalchemy as sa
- from neutron.db import migration
-@@ -37,15 +38,17 @@ from neutron.db import migration
- @migration.skip_if_offline
- def upgrade():
--    migration.alter_column_if_exists(
--        'ml2_brocadeports', 'admin_state_up',
--        nullable=False,
--        existing_type=sa.Boolean)
-+    if op.get_bind().engine.name != 'sqlite':
-+        migration.alter_column_if_exists(
-+            'ml2_brocadeports', 'admin_state_up',
-+            nullable=False,
-+            existing_type=sa.Boolean)
- @migration.skip_if_offline
- def downgrade():
--    migration.alter_column_if_exists(
--        'ml2_brocadeports', 'admin_state_up',
--        nullable=True,
--        existing_type=sa.Boolean)
-+    if op.get_bind().engine.name != 'sqlite':
-+        migration.alter_column_if_exists(
-+            'ml2_brocadeports', 'admin_state_up',
-+            nullable=True,
-+            existing_type=sa.Boolean)
-Index: neutron/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py
-===================================================================
---- neutron.orig/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py
-+++ neutron/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py
-@@ -36,11 +36,15 @@ TABLE_NAME = 'members'
- def upgrade():
-     if migration.schema_has_table(TABLE_NAME):
--        op.create_unique_constraint(
--            name=CONSTRAINT_NAME,
--            source=TABLE_NAME,
--            local_cols=['pool_id', 'address', 'protocol_port']
--        )
-+        if op.get_bind().engine.name == 'sqlite':
-+            op.execute("CREATE UNIQUE INDEX uniq_member0pool_id0address0port "
-+                       "on members (pool_id,address,protocol_port);")
-+        else:
-+            op.create_unique_constraint(
-+                name=CONSTRAINT_NAME,
-+                source=TABLE_NAME,
-+                local_cols=['pool_id', 'address', 'protocol_port']
-+            )
- def downgrade():
index e6459d3f9c30971f17d216c75859d771555f5d3a..d45a00c11eec7d81e767aed4db3cf66296514db9 100644 (file)
@@ -1,3 +1 @@
-#fix-alembic-migrations-with-sqlite.patch
-tests_dont_rely_on_configuration_files_outside_tests_directory.patch
 better-config-defaults.patch
diff --git a/xenial/debian/patches/tests_dont_rely_on_configuration_files_outside_tests_directory.patch b/xenial/debian/patches/tests_dont_rely_on_configuration_files_outside_tests_directory.patch
deleted file mode 100644 (file)
index eb5aec6..0000000
+++ /dev/null
@@ -1,247 +0,0 @@
-Subject: tests: don't rely on configuration files outside tests directory
- etc/... may be non existent in some build environments. It's also pip
- does not install those files under site-packages neutron module, so
- paths relative to python files don't work.
- .
- So instead of using relative paths to etc/... contents, maintain our own
- version of configuration files. It means we need to maintain tests only
- policy.json file too, in addition to neutron.conf.test and
- api-paste.ini.test.
- .
- Ideally, we would make etc/policy.json copied under site-packages in
- addition to /etc/neutron/. In that way, we would not maintain a copy of
- policy.json file in two places.
- .
- Though it seems that setuputils does not have a good way to install
- files under site-packages that would consider all the differences
- between python environments (specifically, different prefixes used in
- different systems).
- .
- Note: it's not *absolutely* needed to update the test policy.json file
- on each next policy update, though it will be needed in cases when we
- want to test policy changes in unit tests. So adding a check to make
- sure files are identical.
- .
- This partially reverts commit 1404f33b50452d4c0e0ef8c748011ce80303c2fd.
-Author: Ihar Hrachyshka <ihrachys@redhat.com>
-Date: Wed, 18 Mar 2015 13:21:57 +0000 (+0100)
-X-Git-Url: https://review.openstack.org/gitweb?p=openstack%2Fneutron.git;a=commitdiff_plain;h=9231a132f79f8427d410a8ef165b674578addac3
-Related-Bug: #1433146
-Change-Id: If1f5ebd981cf06558d5102524211799676068889
-Origin: upstream, https://review.openstack.org/#/c/165237/
-Last-Update: 2015-03-18
-
-diff --git a/neutron/tests/base.py b/neutron/tests/base.py
-index 6886af9..d8bc0ce 100644
---- a/neutron/tests/base.py
-+++ b/neutron/tests/base.py
-@@ -42,12 +42,12 @@ CONF = cfg.CONF
- CONF.import_opt('state_path', 'neutron.common.config')
- LOG_FORMAT = sub_base.LOG_FORMAT
--ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
--TEST_ROOT_DIR = os.path.dirname(__file__)
-+ROOTDIR = os.path.dirname(__file__)
-+ETCDIR = os.path.join(ROOTDIR, 'etc')
--def etcdir(filename, root=TEST_ROOT_DIR):
--    return os.path.join(root, 'etc', filename)
-+def etcdir(*p):
-+    return os.path.join(ETCDIR, *p)
- def fake_use_fatal_exceptions(*args):
-@@ -69,11 +69,6 @@ class BaseTestCase(sub_base.SubBaseTestCase):
-         # neutron.conf.test includes rpc_backend which needs to be cleaned up
-         if args is None:
-             args = ['--config-file', etcdir('neutron.conf.test')]
--        # this is needed to add ROOT_DIR to the list of paths that oslo.config
--        # will try to traverse when searching for a new config file (it's
--        # needed so that policy module can locate policy_file)
--        args += ['--config-file', etcdir('neutron.conf', root=ROOT_DIR)]
--
-         if conf is None:
-             config.init(args=args)
-         else:
-diff --git a/neutron/tests/etc/policy.json b/neutron/tests/etc/policy.json
-new file mode 100644
-index 0000000..4fc6c1c
---- /dev/null
-+++ b/neutron/tests/etc/policy.json
-@@ -0,0 +1,147 @@
-+{
-+    "context_is_admin":  "role:admin",
-+    "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
-+    "context_is_advsvc":  "role:advsvc",
-+    "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
-+    "admin_only": "rule:context_is_admin",
-+    "regular_user": "",
-+    "shared": "field:networks:shared=True",
-+    "shared_firewalls": "field:firewalls:shared=True",
-+    "shared_firewall_policies": "field:firewall_policies:shared=True",
-+    "external": "field:networks:router:external=True",
-+    "default": "rule:admin_or_owner",
-+
-+    "create_subnet": "rule:admin_or_network_owner",
-+    "get_subnet": "rule:admin_or_owner or rule:shared",
-+    "update_subnet": "rule:admin_or_network_owner",
-+    "delete_subnet": "rule:admin_or_network_owner",
-+
-+    "create_network": "",
-+    "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
-+    "get_network:router:external": "rule:regular_user",
-+    "get_network:segments": "rule:admin_only",
-+    "get_network:provider:network_type": "rule:admin_only",
-+    "get_network:provider:physical_network": "rule:admin_only",
-+    "get_network:provider:segmentation_id": "rule:admin_only",
-+    "get_network:queue_id": "rule:admin_only",
-+    "create_network:shared": "rule:admin_only",
-+    "create_network:router:external": "rule:admin_only",
-+    "create_network:segments": "rule:admin_only",
-+    "create_network:provider:network_type": "rule:admin_only",
-+    "create_network:provider:physical_network": "rule:admin_only",
-+    "create_network:provider:segmentation_id": "rule:admin_only",
-+    "update_network": "rule:admin_or_owner",
-+    "update_network:segments": "rule:admin_only",
-+    "update_network:shared": "rule:admin_only",
-+    "update_network:provider:network_type": "rule:admin_only",
-+    "update_network:provider:physical_network": "rule:admin_only",
-+    "update_network:provider:segmentation_id": "rule:admin_only",
-+    "update_network:router:external": "rule:admin_only",
-+    "delete_network": "rule:admin_or_owner",
-+
-+    "create_port": "",
-+    "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
-+    "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-+    "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-+    "create_port:binding:host_id": "rule:admin_only",
-+    "create_port:binding:profile": "rule:admin_only",
-+    "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-+    "get_port": "rule:admin_or_owner or rule:context_is_advsvc",
-+    "get_port:queue_id": "rule:admin_only",
-+    "get_port:binding:vif_type": "rule:admin_only",
-+    "get_port:binding:vif_details": "rule:admin_only",
-+    "get_port:binding:host_id": "rule:admin_only",
-+    "get_port:binding:profile": "rule:admin_only",
-+    "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
-+    "update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
-+    "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
-+    "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-+    "update_port:binding:host_id": "rule:admin_only",
-+    "update_port:binding:profile": "rule:admin_only",
-+    "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
-+    "delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
-+
-+    "get_router:ha": "rule:admin_only",
-+    "create_router": "rule:regular_user",
-+    "create_router:external_gateway_info:enable_snat": "rule:admin_only",
-+    "create_router:distributed": "rule:admin_only",
-+    "create_router:ha": "rule:admin_only",
-+    "get_router": "rule:admin_or_owner",
-+    "get_router:distributed": "rule:admin_only",
-+    "update_router:external_gateway_info:enable_snat": "rule:admin_only",
-+    "update_router:distributed": "rule:admin_only",
-+    "update_router:ha": "rule:admin_only",
-+    "delete_router": "rule:admin_or_owner",
-+
-+    "add_router_interface": "rule:admin_or_owner",
-+    "remove_router_interface": "rule:admin_or_owner",
-+
-+    "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-+    "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
-+
-+    "create_firewall": "",
-+    "get_firewall": "rule:admin_or_owner",
-+    "create_firewall:shared": "rule:admin_only",
-+    "get_firewall:shared": "rule:admin_only",
-+    "update_firewall": "rule:admin_or_owner",
-+    "update_firewall:shared": "rule:admin_only",
-+    "delete_firewall": "rule:admin_or_owner",
-+
-+    "create_firewall_policy": "",
-+    "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewall_policies",
-+    "create_firewall_policy:shared": "rule:admin_or_owner",
-+    "update_firewall_policy": "rule:admin_or_owner",
-+    "delete_firewall_policy": "rule:admin_or_owner",
-+
-+    "create_firewall_rule": "",
-+    "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
-+    "update_firewall_rule": "rule:admin_or_owner",
-+    "delete_firewall_rule": "rule:admin_or_owner",
-+
-+    "create_qos_queue": "rule:admin_only",
-+    "get_qos_queue": "rule:admin_only",
-+
-+    "update_agent": "rule:admin_only",
-+    "delete_agent": "rule:admin_only",
-+    "get_agent": "rule:admin_only",
-+
-+    "create_dhcp-network": "rule:admin_only",
-+    "delete_dhcp-network": "rule:admin_only",
-+    "get_dhcp-networks": "rule:admin_only",
-+    "create_l3-router": "rule:admin_only",
-+    "delete_l3-router": "rule:admin_only",
-+    "get_l3-routers": "rule:admin_only",
-+    "get_dhcp-agents": "rule:admin_only",
-+    "get_l3-agents": "rule:admin_only",
-+    "get_loadbalancer-agent": "rule:admin_only",
-+    "get_loadbalancer-pools": "rule:admin_only",
-+    "get_agent-loadbalancers": "rule:admin_only",
-+    "get_loadbalancer-hosting-agent": "rule:admin_only",
-+
-+    "create_floatingip": "rule:regular_user",
-+    "create_floatingip:floating_ip_address": "rule:admin_only",
-+    "update_floatingip": "rule:admin_or_owner",
-+    "delete_floatingip": "rule:admin_or_owner",
-+    "get_floatingip": "rule:admin_or_owner",
-+
-+    "create_network_profile": "rule:admin_only",
-+    "update_network_profile": "rule:admin_only",
-+    "delete_network_profile": "rule:admin_only",
-+    "get_network_profiles": "",
-+    "get_network_profile": "",
-+    "update_policy_profiles": "rule:admin_only",
-+    "get_policy_profiles": "",
-+    "get_policy_profile": "",
-+
-+    "create_metering_label": "rule:admin_only",
-+    "delete_metering_label": "rule:admin_only",
-+    "get_metering_label": "rule:admin_only",
-+
-+    "create_metering_label_rule": "rule:admin_only",
-+    "delete_metering_label_rule": "rule:admin_only",
-+    "get_metering_label_rule": "rule:admin_only",
-+
-+    "get_service_provider": "rule:regular_user",
-+    "get_lsn": "rule:admin_only",
-+    "create_lsn": "rule:admin_only"
-+}
-diff --git a/tools/misc-sanity-checks.sh b/tools/misc-sanity-checks.sh
-index bc4d2eb..eeac227 100644
---- a/tools/misc-sanity-checks.sh
-+++ b/tools/misc-sanity-checks.sh
-@@ -61,10 +61,23 @@ check_pot_files_errors () {
-     fi
- }
-+
-+check_identical_policy_files () {
-+    # For unit tests, we maintain their own policy.json file to make test suite
-+    # independent of whether it's executed from the neutron source tree or from
-+    # site-packages installation path. We don't want two copies of the same
-+    # file to diverge, so checking that they are identical
-+    diff etc/policy.json neutron/tests/etc/policy.json 2>&1 > /dev/null
-+    if [ "$?" -ne 0 ]; then
-+        echo "policy.json files must be identical!" >>$FAILURES
-+    fi
-+}
-+
- # Add your checks here...
- check_opinionated_shell
- check_no_symlinks_allowed
- check_pot_files_errors
-+check_identical_policy_files
- # Fail, if there are emitted failures
- if [ -f $FAILURES ]; then