review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove downgrade migrations
Author: Ivan Kolodyazhny <e0ne@e0ne.info>
Thu, 17 Dec 2015 11:15:57 +0000 (13:15 +0200)
Committer: Ivan Kolodyazhny <e0ne@e0ne.info>
Fri, 18 Dec 2015 12:04:16 +0000 (14:04 +0200)
According to cross project spec[1] downgrade migrations should be removed.

[1] I622f89fe63327d44f9b229d3bd9e76e15acbaa7a

Implements blueprint: no-downward-sql-migration

Change-Id: I111cdb4bba361de5da0ce7db8144965c947ada41

74 files changed:
cinder/db/migration.py
cinder/db/sqlalchemy/migrate_repo/versions/001_cinder_init.py
cinder/db/sqlalchemy/migrate_repo/versions/002_quota_class.py
cinder/db/sqlalchemy/migrate_repo/versions/003_glance_metadata.py
cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py
cinder/db/sqlalchemy/migrate_repo/versions/005_add_source_volume_column.py
cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py
cinder/db/sqlalchemy/migrate_repo/versions/007_add_volume_snapshot_fk.py
cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py
cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py
cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py
cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py
cinder/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/012_add_attach_host_column.py
cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/013_add_provider_geometry_column.py
cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py
cinder/db/sqlalchemy/migrate_repo/versions/014_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py
cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py
cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py
cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py
cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py
cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py
cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py
cinder/db/sqlalchemy/migrate_repo/versions/022_add_reason_column_to_service.py
cinder/db/sqlalchemy/migrate_repo/versions/023_add_expire_reservations_index.py
cinder/db/sqlalchemy/migrate_repo/versions/024_add_replication_support.py
cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py
cinder/db/sqlalchemy/migrate_repo/versions/026_add_consistencygroup_quota_class.py
cinder/db/sqlalchemy/migrate_repo/versions/027_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/028_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/029_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/030_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/031_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/032_add_volume_type_projects.py
cinder/db/sqlalchemy/migrate_repo/versions/032_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/033_add_encryption_unique_key.py
cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/034_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/034_volume_type_add_desc_column.py
cinder/db/sqlalchemy/migrate_repo/versions/035_add_provider_id_column.py
cinder/db/sqlalchemy/migrate_repo/versions/036_add_provider_id_column_to_snapshots.py
cinder/db/sqlalchemy/migrate_repo/versions/037_add_cgsnapshot_id_column_to_consistencygroups.py
cinder/db/sqlalchemy/migrate_repo/versions/038_add_driver_initiator_data_table.py
cinder/db/sqlalchemy/migrate_repo/versions/039_add_parent_id_to_backups.py
cinder/db/sqlalchemy/migrate_repo/versions/040_add_volume_attachment.py
cinder/db/sqlalchemy/migrate_repo/versions/040_sqlite_downgrade.sql [deleted file]
cinder/db/sqlalchemy/migrate_repo/versions/041_add_modified_at_column_to_service.py
cinder/db/sqlalchemy/migrate_repo/versions/042_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/043_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/044_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/045_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/046_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/047_add_per_volume_quota.py
cinder/db/sqlalchemy/migrate_repo/versions/048_add_allocated_in_quotas.py
cinder/db/sqlalchemy/migrate_repo/versions/049_add_temp_volume_snapshot_ids_to_backups.py
cinder/db/sqlalchemy/migrate_repo/versions/050_add_previous_status_to_volumes.py
cinder/db/sqlalchemy/migrate_repo/versions/051_add_source_cgid_column_to_consistencygroups.py
cinder/db/sqlalchemy/migrate_repo/versions/052_add_provider_auth_column_to_snapshots.py
cinder/db/sqlalchemy/migrate_repo/versions/053_add_version_columns_to_service.py
cinder/db/sqlalchemy/migrate_repo/versions/054_add_has_dependent_backups_column_to_backups.py
cinder/db/sqlalchemy/migrate_repo/versions/055_add_image_volume_cache_table.py
cinder/db/sqlalchemy/migrate_repo/versions/056_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/057_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/058_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/059_placeholder.py
cinder/db/sqlalchemy/migrate_repo/versions/060_placeholder.py
cinder/tests/unit/test_cmd.py
cinder/tests/unit/test_migrations.py
cinder/tests/unit/test_misc.py

index 7603313f10c967aa9d12981ce1ac09366890d173..856e16c811b44e2d6b0a3d2ea48b3cfbbc6e2943 100644 (file)
@@ -24,6 +24,9 @@ from oslo_db import options
 from stevedore import driver
 
 from cinder.db.sqlalchemy import api as db_api
+from cinder import exception
+from cinder.i18n import _
+
 
 INIT_VERSION = 000
 
@@ -55,6 +58,15 @@ def db_sync(version=None, init_version=INIT_VERSION, engine=None):
 
     if engine is None:
         engine = db_api.get_engine()
+
+    current_db_version = get_backend().db_version(engine,
+                                                  MIGRATE_REPO_PATH,
+                                                  init_version)
+
+    # TODO(e0ne): drop version validation when new oslo.db will be released
+    if version and int(version) < current_db_version:
+        msg = _('Database schema downgrade is not allowed.')
+        raise exception.InvalidInput(reason=msg)
     return get_backend().db_sync(engine=engine,
                                  abs_path=MIGRATE_REPO_PATH,
                                  version=version,
index a39aed63ce22ea58c1954a7a8cebe010b5ab5fe4..550f85d2846d3bcc02ad146bc1b717abfd47d0c4 100644 (file)
@@ -256,12 +256,3 @@ def upgrade(migrate_engine):
             "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" %
             migrate_engine.url.database)
         migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    tables = define_tables(meta)
-    tables.reverse()
-    for table in tables:
-        table.drop()
index 8d133ed80cba2bf702ad71cca97ae06a07d32340..f23e61f0579e2e2a3c6c0983459bb1d80e2c5d61 100644 (file)
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from migrate import ForeignKeyConstraint
 from sqlalchemy import Boolean, Column, DateTime
 from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
 
@@ -88,37 +87,3 @@ def upgrade(migrate_engine):
                          )
 
     reservations.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    fk_name = None
-
-    if migrate_engine.name == 'mysql':
-        fk_name = 'reservations_ibfk_1'
-    elif migrate_engine.name == 'postgresql':
-        fk_name = 'reservations_usage_id_fkey'
-
-    # NOTE: MySQL and PostgreSQL Cannot drop the quota_usages table
-    # until the foreign key is removed.  We remove the foreign key first,
-    # and then we drop the table.
-    table = Table('reservations', meta, autoload=True)
-    ref_table = Table('reservations', meta, autoload=True)
-    params = {'columns': [table.c['usage_id']],
-              'refcolumns': [ref_table.c['id']],
-              'name': fk_name}
-
-    if fk_name:
-        fkey = ForeignKeyConstraint(**params)
-        fkey.drop()
-
-    quota_classes = Table('quota_classes', meta, autoload=True)
-    quota_classes.drop()
-
-    quota_usages = Table('quota_usages', meta, autoload=True)
-    quota_usages.drop()
-
-    reservations = Table('reservations', meta, autoload=True)
-    reservations.drop()
index 4c38c2dbe4653d617099785b98e43d9b51f77ede..6004b9e07e77b6dcce430960fe8aedf1d75fd881 100644 (file)
@@ -53,12 +53,3 @@ def upgrade(migrate_engine):
     except Exception:
         meta.drop_all(tables=[volume_glance_metadata])
         raise
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volume_glance_metadata = Table('volume_glance_metadata',
-                                   meta, autoload=True)
-    volume_glance_metadata.drop()
index bbb6fa80cf4cd23bd521865bb886db7b36d369cc..538a568e830b908b8b8f80576e0e6fca6cb56fcc 100644 (file)
@@ -13,7 +13,7 @@
 import uuid
 
 from migrate import ForeignKeyConstraint
-from sqlalchemy import Integer, MetaData, String, Table
+from sqlalchemy import MetaData, String, Table
 
 
 def upgrade(migrate_engine):
@@ -79,81 +79,3 @@ def upgrade(migrate_engine):
                     pass
                 else:
                     raise
-
-
-def downgrade(migrate_engine):
-    """Convert volume_type from UUID back to int."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    volume_types = Table('volume_types', meta, autoload=True)
-    extra_specs = Table('volume_type_extra_specs', meta, autoload=True)
-
-    fkey_remove_list = [volumes.c.volume_type_id,
-                        volume_types.c.id,
-                        extra_specs.c.volume_type_id]
-
-    for column in fkey_remove_list:
-        fkeys = list(column.foreign_keys)
-        if fkeys:
-            fkey_name = fkeys[0].constraint.name
-            fkey = ForeignKeyConstraint(columns=[column],
-                                        refcolumns=[volume_types.c.id],
-                                        name=fkey_name)
-
-            try:
-                fkey.drop()
-            except Exception:
-                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
-                    pass
-                else:
-                    raise
-
-    vtype_list = list(volume_types.select().execute())
-    new_id = 1
-
-    for t in vtype_list:
-        volumes.update().\
-            where(volumes.c.volume_type_id == t['id']).\
-            values(volume_type_id=new_id).execute()
-
-        extra_specs.update().\
-            where(extra_specs.c.volume_type_id == t['id']).\
-            values(volume_type_id=new_id).execute()
-
-        volume_types.update().\
-            where(volume_types.c.id == t['id']).\
-            values(id=new_id).execute()
-
-        new_id += 1
-
-    if migrate_engine.name == 'postgresql':
-        # NOTE(e0ne): PostgreSQL can't cast string to int automatically
-        table_column_pairs = [('volumes', 'volume_type_id'),
-                              ('volume_types', 'id'),
-                              ('volume_type_extra_specs', 'volume_type_id')]
-        sql = 'ALTER TABLE {0} ALTER COLUMN {1} ' + \
-            'TYPE INTEGER USING {1}::numeric'
-
-        for table, column in table_column_pairs:
-            migrate_engine.execute(sql.format(table, column))
-    else:
-        volumes.c.volume_type_id.alter(Integer)
-        volume_types.c.id.alter(Integer)
-        extra_specs.c.volume_type_id.alter(Integer)
-
-    for column in fkey_remove_list:
-        fkeys = list(column.foreign_keys)
-        if fkeys:
-            fkey_name = fkeys[0].constraint.name
-            fkey = ForeignKeyConstraint(columns=[column],
-                                        refcolumns=[volume_types.c.id],
-                                        name=fkey_name)
-            try:
-                fkey.create()
-            except Exception:
-                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
-                    pass
-                else:
-                    raise
index 2315ad38ca42eeea520fde884be989fb3c1cf3d1..3cf535015fdc0b4367c704292c2603b62c6f8c3e 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     source_volid = Column('source_volid', String(36))
     volumes.create_column(source_volid)
     volumes.update().values(source_volid=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove source volume id column to volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    source_volid = Column('source_volid', String(36))
-    volumes.drop_column(source_volid)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/005_sqlite_downgrade.sql
deleted file mode 100644 (file)
index 51dd54f..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-CREATE TEMPORARY TABLE volumes_backup (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    ec2_id VARCHAR(255),
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    host VARCHAR(255),
-    size INTEGER,
-    availability_zone VARCHAR(255),
-    instance_uuid VARCHAR(36),
-    mountpoint VARCHAR(255),
-    attach_time VARCHAR(255),
-    status VARCHAR(255),
-    attach_status VARCHAR(255),
-    scheduled_at DATETIME,
-    launched_at DATETIME,
-    terminated_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(256),
-    provider_auth VARCHAR(256),
-    snapshot_id VARCHAR(36),
-    volume_type_id VARCHAR(36),
-    source_volid VARCHAR(36),
-    PRIMARY KEY (id),
-    CHECK (deleted IN (0, 1))
-);
-
-INSERT INTO volumes_backup
-    SELECT created_at,
-          updated_at,
-          deleted_at,
-          deleted,
-          id,
-          ec2_id,
-          user_id,
-          project_id,
-          host,
-          size,
-          availability_zone,
-          instance_uuid,
-          mountpoint,
-          attach_time,
-          status,
-          attach_status,
-          scheduled_at,
-          launched_at,
-          terminated_at,
-          display_name,
-          display_description,
-          provider_location,
-          provider_auth,
-          snapshot_id,
-          volume_type_id,
-          source_volid
-      FROM volumes;
-
-DROP TABLE volumes;
-
-CREATE TABLE volumes (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    ec2_id VARCHAR(255),
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    host VARCHAR(255),
-    size INTEGER,
-    availability_zone VARCHAR(255),
-    instance_uuid VARCHAR(36),
-    mountpoint VARCHAR(255),
-    attach_time VARCHAR(255),
-    status VARCHAR(255),
-    attach_status VARCHAR(255),
-    scheduled_at DATETIME,
-    launched_at DATETIME,
-    terminated_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(256),
-    provider_auth VARCHAR(256),
-    snapshot_id VARCHAR(36),
-    volume_type_id VARCHAR(36),
-    PRIMARY KEY (id),
-    CHECK (deleted IN (0, 1))
-);
-
-INSERT INTO volumes
-    SELECT created_at,
-          updated_at,
-          deleted_at,
-          deleted,
-          id,
-          ec2_id,
-          user_id,
-          project_id,
-          host,
-          size,
-          availability_zone,
-          instance_uuid,
-          mountpoint,
-          attach_time,
-          status,
-          attach_status,
-          scheduled_at,
-          launched_at,
-          terminated_at,
-          display_name,
-          display_description,
-          provider_location,
-          provider_auth,
-          snapshot_id,
-          volume_type_id
-    FROM volumes_backup;
-
-DROP TABLE volumes_backup;
index 9c6aced6b11720b2b8507903b566bbe560696902..b132bc30bc54e6d4fea9ec3fdffa6216f72664f8 100644 (file)
@@ -23,12 +23,3 @@ def upgrade(migrate_engine):
     provider_location = Column('provider_location', String(255))
     snapshots.create_column(provider_location)
     snapshots.update().values(provider_location=None).execute()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    snapshots = Table('snapshots', meta, autoload=True)
-    provider_location = snapshots.columns.provider_location
-    snapshots.drop_column(provider_location)
index 007a15db05b620758193fd822be084667099107d..27351458c742bc4e5be86b5fbe1f4d6f3031a78c 100644 (file)
@@ -25,15 +25,3 @@ def upgrade(migrate_engine):
     ForeignKeyConstraint(
         columns=[snapshots.c.volume_id],
         refcolumns=[volumes.c.id]).create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    snapshots = Table('snapshots', meta, autoload=True)
-    volumes = Table('volumes', meta, autoload=True)
-
-    ForeignKeyConstraint(
-        columns=[snapshots.c.volume_id],
-        refcolumns=[volumes.c.id]).drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql
deleted file mode 100644 (file)
index 5c73a1c..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
--- As sqlite does not support the DROP FOREIGN KEY, we need to create
--- the table, and move all the data to it.
-
-CREATE TABLE snapshots_v6 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    volume_id VARCHAR(36) NOT NULL,
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    status VARCHAR(255),
-    progress VARCHAR(255),
-    volume_size INTEGER,
-    scheduled_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(255),
-    PRIMARY KEY (id),
-    CHECK (deleted IN (0, 1))
-);
-
-INSERT INTO snapshots_v6 SELECT * FROM snapshots;
-
-DROP TABLE snapshots;
-
-ALTER TABLE snapshots_v6 RENAME TO snapshots;
index 9b50c89b7b8dde96b091823701ead2b5efaef541..39c3741b04948a98fec22c12131c785735caa976 100644 (file)
@@ -47,11 +47,3 @@ def upgrade(migrate_engine):
     )
 
     backups.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    backups = Table('backups', meta, autoload=True)
-    backups.drop()
index 86596db1a5c605965577703e5947041a07ff4ae4..9a2b397c6ba7c9ec5343e5308e9dc30f8d318e44 100644 (file)
@@ -36,12 +36,3 @@ def upgrade(migrate_engine):
     )
 
     snapshot_metadata.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    snapshot_metadata = Table('snapshot_metadata',
-                              meta,
-                              autoload=True)
-    snapshot_metadata.drop()
index 7207423e35cf7292884333aa9b532c7e46b1bf00..99f7420a19951f0975204e796da565b1219ef1a3 100644 (file)
@@ -39,12 +39,3 @@ def upgrade(migrate_engine):
     )
 
     transfers.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    transfers = Table('transfers',
-                      meta,
-                      autoload=True)
-    transfers.drop()
index b6adb3bf5424ab718ec75710f2b6b9a462bde0e3..db2ec5c5db30a806be094bc51a36d2a0b1b4c9d6 100644 (file)
@@ -31,13 +31,3 @@ def upgrade(migrate_engine):
         volumes.update().\
             where(volumes.c.id == item['volume_id']).\
             values(bootable=True).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove bootable column to volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    bootable = volumes.columns.bootable
-    volumes.drop_column(bootable)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql
deleted file mode 100644 (file)
index d86ea6a..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-CREATE TABLE volumes_v10 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    ec2_id INTEGER,
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    snapshot_id VARCHAR(36),
-    host VARCHAR(255),
-    size INTEGER,
-    availability_zone VARCHAR(255),
-    instance_uuid VARCHAR(36),
-    mountpoint VARCHAR(255),
-    attach_time VARCHAR(255),
-    status VARCHAR(255),
-    attach_status VARCHAR(255),
-    scheduled_at DATETIME,
-    launched_at DATETIME,
-    terminated_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(255),
-    provider_auth VARCHAR(255),
-    volume_type_id VARCHAR(36),
-    source_volid VARCHAR(36),
-    PRIMARY KEY (id)
-);
-
-INSERT INTO volumes_v10
-    SELECT created_at,
-        updated_at,
-        deleted_at,
-        deleted,
-        id,
-        ec2_id,
-        user_id,
-        project_id,
-        snapshot_id,
-        host,
-        size,
-        availability_zone,
-        instance_uuid,
-        mountpoint,
-        attach_time,
-        status,
-        attach_status,
-        scheduled_at,
-        launched_at,
-        terminated_at,
-        display_name,
-        display_description,
-        provider_location,
-        provider_auth,
-        volume_type_id,
-        source_volid
-    FROM volumes;
-
-DROP TABLE volumes;
-ALTER TABLE volumes_v10 RENAME TO volumes;
index 58b03bcfc202f8a9e9fff5e7ab71ffebf196a6bd..82169e764e3a1680db10a49cb0903ba5c2eb1237 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     attached_host = Column('attached_host', String(255))
     volumes.create_column(attached_host)
     volumes.update().values(attached_host=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove attach host column from volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    attached_host = Column('attached_host', String(255))
-    volumes.drop_column(attached_host)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_downgrade.sql
deleted file mode 100644 (file)
index 0170e8d..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-CREATE TABLE volumes_v11 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    ec2_id INTEGER,
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    snapshot_id VARCHAR(36),
-    host VARCHAR(255),
-    size INTEGER,
-    availability_zone VARCHAR(255),
-    instance_uuid VARCHAR(36),
-    mountpoint VARCHAR(255),
-    attach_time VARCHAR(255),
-    status VARCHAR(255),
-    attach_status VARCHAR(255),
-    scheduled_at DATETIME,
-    launched_at DATETIME,
-    terminated_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(255),
-    provider_auth VARCHAR(255),
-    volume_type_id VARCHAR(36),
-    source_volid VARCHAR(36),
-    bootable BOOLEAN,
-    PRIMARY KEY (id)
-);
-
-INSERT INTO volumes_v11
-    SELECT created_at,
-        updated_at,
-        deleted_at,
-        deleted,
-        id,
-        ec2_id,
-        user_id,
-        project_id,
-        snapshot_id,
-        host,
-        size,
-        availability_zone,
-        instance_uuid,
-        mountpoint,
-        attach_time,
-        status,
-        attach_status,
-        scheduled_at,
-        launched_at,
-        terminated_at,
-        display_name,
-        display_description,
-        provider_location,
-        provider_auth,
-        volume_type_id,
-        source_volid,
-        bootable
-    FROM volumes;
-
-DROP TABLE volumes;
-ALTER TABLE volumes_v11 RENAME TO volumes;
index afc3b5b35c6c8d24a839250bcf4ce74389bb5f13..0ac71ca3fd7f63207d8f6a0a3386fc58e86c5aee 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     provider_geometry = Column('provider_geometry', String(255))
     volumes.create_column(provider_geometry)
     volumes.update().values(provider_geometry=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove provider_geometry column from volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    provider_geometry = Column('provider_geometry', String(255))
-    volumes.drop_column(provider_geometry)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql
deleted file mode 100644 (file)
index b421628..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-CREATE TABLE volumes_v12 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    ec2_id INTEGER,
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    snapshot_id VARCHAR(36),
-    host VARCHAR(255),
-    size INTEGER,
-    availability_zone VARCHAR(255),
-    instance_uuid VARCHAR(36),
-    mountpoint VARCHAR(255),
-    attach_time VARCHAR(255),
-    status VARCHAR(255),
-    attach_status VARCHAR(255),
-    scheduled_at DATETIME,
-    launched_at DATETIME,
-    terminated_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(255),
-    provider_auth VARCHAR(255),
-    volume_type_id VARCHAR(36),
-    source_volid VARCHAR(36),
-    bootable BOOLEAN,
-    attached_host VARCHAR(255),
-    PRIMARY KEY (id)
-);
-
-INSERT INTO volumes_v12
-    SELECT created_at,
-        updated_at,
-        deleted_at,
-        deleted,
-        id,
-        ec2_id,
-        user_id,
-        project_id,
-        snapshot_id,
-        host,
-        size,
-        availability_zone,
-        instance_uuid,
-        mountpoint,
-        attach_time,
-        status,
-        attach_status,
-        scheduled_at,
-        launched_at,
-        terminated_at,
-        display_name,
-        display_description,
-        provider_location,
-        provider_auth,
-        volume_type_id,
-        source_volid,
-        bootable,
-        attached_host
-    FROM volumes;
-
-DROP TABLE volumes;
-ALTER TABLE volumes_v12 RENAME TO volumes;
index 4bf8bffff247c3d7c0faf09fa67e6ff8fe06c85b..41e46efc58713f26e0d8c79d358e05104b7790e9 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     _name_id = Column('_name_id', String(36))
     volumes.create_column(_name_id)
     volumes.update().values(_name_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove _name_id column from volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    _name_id = volumes.columns._name_id
-    volumes.drop_column(_name_id)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/014_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/014_sqlite_downgrade.sql
deleted file mode 100644 (file)
index 6885168..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-CREATE TABLE volumes_v13 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    ec2_id INTEGER,
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    snapshot_id VARCHAR(36),
-    host VARCHAR(255),
-    size INTEGER,
-    availability_zone VARCHAR(255),
-    instance_uuid VARCHAR(36),
-    attached_host VARCHAR(255),
-    mountpoint VARCHAR(255),
-    attach_time VARCHAR(255),
-    status VARCHAR(255),
-    attach_status VARCHAR(255),
-    scheduled_at DATETIME,
-    launched_at DATETIME,
-    terminated_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(255),
-    provider_auth VARCHAR(255),
-    volume_type_id VARCHAR(36),
-    source_volid VARCHAR(36),
-    bootable BOOLEAN,
-    provider_geometry VARCHAR(255),
-    PRIMARY KEY (id)
-);
-
-INSERT INTO volumes_v13
-    SELECT created_at,
-        updated_at,
-        deleted_at,
-        deleted,
-        id,
-        ec2_id,
-        user_id,
-        project_id,
-        snapshot_id,
-        host,
-        size,
-        availability_zone,
-        instance_uuid,
-        attached_host,
-        mountpoint,
-        attach_time,
-        status,
-        attach_status,
-        scheduled_at,
-        launched_at,
-        terminated_at,
-        display_name,
-        display_description,
-        provider_location,
-        provider_auth,
-        volume_type_id,
-        source_volid,
-        bootable,
-        provider_geometry
-    FROM volumes;
-
-DROP TABLE volumes;
-ALTER TABLE volumes_v13 RENAME TO volumes;
index 86503ef29b54930b578d265dbc91e0f923059d3d..4156e4e77ce612050351e56c7f2ee8d768f16f99 100644 (file)
@@ -10,8 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
+from sqlalchemy import MetaData, Table
 
 
 TABLE_NAME = 'migrations'
@@ -22,29 +21,3 @@ def upgrade(migrate_engine):
     meta.bind = migrate_engine
     table = Table(TABLE_NAME, meta, autoload=True)
     table.drop()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    table = Table(
-        TABLE_NAME, meta,
-        Column('created_at', DateTime(timezone=False)),
-        Column('updated_at', DateTime(timezone=False)),
-        Column('deleted_at', DateTime(timezone=False)),
-        Column('deleted', Boolean),
-        Column('id', Integer, primary_key=True, nullable=False),
-
-        Column('source_compute', String(length=255)),
-        Column('dest_compute', String(length=255)),
-        Column('dest_host', String(length=255)),
-        Column('old_instance_type_id', Integer),
-        Column('new_instance_type_id', Integer),
-        Column('instance_uuid', String(length=255), nullable=True),
-        Column('status', String(length=255)),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'
-    )
-
-    table.create()
index d242a408282464d56d939692e8a95f7e97eadf8d..9660c75c77977b37ed6ad9790ad9505534c88355 100644 (file)
@@ -12,8 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import Integer, MetaData, String, Table
+from sqlalchemy import MetaData, Table
 
 
 def upgrade(migrate_engine):
@@ -28,61 +27,3 @@ def upgrade(migrate_engine):
 
     for table in tables:
         table.drop()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    Table('volumes', meta, autoload=True)
-
-    sm_backend_config = Table(
-        'sm_backend_config', meta,
-        Column('created_at', DateTime),
-        Column('updated_at', DateTime),
-        Column('deleted_at', DateTime),
-        Column('deleted', Boolean),
-        Column('id', Integer, primary_key=True, nullable=False),
-        Column('flavor_id', Integer, ForeignKey('sm_flavors.id'),
-               nullable=False),
-        Column('sr_uuid', String(length=255)),
-        Column('sr_type', String(length=255)),
-        Column('config_params', String(length=2047)),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'
-    )
-
-    sm_flavors = Table(
-        'sm_flavors', meta,
-        Column('created_at', DateTime),
-        Column('updated_at', DateTime),
-        Column('deleted_at', DateTime),
-        Column('deleted', Boolean),
-        Column('id', Integer, primary_key=True, nullable=False),
-        Column('label', String(length=255)),
-        Column('description', String(length=255)),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'
-    )
-
-    sm_volume = Table(
-        'sm_volume', meta,
-        Column('created_at', DateTime),
-        Column('updated_at', DateTime),
-        Column('deleted_at', DateTime),
-        Column('deleted', Boolean),
-        Column('id', String(length=36),
-               ForeignKey('volumes.id'),
-               primary_key=True,
-               nullable=False),
-        Column('backend_id', Integer, ForeignKey('sm_backend_config.id'),
-               nullable=False),
-        Column('vdi_uuid', String(length=255)),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8'
-    )
-
-    tables = [sm_flavors, sm_backend_config, sm_volume]
-
-    for table in tables:
-        table.create()
index 32669994a62ad53c9fbae621b44839882493574e..11871f37ea9dcb49cf0f9c5b743aaeb655076c04 100644 (file)
@@ -58,20 +58,3 @@ def upgrade(migrate_engine):
     )
 
     encryption.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData(bind=migrate_engine)
-
-    # drop encryption key UUID for volumes
-    volumes = Table('volumes', meta, autoload=True)
-    volumes.c.encryption_key_id.drop()
-
-    # drop encryption key UUID and volume type id for snapshots
-    snapshots = Table('snapshots', meta, autoload=True)
-    snapshots.c.encryption_key_id.drop()
-    snapshots.c.volume_type_id.drop()
-
-    # drop encryption types table
-    encryption = Table('encryption', meta, autoload=True)
-    encryption.drop()
index 238ee70f651945f0ced5197e8ae01afd2ac391e2..2dcbe1ed79e4adcebef4940ac9a131204662a428 100644 (file)
@@ -16,7 +16,6 @@
 
 from sqlalchemy import Boolean, Column, DateTime
 from sqlalchemy import ForeignKey, MetaData, String, Table
-from migrate import ForeignKeyConstraint
 
 
 def upgrade(migrate_engine):
@@ -47,30 +46,3 @@ def upgrade(migrate_engine):
 
     volume_types.create_column(qos_specs_id)
     volume_types.update().values(qos_specs_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove volume_type_rate_limit table."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    qos_specs = Table('quality_of_service_specs', meta, autoload=True)
-
-    if migrate_engine.name == 'mysql':
-        # NOTE(alanmeadows): MySQL Cannot drop column qos_specs_id
-        # until the foreign key volumes_types_ibfk_1 is removed.  We
-        # remove the foreign key first, and then we drop the column.
-        table = Table('volume_types', meta, autoload=True)
-        ref_table = Table('volume_types', meta, autoload=True)
-        params = {'columns': [table.c['qos_specs_id']],
-                  'refcolumns': [ref_table.c['id']],
-                  'name': 'volume_types_ibfk_1'}
-
-        fkey = ForeignKeyConstraint(**params)
-        fkey.drop()
-
-    volume_types = Table('volume_types', meta, autoload=True)
-    qos_specs_id = Column('qos_specs_id', String(36))
-
-    volume_types.drop_column(qos_specs_id)
-    qos_specs.drop()
index 5ae25f3b5a931fb936142c3e4e0febeff6f41496..3d0fa36eaf1650c2a7ae425a7dfc81cc314194e5 100644 (file)
@@ -24,13 +24,3 @@ def upgrade(migrate_engine):
     volumes = Table('volumes', meta, autoload=True)
     migration_status = Column('migration_status', String(255))
     volumes.create_column(migration_status)
-
-
-def downgrade(migrate_engine):
-    """Remove migration_status column from volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    migration_status = volumes.columns.migration_status
-    volumes.drop_column(migration_status)
index 96543879bb1093683c38e362b6eb72edc23d5995..0f9345a9714a37fec499e6137ec514782a8c56fb 100644 (file)
@@ -37,12 +37,3 @@ def upgrade(migrate_engine):
     )
 
     volume_admin_metadata.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    volume_admin_metadata = Table('volume_admin_metadata',
-                                  meta,
-                                  autoload=True)
-    volume_admin_metadata.drop()
index 96785d08d16e745058e64598128bad67a1f7c7df..61e480fd4fd2432422cb94239687fa7e74956836 100644 (file)
@@ -64,13 +64,3 @@ def upgrade(migrate_engine):
                  'resource': 'gigabytes',
                  'hard_limit': CONF.quota_gigabytes,
                  'deleted': False, })
-
-
-def downgrade(migrate_engine):
-    """Don't delete the 'default' entries at downgrade time.
-
-    We don't know if the user had default entries when we started.
-    If they did, we wouldn't want to remove them.  So, the safest
-    thing to do is just leave the 'default' entries at downgrade time.
-    """
-    pass
index cc0288ee27822072cfc4f060fd9f16518ddd040c..098544888ba0512c517dee5ad3912b6eabbd440a 100644 (file)
@@ -21,10 +21,3 @@ def upgrade(migrate_engine):
     services = Table('services', meta, autoload=True)
     reason = Column('disabled_reason', String(255))
     services.create_column(reason)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    services = Table('services', meta, autoload=True)
-    services.drop_column('disabled_reason')
index b82beb13ee506f65c0b1cc059e9dc39fcd017fc9..793588ea433308b74ba9f67a2a3f7870eb377e47 100644 (file)
@@ -36,14 +36,3 @@ def upgrade(migrate_engine):
                   reservations.c.deleted, reservations.c.expire)
 
     index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    reservations = Table('reservations', meta, autoload=True)
-
-    index = _get_deleted_expire_index(reservations)
-    if index:
-        index.drop(migrate_engine)
index 4e5021ba3e529ae968503a3bfac799a4d5a21f77..02d392342aa428e61ab4157d7dcc5b6b166219af 100644 (file)
@@ -32,16 +32,3 @@ def upgrade(migrate_engine):
     volumes.update().values(replication_status='disabled',
                             replication_extended_status=None,
                             replication_driver_data=None).execute()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    replication_status = volumes.columns.replication_status
-    replication_extended_status = volumes.columns.replication_extended_status
-    replication_driver_data = volumes.columns.replication_driver_data
-    volumes.drop_column(replication_status)
-    volumes.drop_column(replication_extended_status)
-    volumes.drop_column(replication_driver_data)
index 5ca70fbb5c1c1896105417727081bb6f238f622f..4cc953547ee53541e27f7fe572dd6ac941ddde52 100644 (file)
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from migrate import ForeignKeyConstraint
 from sqlalchemy import Boolean, Column, DateTime
 from sqlalchemy import ForeignKey, MetaData, String, Table
 
@@ -80,52 +79,3 @@ def upgrade(migrate_engine):
 
     snapshots.create_column(cgsnapshot_id)
     snapshots.update().values(cgsnapshot_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    # Drop column from snapshots table
-    if migrate_engine.name == 'mysql':
-        # MySQL cannot drop column cgsnapshot_id until the foreign key
-        # constraint is removed. So remove the foreign key first, and
-        # then drop the column.
-        table = Table('snapshots', meta, autoload=True)
-        ref_table = Table('snapshots', meta, autoload=True)
-        params = {'columns': [table.c['cgsnapshot_id']],
-                  'refcolumns': [ref_table.c['id']],
-                  'name': 'snapshots_ibfk_1'}
-
-        fkey = ForeignKeyConstraint(**params)
-        fkey.drop()
-
-    snapshots = Table('snapshots', meta, autoload=True)
-    cgsnapshot_id = snapshots.columns.cgsnapshot_id
-    snapshots.drop_column(cgsnapshot_id)
-
-    # Drop column from volumes table
-    if migrate_engine.name == 'mysql':
-        # MySQL cannot drop column consistencygroup_id until the foreign
-        # key constraint is removed. So remove the foreign key first,
-        # and then drop the column.
-        table = Table('volumes', meta, autoload=True)
-        ref_table = Table('volumes', meta, autoload=True)
-        params = {'columns': [table.c['consistencygroup_id']],
-                  'refcolumns': [ref_table.c['id']],
-                  'name': 'volumes_ibfk_1'}
-
-        fkey = ForeignKeyConstraint(**params)
-        fkey.drop()
-
-    volumes = Table('volumes', meta, autoload=True)
-    consistencygroup_id = volumes.columns.consistencygroup_id
-    volumes.drop_column(consistencygroup_id)
-
-    # Drop table
-    cgsnapshots = Table('cgsnapshots', meta, autoload=True)
-    cgsnapshots.drop()
-
-    # Drop table
-    consistencygroups = Table('consistencygroups', meta, autoload=True)
-    consistencygroups.drop()
index 7bc6609648111a96aaba2cb61fc5720d6f280b3a..afd9a5d99e762f486c09097ce61bb0ca7963027f 100644 (file)
@@ -50,13 +50,3 @@ def upgrade(migrate_engine):
                  'resource': 'consistencygroups',
                  'hard_limit': CONF.quota_consistencygroups,
                  'deleted': False, })
-
-
-def downgrade(migrate_engine):
-    """Don't delete the 'default' entries at downgrade time.
-
-    We don't know if the user had default entries when we started.
-    If they did, we wouldn't want to remove them.  So, the safest
-    thing to do is just leave the 'default' entries at downgrade time.
-    """
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 99cb5a32271705226ec933858f59efe1df273ff4..b1bf1b97cfa6eea786c5adea229d63d8891851cf 100644 (file)
@@ -39,15 +39,3 @@ def upgrade(migrate_engine):
     )
 
     volume_type_projects.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volume_types = Table('volume_types', meta, autoload=True)
-    is_public = volume_types.columns.is_public
-    volume_types.drop_column(is_public)
-
-    volume_type_projects = Table('volume_type_projects', meta, autoload=True)
-    volume_type_projects.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/032_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/032_sqlite_downgrade.sql
deleted file mode 100644 (file)
index ade3dc2..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
--- As sqlite does not support the DROP CHECK, we need to create
--- the table, and move all the data to it.
-
-CREATE TABLE volume_types_v31 (
-  created_at DATETIME,
-  updated_at DATETIME,
-  deleted_at DATETIME,
-  deleted BOOLEAN,
-  id VARCHAR(36) NOT NULL,
-  name VARCHAR(255),
-  qos_specs_id VARCHAR(36),
-  PRIMARY KEY (id),
-  CHECK (deleted IN (0, 1)),
-  FOREIGN KEY(qos_specs_id) REFERENCES quality_of_service_specs (id)
-);
-
-INSERT INTO volume_types_v31
-    SELECT created_at,
-        updated_at,
-        deleted_at,
-        deleted,
-        id,
-        name,
-        qos_specs_id
-    FROM volume_types;
-
-DROP TABLE volume_types;
-ALTER TABLE volume_types_v31 RENAME TO volume_types;
-DROP TABLE volume_type_projects;
index 3c347d4e2fa9f0e7ef1ac969e99ca3ad73ffcecf..92467100a2ece2b597857741aeb9b4674cbdb992 100644 (file)
@@ -62,27 +62,6 @@ def upgrade(migrate_engine):
         pkey.create()
 
 
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    encryptions = Table('encryption', meta, autoload=True)
-    encryption_id_pk = PrimaryKeyConstraint(encryptions.columns.encryption_id)
-
-    encryption_id_pk.drop()
-    encryptions.drop_column(encryptions.columns.encryption_id)
-
-    volume_type_pk = PrimaryKeyConstraint(encryptions.columns.volume_type_id)
-    volume_type_pk.create()
-
-    ref_table = Table('volume_types', meta, autoload=True)
-    params = {'columns': [encryptions.c['volume_type_id']],
-              'refcolumns': [ref_table.c['id']],
-              'name': 'encryption_ibfk_1'}
-    volume_type_fk = ForeignKeyConstraint(**params)
-    volume_type_fk.create()
-
-
 def _upgrade_sqlite(meta, encryptions):
     new_encryptions = Table(
         'encryption_33', meta,
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql
deleted file mode 100644 (file)
index 996f6cd..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-CREATE TABLE encryption_v32 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    cipher VARCHAR(255),
-    control_location VARCHAR(255),
-    key_size INTEGER,
-    provider VARCHAR(255),
-    volume_type_id VARCHAR(36),
-    PRIMARY KEY (volume_type_id),
-    FOREIGN KEY(volume_type_id) REFERENCES volume_types(id)
-);
-
-INSERT INTO encryption_v32
-    SELECT created_at,
-        updated_at,
-        deleted_at,
-        deleted,
-        cipher,
-        control_location,
-        key_size,
-        provider,
-        volume_type_id
-    FROM encryption;
-
-DROP TABLE encryption;
-ALTER TABLE encryption_v32 RENAME TO encryption;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/034_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/034_sqlite_downgrade.sql
deleted file mode 100644 (file)
index 9d4d1a8..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-CREATE TABLE volume_types_v33 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    name VARCHAR(255),
-    is_public BOOLEAN,
-    qos_specs_id VARCHAR(36),
-    PRIMARY KEY (id)
-);
-
-INSERT INTO volume_types_v33
-    SELECT created_at,
-        updated_at,
-        deleted_at,
-        deleted,
-        id,
-        name,
-        is_public,
-        qos_specs_id
-    FROM volume_types;
-
-DROP TABLE volume_types;
-ALTER TABLE volume_types_v33 RENAME TO volume_types;
index b336aaeea15ae95d38e5a34eb6cccea38b587328..1ceb1bc96b44d175f0300c7a4d7679f8c2ed76bf 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     description = Column('description', String(255))
     volume_types.create_column(description)
     volume_types.update().values(description=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove description column to volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volume_types = Table('volume_types', meta, autoload=True)
-    description = volume_types.columns.description
-    volume_types.drop_column(description)
index e0a1ce8360797fea2529efa50eff5aafe5a9b609..ce0393f2afd77eaaf4a6e42782c81f5528f59322 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     provider_id = Column('provider_id', String(255))
     volumes.create_column(provider_id)
     volumes.update().values(provider_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove provider_id column from volumes."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    provider_id = volumes.columns.provider_id
-    volumes.drop_column(provider_id)
index 7698ec8eb1b6f94669b2af6f4a2934f7ebef3343..f83605ffd9449b55c2dd9a8f3ebf7ef350bb4968 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     provider_id = Column('provider_id', String(255))
     snapshots.create_column(provider_id)
     snapshots.update().values(provider_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove provider_id column from snapshots."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    snapshots = Table('snapshots', meta, autoload=True)
-    provider_id = snapshots.columns.provider_id
-    snapshots.drop_column(provider_id)
index f9b595748e5ec8a7b7e4a75550aaa5faaba0769f..0b75e16ba5f9ce57ffde5e0696a9e19361cabe64 100644 (file)
@@ -24,14 +24,3 @@ def upgrade(migrate_engine):
 
     consistencygroups.create_column(cgsnapshot_id)
     consistencygroups.update().values(cgsnapshot_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove cgsnapshot_id column from consistencygroups."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    consistencygroups = Table('consistencygroups', meta, autoload=True)
-    cgsnapshot_id = consistencygroups.columns.cgsnapshot_id
-
-    consistencygroups.drop_column(cgsnapshot_id)
index da029bbeab6d79c96c38c9157c41d3fc77502718..d1c6f3e915581eea24c47e6d48ae9897c6cc7567 100644 (file)
@@ -34,11 +34,3 @@ def upgrade(migrate_engine):
     )
 
     initiator_data.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    table_name = 'driver_initiator_data'
-    initiator_data = Table(table_name, meta, autoload=True)
-    initiator_data.drop()
index 0ffddb5e81f1728fec0fec0eb877ce139dc53e3e..7c9c42f927a2cfe1b6206b29303b041fe45765df 100644 (file)
@@ -26,13 +26,3 @@ def upgrade(migrate_engine):
 
     backups.create_column(parent_id)
     backups.update().values(parent_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    backups = Table('backups', meta, autoload=True)
-    parent_id = backups.columns.parent_id
-
-    backups.drop_column(parent_id)
index f51b8fd42e8ca8b22878811d552e686763f30296..9727c7ccb3e02f9fd85e4f12d75fe149fb297a2c 100644 (file)
@@ -86,48 +86,3 @@ def upgrade(migrate_engine):
     volumes.drop_column(attach_time)
     attached_host = volumes.columns.attached_host
     volumes.drop_column(attached_host)
-
-
-def downgrade(migrate_engine):
-    """Remove volume_attachment table."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    # Put the needed volumes table columns back
-    volumes = Table('volumes', meta, autoload=True)
-    multiattach = volumes.columns.multiattach
-    volumes.drop_column(multiattach)
-
-    attached_host = Column('attached_host', String(length=255))
-    volumes.create_column(attached_host)
-    volumes.update().values(attached_host=None).execute()
-
-    attach_time = Column('attach_time', String(length=255))
-    volumes.create_column(attach_time)
-    volumes.update().values(attach_time=None).execute()
-
-    instance_uuid = Column('instance_uuid', String(length=36))
-    volumes.create_column(instance_uuid)
-    volumes.update().values(instance_uuid=None).execute()
-
-    mountpoint = Column('mountpoint', String(length=255))
-    volumes.create_column(mountpoint)
-    volumes.update().values(mountpoint=None).execute()
-
-    volume_attachment = Table('volume_attachment', meta, autoload=True)
-    attachments = list(volume_attachment.select().execute())
-    for attachment in attachments:
-        # we are going to lose data here for
-        # multiple attaches.  We'll migrate and the
-        # last update wins.
-
-        if not attachment.deleted_at:
-            volume_id = attachment.volume_id
-            volumes.update().\
-                where(volumes.c.id == volume_id).\
-                values(mountpoint=attachment.mountpoint,
-                       attached_host=attachment.attached_host,
-                       attach_time=attachment.attach_time,
-                       instance_uuid=attachment.instance_uuid).\
-                execute()
-    volume_attachment.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/040_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/040_sqlite_downgrade.sql
deleted file mode 100644 (file)
index e0d0aff..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-CREATE TABLE volumes_v39 (
-    created_at DATETIME,
-    updated_at DATETIME,
-    deleted_at DATETIME,
-    deleted BOOLEAN,
-    id VARCHAR(36) NOT NULL,
-    ec2_id INTEGER,
-    user_id VARCHAR(255),
-    project_id VARCHAR(255),
-    snapshot_id VARCHAR(36),
-    host VARCHAR(255),
-    size INTEGER,
-    availability_zone VARCHAR(255),
-    status VARCHAR(255),
-    attach_status VARCHAR(255),
-    scheduled_at DATETIME,
-    launched_at DATETIME,
-    terminated_at DATETIME,
-    display_name VARCHAR(255),
-    display_description VARCHAR(255),
-    provider_location VARCHAR(255),
-    provider_auth VARCHAR(255),
-    volume_type_id VARCHAR(36),
-    source_volid VARCHAR(36),
-    bootable INTEGER,
-    provider_geometry VARCHAR(255),
-    _name_id VARCHAR(36),
-    encryption_key_id VARCHAR(36),
-    migration_status VARCHAR(255),
-    attached_host VARCHAR(255),
-    attach_time VARCHAR(255),
-    instance_uuid VARCHAR(36),
-    mountpoint VARCHAR(255),
-    consistencygroup_id VARCHAR(36),
-    replication_status VARCHAR(255),
-    replication_extended_status VARCHAR(255),
-    replication_driver_data VARCHAR(255),
-    PRIMARY KEY (id)
-);
-
-INSERT INTO volumes_v39
-    SELECT volumes.created_at,
-        volumes.updated_at,
-        volumes.deleted_at,
-        volumes.deleted,
-        volumes.id,
-        volumes.ec2_id,
-        volumes.user_id,
-        volumes.project_id,
-        volumes.snapshot_id,
-        volumes.host,
-        volumes.size,
-        volumes.availability_zone,
-        volumes.status,
-        volumes.attach_status,
-        volumes.scheduled_at,
-        volumes.launched_at,
-        volumes.terminated_at,
-        volumes.display_name,
-        volumes.display_description,
-        volumes.provider_location,
-        volumes.provider_auth,
-        volumes.volume_type_id,
-        volumes.source_volid,
-        volumes.bootable,
-        volumes.provider_geometry,
-        volumes._name_id,
-        volumes.encryption_key_id,
-        volumes.migration_status,
-        volume_attachment.attached_host,
-        volume_attachment.attach_time,
-        volume_attachment.instance_uuid,
-        volume_attachment.mountpoint,
-        volumes.consistencygroup_id,
-        volumes.replication_status,
-        volumes.replication_extended_status,
-        volumes.replication_driver_data
-    FROM volumes
-    LEFT OUTER JOIN volume_attachment
-    ON volumes.id=volume_attachment.volume_id;
-
-DROP TABLE volumes;
-ALTER TABLE volumes_v39 RENAME TO volumes;
-DROP TABLE volume_attachment;
index 10d1750ae93aa97772a61d9f1a081cd66bb7fa8c..b613621bab7ab56420e2ea1828f02db76b8338dd 100644 (file)
@@ -19,10 +19,3 @@ def upgrade(migrate_engine):
     services = Table('services', meta, autoload=True)
     modified_at = Column('modified_at', DateTime(timezone=False))
     services.create_column(modified_at)
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    services = Table('services', meta, autoload=True)
-    services.drop_column('modified_at')
index 9dfc9cb471e844544056da61a74dc175273d8def..b088f203c599fcc96289621a4c4294a9211b745a 100644 (file)
@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 9dfc9cb471e844544056da61a74dc175273d8def..b088f203c599fcc96289621a4c4294a9211b745a 100644 (file)
@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 9dfc9cb471e844544056da61a74dc175273d8def..b088f203c599fcc96289621a4c4294a9211b745a 100644 (file)
@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 9dfc9cb471e844544056da61a74dc175273d8def..b088f203c599fcc96289621a4c4294a9211b745a 100644 (file)
@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 9dfc9cb471e844544056da61a74dc175273d8def..b088f203c599fcc96289621a4c4294a9211b745a 100644 (file)
@@ -20,7 +20,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 981381781c638e1ee1263e619aa3149cd0a01347..5e9f7d2cdb788c8320b038ae147437de7402c8d0 100644 (file)
@@ -42,14 +42,3 @@ def upgrade(migrate_engine):
                  'resource': 'per_volume_gigabytes',
                  'hard_limit': -1,
                  'deleted': False, })
-
-
-def downgrade(migrate_engine):
-    """Downgrade.
-
-    Don't delete the 'default' entries at downgrade time.
-    We don't know if the user had default entries when we started.
-    If they did, we wouldn't want to remove them.  So, the safest
-    thing to do is just leave the 'default' entries at downgrade time.
-    """
-    pass
index 372236426d4183a0aa491a91a1244303a969e927..b626a08bfde2da9836a0b40d6d7d99d3e911bad6 100644 (file)
@@ -23,12 +23,3 @@ def upgrade(migrate_engine):
     # Add a new column allocated to save allocated quota
     allocated = Column('allocated', Integer, default=0)
     quotas.create_column(allocated)
-
-
-def downgrade(migrate_engine):
-    """Remove allocated column from quotas."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    quotas = Table('quotas', meta, autoload=True)
-    quotas.drop_column('allocated')
index 11ed0c57603d95648d617a32464a3f4138bb7d6b..037a7b1412af8687708073f8bd9089a35323c42f 100644 (file)
@@ -29,15 +29,3 @@ def upgrade(migrate_engine):
 
     backups.create_column(temp_snapshot_id)
     backups.update().values(temp_snapshot_id=None).execute()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    backups = Table('backups', meta, autoload=True)
-    temp_volume_id = backups.columns.temp_volume_id
-    temp_snapshot_id = backups.columns.temp_snapshot_id
-
-    backups.drop_column(temp_volume_id)
-    backups.drop_column(temp_snapshot_id)
index ddf1d1665c7d3a07fbb948aa3a90913ebeb6d4e7..654fdce7ccf1b1da115b76a309dad9898fcf4116 100644 (file)
@@ -25,13 +25,3 @@ def upgrade(migrate_engine):
 
     volumes.create_column(previous_status)
     volumes.update().values(previous_status=None).execute()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    volumes = Table('volumes', meta, autoload=True)
-    previous_status = volumes.columns.previous_status
-
-    volumes.drop_column(previous_status)
index 044e3cc6341f439d3aa6c16c393791eb45f8530f..01b9ee77430aed1c2057425adf8351baae317304 100644 (file)
@@ -24,14 +24,3 @@ def upgrade(migrate_engine):
 
     consistencygroups.create_column(source_cgid)
     consistencygroups.update().values(source_cgid=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove source_cgid column from consistencygroups."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    consistencygroups = Table('consistencygroups', meta, autoload=True)
-    source_cgid = consistencygroups.columns.source_cgid
-
-    consistencygroups.drop_column(source_cgid)
index dbfc231e0fa9148557e68b782a46e924e385b141..d618f88f83f2b6a57de11f09a4e7ecc5cf709e15 100644 (file)
@@ -23,13 +23,3 @@ def upgrade(migrate_engine):
     provider_auth = Column('provider_auth', String(255))
     snapshots.create_column(provider_auth)
     snapshots.update().values(provider_auth=None).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove provider_auth column from snapshots."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    snapshots = Table('snapshots', meta, autoload=True)
-    provider_auth = snapshots.columns.provider_auth
-    snapshots.drop_column(provider_auth)
index 8caae5ca8b6d4e1740a8ddb2d000451d9c3331dc..25ead6561fe85e7fe70b22aae999748fe86049fc 100644 (file)
@@ -34,18 +34,3 @@ def upgrade(migrate_engine):
     services.update().values(rpc_available_version=None).execute()
     services.update().values(object_current_version=None).execute()
     services.update().values(object_available_version=None).execute()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    services = Table('services', meta, autoload=True)
-    rpc_current_version = services.columns.rpc_current_version
-    rpc_available_version = services.columns.rpc_available_version
-    object_current_version = services.columns.object_current_version
-    object_available_version = services.columns.object_available_version
-    services.drop_column(rpc_current_version)
-    services.drop_column(rpc_available_version)
-    services.drop_column(object_current_version)
-    services.drop_column(object_available_version)
index 39603cbc73a811fa4d9efa13110443ca9e08a8c7..078ced0d70ed9c85cefeb272584d3d14fd23ba63 100644 (file)
@@ -31,14 +31,3 @@ def upgrade(migrate_engine):
         if dep_bks_list:
             backups.update().where(backups.columns.id == backup.id).values(
                 num_dependent_backups=len(dep_bks_list)).execute()
-
-
-def downgrade(migrate_engine):
-    """Remove num_dependent_backups column to backups."""
-    meta = MetaData()
-    meta.bind = migrate_engine
-
-    backups = Table('backups', meta, autoload=True)
-    num_dependent_backups = backups.columns.num_dependent_backups
-
-    backups.drop_column(num_dependent_backups)
index d540df366e6759d768eae33d579e38bfc31ededb..5d509a333b961571fe17dfea113bb78e78057063 100644 (file)
@@ -35,12 +35,3 @@ def upgrade(migrate_engine):
     )
 
     image_volume_cache.create()
-
-
-def downgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    table_name = 'image_volume_cache_entries'
-    image_volume_cache = Table(table_name, meta, autoload=True)
-
-    image_volume_cache.drop()
index 6751db1d9806e2547d7361394168d9425ee3d3cb..37c8ad186a401c9003829e0ee17b69adbb53fd02 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index 27d01e5006fe561bbe4fa25891ccb83d16b5f6df..e31c02cf175591d951e79aba94f90a482258bc96 100644 (file)
@@ -17,7 +17,3 @@
 
 def upgrade(migrate_engine):
     pass
-
-
-def downgrade(migration_engine):
-    pass
index de8c574900141a0e244f35d50325406299d77ed3..cc0b3256020fe6f1b7f193eb4b6c10cadce5df66 100644 (file)
@@ -35,6 +35,7 @@ from cinder.cmd import scheduler as cinder_scheduler
 from cinder.cmd import volume as cinder_volume
 from cinder.cmd import volume_usage_audit
 from cinder import context
+from cinder import exception
 from cinder import test
 from cinder.tests.unit import fake_volume
 from cinder import version
@@ -384,6 +385,13 @@ class TestCinderManageCmd(test.TestCase):
             db_cmds.version()
             self.assertEqual(1, db_version.call_count)
 
+    @mock.patch('oslo_db.sqlalchemy.migration.db_version')
+    def test_db_commands_downgrade_fails(self, db_version):
+        db_version.return_value = 2
+        db_cmds = cinder_manage.DbCommands()
+        with mock.patch('sys.stdout', new=six.StringIO()):
+            self.assertRaises(exception.InvalidInput, db_cmds.sync, 1)
+
     @mock.patch('cinder.version.version_string')
     def test_versions_commands_list(self, version_string):
         version_cmds = cinder_manage.VersionCommands()
index 462c8b1ae32e4ad7c6a12a1cca21b950683633e1..75df2424fc493f91d1a6de05df2da54450f87717 100644 (file)
@@ -159,21 +159,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(snapshots.c.provider_location.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_006(self, engine):
-        snapshots = db_utils.get_table(engine, 'snapshots')
-        self.assertNotIn('provider_location', snapshots.c)
-
     def _check_007(self, engine, data):
         snapshots = db_utils.get_table(engine, 'snapshots')
         fkey, = snapshots.c.volume_id.foreign_keys
 
         self.assertIsNotNone(fkey)
 
-    def _post_downgrade_007(self, engine):
-        snapshots = db_utils.get_table(engine, 'snapshots')
-
-        self.assertEqual(0, len(snapshots.c.volume_id.foreign_keys))
-
     def _pre_upgrade_008(self, engine):
         self.assertFalse(engine.dialect.has_table(engine.connect(),
                                                   "backups"))
@@ -249,10 +240,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(snapshot_metadata.c.value.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_008(self, engine):
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  "snapshot_metadata"))
-
     def _check_010(self, engine, data):
         """Test adding transfers table works correctly."""
         self.assertTrue(engine.dialect.has_table(engine.connect(),
@@ -280,10 +267,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(transfers.c.expires_at.type,
                               self.TIME_TYPE)
 
-    def _post_downgrade_010(self, engine):
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  "transfers"))
-
     def _check_011(self, engine, data):
         """Test adding transfers table works correctly."""
         volumes = db_utils.get_table(engine, 'volumes')
@@ -291,49 +274,29 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(volumes.c.bootable.type,
                               self.BOOL_TYPE)
 
-    def _post_downgrade_011(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('bootable', volumes.c)
-
     def _check_012(self, engine, data):
         """Test that adding attached_host column works correctly."""
         volumes = db_utils.get_table(engine, 'volumes')
         self.assertIsInstance(volumes.c.attached_host.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_012(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('attached_host', volumes.c)
-
     def _check_013(self, engine, data):
         """Test that adding provider_geometry column works correctly."""
         volumes = db_utils.get_table(engine, 'volumes')
         self.assertIsInstance(volumes.c.provider_geometry.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_013(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('provider_geometry', volumes.c)
-
     def _check_014(self, engine, data):
         """Test that adding _name_id column works correctly."""
         volumes = db_utils.get_table(engine, 'volumes')
         self.assertIsInstance(volumes.c._name_id.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_014(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('_name_id', volumes.c)
-
     def _check_015(self, engine, data):
         """Test removing migrations table works correctly."""
         self.assertFalse(engine.dialect.has_table(engine.connect(),
                                                   "migrations"))
 
-    def _post_downgrade_015(self, engine):
-        self.assertTrue(engine.dialect.has_table(engine.connect(),
-                                                 "migrations"))
-
     def _check_016(self, engine, data):
         """Test that dropping xen storage manager tables works correctly."""
         self.assertFalse(engine.dialect.has_table(engine.connect(),
@@ -343,14 +306,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertFalse(engine.dialect.has_table(engine.connect(),
                                                   'sm_volume'))
 
-    def _post_downgrade_016(self, engine):
-        self.assertTrue(engine.dialect.has_table(engine.connect(),
-                                                 'sm_flavors'))
-        self.assertTrue(engine.dialect.has_table(engine.connect(),
-                                                 'sm_backend_config'))
-        self.assertTrue(engine.dialect.has_table(engine.connect(),
-                                                 'sm_volume'))
-
     def _check_017(self, engine, data):
         """Test that added encryption information works correctly."""
         # encryption key UUID
@@ -378,16 +333,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(encryption.c.provider.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_017(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('encryption_key_id', volumes.c)
-
-        snapshots = db_utils.get_table(engine, 'snapshots')
-        self.assertNotIn('encryption_key_id', snapshots.c)
-
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  'encryption'))
-
     def _check_018(self, engine, data):
         """Test that added qos_specs table works correctly."""
         self.assertTrue(engine.dialect.has_table(
@@ -410,20 +355,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(qos_specs.c.value.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_018(self, engine):
-        self.assertFalse(engine.dialect.has_table(
-            engine.connect(), "quality_of_service_specs"))
-
     def _check_019(self, engine, data):
         """Test that adding migration_status column works correctly."""
         volumes = db_utils.get_table(engine, 'volumes')
         self.assertIsInstance(volumes.c.migration_status.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_019(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('migration_status', volumes.c)
-
     def _check_020(self, engine, data):
         """Test adding volume_admin_metadata table works correctly."""
         self.assertTrue(engine.dialect.has_table(engine.connect(),
@@ -448,10 +385,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(volume_admin_metadata.c.value.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_020(self, engine):
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  "volume_admin_metadata"))
-
     def _verify_quota_defaults(self, engine):
         quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
 
@@ -465,20 +398,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         """Test adding default data for quota classes works correctly."""
         self._verify_quota_defaults(engine)
 
-    def _post_downgrade_021(self, engine):
-        # Defaults should not be deleted during downgrade
-        self._verify_quota_defaults(engine)
-
     def _check_022(self, engine, data):
         """Test that adding disabled_reason column works correctly."""
         services = db_utils.get_table(engine, 'services')
         self.assertIsInstance(services.c.disabled_reason.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_022(self, engine):
-        services = db_utils.get_table(engine, 'services')
-        self.assertNotIn('disabled_reason', services.c)
-
     def _check_023(self, engine, data):
         """Test that adding reservations index works correctly."""
         reservations = db_utils.get_table(engine, 'reservations')
@@ -491,11 +416,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertEqual(sorted(['deleted', 'expire']),
                          sorted(index_columns))
 
-    def _post_downgrade_023(self, engine):
-        reservations = db_utils.get_table(engine, 'reservations')
-        index_names = [idx.name for idx in reservations.indexes]
-        self.assertNotIn('reservations_deleted_expire_idx', index_names)
-
     def _check_024(self, engine, data):
         """Test adding replication columns to volume table."""
         volumes = db_utils.get_table(engine, 'volumes')
@@ -506,12 +426,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(volumes.c.replication_driver_data.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_024(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('replication_status', volumes.c)
-        self.assertNotIn('replication_extended_status', volumes.c)
-        self.assertNotIn('replication_driver_data', volumes.c)
-
     def _check_025(self, engine, data):
         """Test adding table and columns for consistencygroups."""
         # Test consistencygroup_id is in Table volumes
@@ -600,31 +514,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         # 2 foreign keys in Table snapshots
         self.assertEqual(2, len(snapshots.foreign_keys))
 
-    def _post_downgrade_025(self, engine):
-        metadata = sqlalchemy.MetaData()
-        # Test consistencygroup_id is not in Table volumes
-        volumes = self.get_table_ref(engine, 'volumes', metadata)
-        self.assertNotIn('consistencygroup_id', volumes.c)
-
-        # Test cgsnapshot_id is not in Table snapshots
-        snapshots = self.get_table_ref(engine, 'snapshots', metadata)
-        self.assertNotIn('cgsnapshot_id', snapshots.c)
-
-        # Verify foreign keys are removed
-        self.assertEqual(0, len(volumes.foreign_keys))
-        self.assertEqual(1, len(snapshots.foreign_keys))
-        # volume_id foreign key is still in Table snapshots
-        fkey, = snapshots.c.volume_id.foreign_keys
-        self.assertEqual(volumes.c.id, fkey.column)
-
-        # Test Table cgsnapshots doesn't exist any more
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  "cgsnapshots"))
-
-        # Test Table consistencygroups doesn't exist any more
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  "consistencygroups"))
-
     def _pre_upgrade_026(self, engine):
         """Test adding default data for consistencygroups quota class."""
         quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
@@ -643,15 +532,6 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
 
         self.assertEqual(4, num_defaults)
 
-    def _post_downgrade_026(self, engine):
-        # Defaults should not be deleted during downgrade
-        quota_class_metadata = db_utils.get_table(engine, 'quota_classes')
-        num_defaults = quota_class_metadata.count().\
-            where(quota_class_metadata.c.class_name == 'default').\
-            execute().scalar()
-
-        self.assertEqual(4, num_defaults)
-
     def _check_032(self, engine, data):
         """Test adding volume_type_projects table works correctly."""
         volume_type_projects = db_utils.get_table(engine,
@@ -675,62 +555,33 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(volume_types.c.is_public.type,
                               self.BOOL_TYPE)
 
-    def _post_downgrade_032(self, engine):
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  "volume_type_projects"))
-        volume_types = db_utils.get_table(engine, 'volume_types')
-        self.assertNotIn('is_public', volume_types.c)
-
     def _check_033(self, engine, data):
         """Test adding encryption_id column to encryption table."""
         encryptions = db_utils.get_table(engine, 'encryption')
         self.assertIsInstance(encryptions.c.encryption_id.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_033(self, engine):
-        metadata = sqlalchemy.schema.MetaData()
-        metadata.bind = engine
-
-        encryptions = db_utils.get_table(engine, 'encryption')
-        self.assertNotIn('encryption_id', encryptions.c)
-
     def _check_034(self, engine, data):
         """Test adding description columns to volume_types table."""
         volume_types = db_utils.get_table(engine, 'volume_types')
         self.assertIsInstance(volume_types.c.description.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_034(self, engine):
-        volume_types = db_utils.get_table(engine, 'volume_types')
-        self.assertNotIn('description', volume_types.c)
-
     def _check_035(self, engine, data):
         volumes = db_utils.get_table(engine, 'volumes')
         self.assertIsInstance(volumes.c.provider_id.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_035(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('provider_id', volumes.c)
-
     def _check_036(self, engine, data):
         snapshots = db_utils.get_table(engine, 'snapshots')
         self.assertIsInstance(snapshots.c.provider_id.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_036(self, engine):
-        snapshots = db_utils.get_table(engine, 'snapshots')
-        self.assertNotIn('provider_id', snapshots.c)
-
     def _check_037(self, engine, data):
         consistencygroups = db_utils.get_table(engine, 'consistencygroups')
         self.assertIsInstance(consistencygroups.c.cgsnapshot_id.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_037(self, engine):
-        consistencygroups = db_utils.get_table(engine, 'consistencygroups')
-        self.assertNotIn('cgsnapshot_id', consistencygroups.c)
-
     def _check_038(self, engine, data):
         """Test adding and removing driver_initiator_data table."""
 
@@ -758,20 +609,11 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(private_data.c.value.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_038(self, engine):
-        has_table = engine.dialect.has_table(engine.connect(),
-                                             "driver_initiator_data")
-        self.assertFalse(has_table)
-
     def _check_039(self, engine, data):
         backups = db_utils.get_table(engine, 'backups')
         self.assertIsInstance(backups.c.parent_id.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_039(self, engine):
-        backups = db_utils.get_table(engine, 'backups')
-        self.assertNotIn('parent_id', backups.c)
-
     def _check_40(self, engine, data):
         volumes = db_utils.get_table(engine, 'volumes')
         self.assertNotIn('instance_uuid', volumes.c)
@@ -793,39 +635,17 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(attachments.c.attach_status.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_040(self, engine):
-        self.assertFalse(engine.dialect.has_table(engine.connect(),
-                                                  "volume_attachment"))
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('multiattach', volumes.c)
-        self.assertIsInstance(volumes.c.instance_uuid.type,
-                              sqlalchemy.types.VARCHAR)
-        self.assertIsInstance(volumes.c.attached_host.type,
-                              sqlalchemy.types.VARCHAR)
-        self.assertIsInstance(volumes.c.attach_time.type,
-                              sqlalchemy.types.VARCHAR)
-        self.assertIsInstance(volumes.c.mountpoint.type,
-                              sqlalchemy.types.VARCHAR)
-
     def _check_041(self, engine, data):
         """Test that adding modified_at column works correctly."""
         services = db_utils.get_table(engine, 'services')
         self.assertIsInstance(services.c.modified_at.type,
                               self.TIME_TYPE)
 
-    def _post_downgrade_041(self, engine):
-        services = db_utils.get_table(engine, 'services')
-        self.assertNotIn('modified_at', services.c)
-
     def _check_048(self, engine, data):
         quotas = db_utils.get_table(engine, 'quotas')
         self.assertIsInstance(quotas.c.allocated.type,
                               sqlalchemy.types.INTEGER)
 
-    def _post_downgrade_048(self, engine):
-        quotas = db_utils.get_table(engine, 'quotas')
-        self.assertNotIn('allocated', quotas.c)
-
     def _check_049(self, engine, data):
         backups = db_utils.get_table(engine, 'backups')
         self.assertIsInstance(backups.c.temp_volume_id.type,
@@ -833,38 +653,21 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(backups.c.temp_snapshot_id.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_049(self, engine):
-        backups = db_utils.get_table(engine, 'backups')
-        self.assertNotIn('temp_volume_id', backups.c)
-        self.assertNotIn('temp_snapshot_id', backups.c)
-
     def _check_050(self, engine, data):
         volumes = db_utils.get_table(engine, 'volumes')
         self.assertIsInstance(volumes.c.previous_status.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_050(self, engine):
-        volumes = db_utils.get_table(engine, 'volumes')
-        self.assertNotIn('previous_status', volumes.c)
-
     def _check_051(self, engine, data):
         consistencygroups = db_utils.get_table(engine, 'consistencygroups')
         self.assertIsInstance(consistencygroups.c.source_cgid.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_051(self, engine):
-        consistencygroups = db_utils.get_table(engine, 'consistencygroups')
-        self.assertNotIn('source_cgid', consistencygroups.c)
-
     def _check_052(self, engine, data):
         snapshots = db_utils.get_table(engine, 'snapshots')
         self.assertIsInstance(snapshots.c.provider_auth.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_052(self, engine):
-        snapshots = db_utils.get_table(engine, 'snapshots')
-        self.assertNotIn('provider_auth', snapshots.c)
-
     def _check_053(self, engine, data):
         services = db_utils.get_table(engine, 'services')
         self.assertIsInstance(services.c.rpc_current_version.type,
@@ -876,22 +679,11 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(services.c.object_available_version.type,
                               sqlalchemy.types.VARCHAR)
 
-    def _post_downgrade_053(self, engine):
-        services = db_utils.get_table(engine, 'services')
-        self.assertNotIn('rpc_current_version', services.c)
-        self.assertNotIn('rpc_available_version', services.c)
-        self.assertNotIn('object_current_version', services.c)
-        self.assertNotIn('object_available_version', services.c)
-
     def _check_054(self, engine, data):
         backups = db_utils.get_table(engine, 'backups')
         self.assertIsInstance(backups.c.num_dependent_backups.type,
                               sqlalchemy.types.INTEGER)
 
-    def _post_downgrade_054(self, engine):
-        backups = db_utils.get_table(engine, 'backups')
-        self.assertNotIn('num_dependent_backups', backups.c)
-
     def _check_055(self, engine, data):
         """Test adding image_volume_cache_entries table."""
         has_table = engine.dialect.has_table(engine.connect(),
@@ -918,14 +710,8 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(private_data.c.last_used.type,
                               self.TIME_TYPE)
 
-    def _post_downgrade_055(self, engine):
-        """Test removing image_volume_cache_entries table."""
-        has_table = engine.dialect.has_table(engine.connect(),
-                                             "image_volume_cache_entries")
-        self.assertFalse(has_table)
-
     def test_walk_versions(self):
-        self.walk_versions(True, False)
+        self.walk_versions(False, False)
 
 
 class TestSqliteMigrations(test_base.DbTestCase,
index 8458bf5b6a7c601290cc87fd8b415f7eee600a9f..5f44f6220dda62cda7ab40464c655a8ef03b5281 100644 (file)
@@ -38,10 +38,10 @@ class ExceptionTestCase(test.TestCase):
 
 class ProjectTestCase(test.TestCase):
     def test_all_migrations_have_downgrade(self):
-        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
+        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
         py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy",
                                "migrate_repo", "versions", "*.py")
-        missing_downgrade = []
+        downgrades = []
         for path in glob.iglob(py_glob):
             has_upgrade = False
             has_downgrade = False
@@ -52,10 +52,11 @@ class ProjectTestCase(test.TestCase):
                     if 'def downgrade(' in line:
                         has_downgrade = True
 
-                if has_upgrade and not has_downgrade:
+                if has_upgrade and has_downgrade:
                     fname = os.path.basename(path)
-                    missing_downgrade.append(fname)
+                    downgrades.append(fname)
 
-        helpful_msg = (_("The following migrations are missing a downgrade:"
-                         "\n\t%s") % '\n\t'.join(sorted(missing_downgrade)))
-        self.assertFalse(missing_downgrade, msg=helpful_msg)
+        helpful_msg = (_("The following migrations have a downgrade, "
+                         "which is not allowed: "
+                         "\n\t%s") % '\n\t'.join(sorted(downgrades)))
+        self.assertFalse(downgrades, msg=helpful_msg)