--- /dev/null
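+-- SQLite does not support dropping a column with ALTER TABLE, so this
+-- downgrade removes volumes.source_volid by rebuilding the table:
+-- copy volumes into a temporary backup, recreate volumes without the
+-- column, and copy the rows back.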
+BEGIN TRANSACTION;
+
+CREATE TEMPORARY TABLE volumes_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id VARCHAR(255),
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_uuid VARCHAR(36),
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(256),
+ provider_auth VARCHAR(256),
+ snapshot_id VARCHAR(36),
+ volume_type_id VARCHAR(36),
+ source_volid VARCHAR(36),
+ PRIMARY KEY (id),
+ CHECK (deleted IN (0, 1))
+);
+
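+-- Copy every row from volumes into the backup table.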
+INSERT INTO volumes_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ host,
+ size,
+ availability_zone,
+ instance_uuid,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ snapshot_id,
+ volume_type_id,
+ source_volid
+ FROM volumes;
+
+DROP TABLE volumes;
+
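+-- Recreate volumes without the source_volid column.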
+CREATE TABLE volumes (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id VARCHAR(255),
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_uuid VARCHAR(36),
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(256),
+ provider_auth VARCHAR(256),
+ snapshot_id VARCHAR(36),
+ volume_type_id VARCHAR(36),
+ PRIMARY KEY (id),
+ CHECK (deleted IN (0, 1))
+);
+
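+-- Copy the rows back into the rebuilt volumes table, dropping source_volid.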
+INSERT INTO volumes
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ host,
+ size,
+ availability_zone,
+ instance_uuid,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ snapshot_id,
+ volume_type_id
+ FROM volumes_backup;
+
+DROP TABLE volumes_backup;
+COMMIT;
class TestMigrations(test.TestCase):
"""Test sqlalchemy-migrate migrations."""
- TEST_DATABASES = {}
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable
# to override the location of the config file used for migration testing.
super(TestMigrations, self).setUp()
self.snake_walk = False
+ self.test_databases = {}
- # Load test databases from the config file. Only do this
- # once. No need to re-run this on each test...
+ # Load the test databases from the config file on each test run so
+ # that every test starts from a freshly parsed configuration.
LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
- if not TestMigrations.TEST_DATABASES:
+ if not self.test_databases:
if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(TestMigrations.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
- TestMigrations.TEST_DATABASES[key] = value
+ self.test_databases[key] = value
self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
except ConfigParser.ParsingError, e:
- self.fail("Failed to read test_migrations.conf config "
- "file.")
+ self.fail("Failed to read test_migrations.conf config "
+ "file. Got error: %s" % e)
self.engines = {}
- for key, value in TestMigrations.TEST_DATABASES.items():
+ for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
- # self._reset_databases()
-
- # remove these from the list so they aren't used in the migration tests
- if "mysqlcitest" in self.engines:
- del self.engines["mysqlcitest"]
- if "mysqlcitest" in TestMigrations.TEST_DATABASES:
- del TestMigrations.TEST_DATABASES["mysqlcitest"]
+ self._reset_databases()
super(TestMigrations, self).tearDown()
def _reset_databases(self):
LOG.debug(output)
self.assertEqual(0, status)
for key, engine in self.engines.items():
- conn_string = TestMigrations.TEST_DATABASES[key]
+ conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
connect_string = _get_connect_string('mysql')
engine = sqlalchemy.create_engine(connect_string)
self.engines["mysqlcitest"] = engine
- TestMigrations.TEST_DATABASES["mysqlcitest"] = connect_string
+ self.test_databases["mysqlcitest"] = connect_string
# build a fully populated mysql database with all the tables
self._reset_databases()