sqlalchemy='cinder.db.sqlalchemy.migration')
+INIT_VERSION = 000
+
+
def db_sync(version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version)
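
The shim above simply forwards to whichever backend module `IMPL` resolves to (the `sqlalchemy` entry points at `cinder.db.sqlalchemy.migration`). Below is a minimal, self-contained sketch of that style of lazy backend dispatch, assuming an importlib-based loader; the `LazyBackend` class and the commented usage lines are illustrative stand-ins, not Cinder's actual `LazyPluggable` API.

import importlib


class LazyBackend(object):
    """Import the backend module on first attribute access, then proxy to it."""

    def __init__(self, module_path):
        self._module_path = module_path
        self._module = None

    def __getattr__(self, name):
        if self._module is None:
            self._module = importlib.import_module(self._module_path)
        return getattr(self._module, name)


# Hypothetical wiring mirroring the shim above:
#   IMPL = LazyBackend('cinder.db.sqlalchemy.migration')
#   IMPL.db_sync(version=None)  # forwarded to the sqlalchemy backend's db_sync
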
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-## Table code mostly autogenerated by genmodel.py
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String
-from sqlalchemy import Table, Text
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- #
- # New Tables
- #
- auth_tokens = Table('auth_tokens', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('token_hash',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True,
- nullable=False),
- Column('user_id', Integer()),
- Column('server_manageent_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('storage_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('cdn_management_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- export_devices = Table('export_devices', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('shelf_id', Integer()),
- Column('blade_id', Integer()),
- Column('volume_id',
- Integer(),
- ForeignKey('volumes.id'),
- nullable=True),
- )
-
- fixed_ips = Table('fixed_ips', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('network_id',
- Integer(),
- ForeignKey('networks.id'),
- nullable=True),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id'),
- nullable=True),
- Column('allocated', Boolean(create_constraint=True, name=None)),
- Column('leased', Boolean(create_constraint=True, name=None)),
- Column('reserved', Boolean(create_constraint=True, name=None)),
- )
-
- floating_ips = Table('floating_ips', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('fixed_ip_id',
- Integer(),
- ForeignKey('fixed_ips.id'),
- nullable=True),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- instances = Table('instances', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('internal_id', Integer()),
- Column('admin_pass',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('image_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('kernel_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('ramdisk_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('server_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('launch_index', Integer()),
- Column('key_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('key_data',
- Text(length=None, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('state', Integer()),
- Column('state_description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('memory_mb', Integer()),
- Column('vcpus', Integer()),
- Column('local_gb', Integer()),
- Column('hostname',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('instance_type',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('user_data',
- Text(length=None, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('reservation_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('mac_address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('scheduled_at', DateTime(timezone=False)),
- Column('launched_at', DateTime(timezone=False)),
- Column('terminated_at', DateTime(timezone=False)),
- Column('display_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('display_description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- key_pairs = Table('key_pairs', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('fingerprint',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('public_key',
- Text(length=None, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- networks = Table('networks', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('injected', Boolean(create_constraint=True, name=None)),
- Column('cidr',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('netmask',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('bridge',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('gateway',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('broadcast',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('dns',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('vlan', Integer()),
- Column('vpn_public_address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('vpn_public_port', Integer()),
- Column('vpn_private_address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('dhcp_start',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- projects = Table('projects', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True,
- nullable=False),
- Column('name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_manager',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- ForeignKey('users.id')),
- )
-
- quotas = Table('quotas', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('instances', Integer()),
- Column('cores', Integer()),
- Column('volumes', Integer()),
- Column('gigabytes', Integer()),
- Column('floating_ips', Integer()),
- )
-
- security_groups = Table('security_groups', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- security_group_inst_assoc = Table('security_group_instance_association',
- meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('security_group_id',
- Integer(),
- ForeignKey('security_groups.id')),
- Column('instance_id', Integer(), ForeignKey('instances.id')),
- )
-
- security_group_rules = Table('security_group_rules', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('parent_group_id',
- Integer(),
- ForeignKey('security_groups.id')),
- Column('protocol',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('from_port', Integer()),
- Column('to_port', Integer()),
- Column('cidr',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('group_id',
- Integer(),
- ForeignKey('security_groups.id')),
- )
-
- services = Table('services', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('binary',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('topic',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('report_count', Integer(), nullable=False),
- Column('disabled', Boolean(create_constraint=True, name=None)),
- )
-
- users = Table('users', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True,
- nullable=False),
- Column('name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('access_key',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('secret_key',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('is_admin', Boolean(create_constraint=True, name=None)),
- )
-
- user_project_association = Table('user_project_association', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- ForeignKey('users.id'),
- primary_key=True,
- nullable=False),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- ForeignKey('projects.id'),
- primary_key=True,
- nullable=False),
- )
-
- user_project_role_association = Table('user_project_role_association',
- meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True,
- nullable=False),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True,
- nullable=False),
- Column('role',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True,
- nullable=False),
- ForeignKeyConstraint(['user_id',
- 'project_id'],
- ['user_project_association.user_id',
- 'user_project_association.project_id']),
- )
-
- user_role_association = Table('user_role_association', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- ForeignKey('users.id'),
- primary_key=True,
- nullable=False),
- Column('role',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True,
- nullable=False),
- )
-
- volumes = Table('volumes', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('ec2_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('size', Integer()),
- Column('availability_zone',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id'),
- nullable=True),
- Column('mountpoint',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('attach_time',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('status',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('attach_status',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('scheduled_at', DateTime(timezone=False)),
- Column('launched_at', DateTime(timezone=False)),
- Column('terminated_at', DateTime(timezone=False)),
- Column('display_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('display_description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
- tables = [auth_tokens,
- instances, key_pairs, networks, fixed_ips, floating_ips,
- quotas, security_groups, security_group_inst_assoc,
- security_group_rules, services, users, projects,
- user_project_association, user_project_role_association,
- user_role_association, volumes, export_devices]
-
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- meta.drop_all(tables=tables)
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- auth_tokens = Table('auth_tokens', meta, autoload=True)
- export_devices = Table('export_devices', meta, autoload=True)
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- floating_ips = Table('floating_ips', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- key_pairs = Table('key_pairs', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
- projects = Table('projects', meta, autoload=True)
- quotas = Table('quotas', meta, autoload=True)
- security_groups = Table('security_groups', meta, autoload=True)
- security_group_inst_assoc = Table('security_group_instance_association',
- meta, autoload=True)
- security_group_rules = Table('security_group_rules', meta, autoload=True)
- services = Table('services', meta, autoload=True)
- users = Table('users', meta, autoload=True)
- user_project_association = Table('user_project_association', meta,
- autoload=True)
- user_project_role_association = Table('user_project_role_association',
- meta,
- autoload=True)
- user_role_association = Table('user_role_association', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
-
- # table order matters, don't change
- for table in (auth_tokens, export_devices, floating_ips, fixed_ips,
- key_pairs, networks,
- quotas, security_group_inst_assoc,
- security_group_rules, security_groups, services,
- user_project_role_association, user_project_association,
- user_role_association,
- projects, users, volumes, instances):
- table.drop()
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import Integer, MetaData, String, Table
+
+from cinder import flags
+from cinder import log as logging
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ migrations = Table(
+ 'migrations', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('source_compute', String(length=255)),
+ Column('dest_compute', String(length=255)),
+ Column('dest_host', String(length=255)),
+ Column('status', String(length=255)),
+ Column('instance_uuid', String(length=255)),
+ Column('old_instance_type_id', Integer),
+ Column('new_instance_type_id', Integer),
+ mysql_engine='InnoDB'
+ )
+
+ services = Table(
+ 'services', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('host', String(length=255)),
+ Column('binary', String(length=255)),
+ Column('topic', String(length=255)),
+ Column('report_count', Integer, nullable=False),
+ Column('disabled', Boolean),
+ Column('availability_zone', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ sm_flavors = Table(
+ 'sm_flavors', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('label', String(length=255)),
+ Column('description', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ sm_backend_config = Table(
+ 'sm_backend_config', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('flavor_id', Integer, ForeignKey('sm_flavors.id'),
+ nullable=False),
+ Column('sr_uuid', String(length=255)),
+ Column('sr_type', String(length=255)),
+ Column('config_params', String(length=2047)),
+ mysql_engine='InnoDB'
+ )
+
+ sm_volume = Table(
+ 'sm_volume', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', String(length=36),
+ ForeignKey('volumes.id'),
+ primary_key=True,
+ nullable=False),
+ Column('backend_id', Integer, ForeignKey('sm_backend_config.id'),
+ nullable=False),
+ Column('vdi_uuid', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ snapshots = Table(
+ 'snapshots', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', String(length=36), primary_key=True, nullable=False),
+ Column('volume_id', String(length=36), nullable=False),
+ Column('user_id', String(length=255)),
+ Column('project_id', String(length=255)),
+ Column('status', String(length=255)),
+ Column('progress', String(length=255)),
+ Column('volume_size', Integer),
+ Column('scheduled_at', DateTime),
+ Column('display_name', String(length=255)),
+ Column('display_description', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ volume_types = Table(
+ 'volume_types', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('name', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ volume_metadata = Table(
+ 'volume_metadata', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('volume_id', String(length=36), ForeignKey('volumes.id'),
+ nullable=False),
+ Column('key', String(length=255)),
+ Column('value', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ volume_type_extra_specs = Table(
+ 'volume_type_extra_specs', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('volume_type_id', Integer, ForeignKey('volume_types.id'),
+ nullable=False),
+ Column('key', String(length=255)),
+ Column('value', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ volumes = Table(
+ 'volumes', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', String(length=36), primary_key=True, nullable=False),
+ Column('ec2_id', String(length=255)),
+ Column('user_id', String(length=255)),
+ Column('project_id', String(length=255)),
+ Column('host', String(length=255)),
+ Column('size', Integer),
+ Column('availability_zone', String(length=255)),
+ Column('instance_uuid', String(length=36)),
+ Column('mountpoint', String(length=255)),
+ Column('attach_time', String(length=255)),
+ Column('status', String(length=255)),
+ Column('attach_status', String(length=255)),
+ Column('scheduled_at', DateTime),
+ Column('launched_at', DateTime),
+ Column('terminated_at', DateTime),
+ Column('display_name', String(length=255)),
+ Column('display_description', String(length=255)),
+ Column('provider_location', String(length=256)),
+ Column('provider_auth', String(length=256)),
+ Column('snapshot_id', String(length=36)),
+ Column('volume_type_id', Integer),
+ mysql_engine='InnoDB'
+ )
+
+ quotas = Table(
+ 'quotas', meta,
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('project_id', String(length=255)),
+ Column('resource', String(length=255), nullable=False),
+ Column('hard_limit', Integer),
+ mysql_engine='InnoDB'
+ )
+
+ iscsi_targets = Table(
+ 'iscsi_targets', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('target_num', Integer),
+ Column('host', String(length=255)),
+ Column('volume_id', String(length=36), ForeignKey('volumes.id'),
+ nullable=True),
+ mysql_engine='InnoDB'
+ )
+
+    # Create all tables.
+    # Mind the create order for tables with FK dependencies (a sketch of
+    # deriving that order from the metadata follows the create loop below).
+ tables = [sm_flavors,
+ sm_backend_config,
+ snapshots,
+ volume_types,
+ volumes,
+ iscsi_targets,
+ migrations,
+ quotas,
+ services,
+ sm_volume,
+ volume_metadata,
+ volume_type_extra_specs]
+
+ for table in tables:
+ try:
+ table.create()
+ except Exception:
+ LOG.info(repr(table))
+ LOG.exception('Exception while creating table.')
+ raise
+
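
The hand-maintained create order above has to respect the ForeignKey references (for example, sm_backend_config before sm_volume, and volumes before iscsi_targets). A small sketch, using only stock SQLAlchemy, of how that ordering can be derived or sanity-checked from the metadata itself; the parent/child tables below are placeholders, not Cinder tables.

from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

meta = MetaData()
parent = Table('parent', meta,
               Column('id', Integer, primary_key=True))
child = Table('child', meta,
              Column('id', Integer, primary_key=True),
              Column('parent_id', Integer, ForeignKey('parent.id')))

# MetaData.sorted_tables returns the tables topologically sorted by their
# foreign-key dependencies (referenced tables first), which is also the
# order MetaData.create_all() would use.
print([t.name for t in meta.sorted_tables])  # ['parent', 'child']
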
+ if migrate_engine.name == "mysql":
+ tables = ["sm_flavors",
+ "sm_backend_config",
+ "snapshots",
+ "volume_types",
+ "volumes",
+ "iscsi_targets",
+ "migrate_version",
+ "migrations",
+ "quotas",
+ "services",
+ "sm_volume",
+ "volume_metadata",
+ "volume_type_extra_specs"]
+
+ sql = "SET foreign_key_checks = 0;"
+ for table in tables:
+ sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table
+ sql += "SET foreign_key_checks = 1;"
+ sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
+ % migrate_engine.url.database
+ sql += "ALTER TABLE %s Engine=InnoDB;" % table
+ migrate_engine.execute(sql)
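
The block above concatenates the whole conversion into one SQL string before executing it, and the final "ALTER TABLE %s Engine=InnoDB" statement reuses the loop variable, so it names only the last table in the list. Here is a sketch of the same conversion issued statement by statement, with the engine switch applied per table; `engine`, `table_names` and `database` are placeholders, and the per-table ENGINE change is an assumption about intent rather than a copy of the code above.

def convert_tables_to_utf8(engine, table_names, database):
    # Disable FK checks so tables can be converted in any order.
    engine.execute("SET foreign_key_checks = 0")
    for name in table_names:
        engine.execute("ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % name)
        engine.execute("ALTER TABLE %s ENGINE=InnoDB" % name)
    engine.execute("SET foreign_key_checks = 1")
    engine.execute("ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % database)
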
+
+
+def downgrade(migrate_engine):
+ LOG.exception('Downgrade from initial Cinder install is unsupported.')
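
This script sits at version 001 of a sqlalchemy-migrate repository; the migrate_version bookkeeping table that appears in the MySQL charset list above is created when the database is first placed under version control, not by this script. A rough sketch of how such a repository is typically driven with sqlalchemy-migrate's versioning API, which is approximately what db_sync() in cinder.db.sqlalchemy.migration wraps; the database URL and repository path below are placeholders.

from migrate import exceptions as migrate_exceptions
from migrate.versioning import api as versioning_api

DB_URL = 'sqlite:///cinder.sqlite'                  # placeholder database URL
REPOSITORY = 'cinder/db/sqlalchemy/migrate_repo'    # placeholder repo path

try:
    versioning_api.db_version(DB_URL, REPOSITORY)
except migrate_exceptions.DatabaseNotControlledError:
    # Creates the migrate_version table and stamps the database at version 0.
    versioning_api.version_control(DB_URL, REPOSITORY, version=0)

# Runs every script from the current version up to the latest, starting
# with 001 above.
versioning_api.upgrade(DB_URL, REPOSITORY)
print(versioning_api.db_version(DB_URL, REPOSITORY))
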
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import Integer, MetaData, String, Table, Text
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- volumes = Table('volumes', meta, autoload=True)
-
- instances = Table('instances', meta, autoload=True)
- services = Table('services', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
- auth_tokens = Table('auth_tokens', meta, autoload=True)
-
- #
- # New Tables
- #
- certificates = Table('certificates', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('file_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- consoles = Table('consoles', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('instance_id', Integer()),
- Column('password',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('port', Integer(), nullable=True),
- Column('pool_id',
- Integer(),
- ForeignKey('console_pools.id')),
- )
-
- console_pools = Table('console_pools', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('username',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('password',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('console_type',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('public_hostname',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('compute_host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- instance_actions = Table('instance_actions', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id')),
- Column('action',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('error',
- Text(length=None, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- iscsi_targets = Table('iscsi_targets', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('target_num', Integer()),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('volume_id',
- Integer(),
- ForeignKey('volumes.id'),
- nullable=True),
- )
-
- tables = [certificates, console_pools, consoles, instance_actions,
- iscsi_targets]
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- meta.drop_all(tables=tables)
- raise
-
- auth_tokens.c.user_id.alter(type=String(length=255,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
-
- #
- # New Columns
- #
- instances_availability_zone = Column(
- 'availability_zone',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
-
- instances_locked = Column('locked',
- Boolean(create_constraint=True, name=None))
-
- networks_cidr_v6 = Column(
- 'cidr_v6',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
-
- networks_ra_server = Column(
- 'ra_server',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
-
- services_availability_zone = Column(
- 'availability_zone',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
-
- instances.create_column(instances_availability_zone)
- instances.create_column(instances_locked)
- networks.create_column(networks_cidr_v6)
- networks.create_column(networks_ra_server)
- services.create_column(services_availability_zone)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- volumes = Table('volumes', meta, autoload=True)
-
- instances = Table('instances', meta, autoload=True)
- services = Table('services', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
- auth_tokens = Table('auth_tokens', meta, autoload=True)
-
- certificates = Table('certificates', meta, autoload=True)
- consoles = Table('consoles', meta, autoload=True)
- console_pools = Table('console_pools', meta, autoload=True)
- instance_actions = Table('instance_actions', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
-
- # table order matters, don't change
- tables = [certificates, consoles, console_pools, instance_actions,
- iscsi_targets]
- for table in tables:
- table.drop()
-
- auth_tokens.c.user_id.alter(type=Integer())
-
- instances.drop_column('availability_zone')
- instances.drop_column('locked')
- networks.drop_column('cidr_v6')
- networks.drop_column('ra_server')
- services.drop_column('availability_zone')
+++ /dev/null
-BEGIN;
-
- DROP TABLE certificates;
- DROP TABLE consoles;
- DROP TABLE console_pools;
- DROP TABLE instance_actions;
- DROP TABLE iscsi_targets;
-
- ALTER TABLE auth_tokens ADD COLUMN user_id_backup INTEGER;
- UPDATE auth_tokens SET user_id_backup = CAST(user_id AS INTEGER);
- ALTER TABLE auth_tokens DROP COLUMN user_id;
- ALTER TABLE auth_tokens RENAME COLUMN user_id_backup TO user_id;
-
- ALTER TABLE instances DROP COLUMN availability_zone;
- ALTER TABLE instances DROP COLUMN locked;
- ALTER TABLE networks DROP COLUMN cidr_v6;
- ALTER TABLE networks DROP COLUMN ra_server;
- ALTER TABLE services DROP COLUMN availability_zone;
-
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
-
- DROP TABLE certificates;
-
- DROP TABLE console_pools;
-
- DROP TABLE consoles;
-
- DROP TABLE instance_actions;
-
- DROP TABLE iscsi_targets;
-
- CREATE TEMPORARY TABLE auth_tokens_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- token_hash VARCHAR(255) NOT NULL,
- user_id VARCHAR(255),
- server_manageent_url VARCHAR(255),
- storage_url VARCHAR(255),
- cdn_management_url VARCHAR(255),
- PRIMARY KEY (token_hash),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO auth_tokens_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- token_hash,
- user_id,
- server_manageent_url,
- storage_url,
- cdn_management_url
- FROM auth_tokens;
-
- DROP TABLE auth_tokens;
-
- CREATE TABLE auth_tokens (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- token_hash VARCHAR(255) NOT NULL,
- user_id INTEGER,
- server_manageent_url VARCHAR(255),
- storage_url VARCHAR(255),
- cdn_management_url VARCHAR(255),
- PRIMARY KEY (token_hash),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO auth_tokens
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- token_hash,
- user_id,
- server_manageent_url,
- storage_url,
- cdn_management_url
- FROM auth_tokens_backup;
-
- DROP TABLE auth_tokens_backup;
-
- CREATE TEMPORARY TABLE instances_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- admin_pass VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_id VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- state INTEGER,
- state_description VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- instance_type VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- mac_address VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (locked IN (0, 1))
- );
-
- INSERT INTO instances_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- admin_pass,
- user_id,
- project_id,
- image_id,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- state,
- state_description,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- instance_type,
- user_data,
- reservation_id,
- mac_address,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked
- FROM instances;
-
- DROP TABLE instances;
-
- CREATE TABLE instances (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- admin_pass VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_id VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- state INTEGER,
- state_description VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- instance_type VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- mac_address VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO instances
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- admin_pass,
- user_id,
- project_id,
- image_id,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- state,
- state_description,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- instance_type,
- user_data,
- reservation_id,
- mac_address,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description
- FROM instances_backup;
-
- DROP TABLE instances_backup;
-
- CREATE TEMPORARY TABLE networks_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- ra_server VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (injected IN (0, 1))
- );
-
- INSERT INTO networks_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- ra_server
- FROM networks;
-
- DROP TABLE networks;
-
- CREATE TABLE networks (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (injected IN (0, 1))
- );
-
- INSERT INTO networks
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host
- FROM networks_backup;
-
- DROP TABLE networks_backup;
-
- CREATE TEMPORARY TABLE services_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- host VARCHAR(255),
- binary VARCHAR(255),
- topic VARCHAR(255),
- report_count INTEGER NOT NULL,
- disabled BOOLEAN,
- availability_zone VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (disabled IN (0, 1))
- );
-
- INSERT INTO services_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- host,
- binary,
- topic,
- report_count,
- disabled,
- availability_zone
- FROM services;
-
- DROP TABLE services;
-
- CREATE TABLE services (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- host VARCHAR(255),
- binary VARCHAR(255),
- topic VARCHAR(255),
- report_count INTEGER NOT NULL,
- disabled BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (disabled IN (0, 1))
- );
-
- INSERT INTO services
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- host,
- binary,
- topic,
- report_count,
- disabled
- FROM services_backup;
-
- DROP TABLE services_backup;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- networks_label = Column(
- 'label',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
- networks.create_column(networks_label)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- networks.drop_column('label')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE networks_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- ra_server VARCHAR(255),
- label VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (injected IN (0, 1))
- );
-
- INSERT INTO networks_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- ra_server,
- label
- FROM networks;
-
- DROP TABLE networks;
-
- CREATE TABLE networks (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- ra_server VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (injected IN (0, 1))
- );
-
- INSERT INTO networks
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- ra_server
- FROM networks_backup;
-
- DROP TABLE networks_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- #
- # New Tables
- #
- zones = Table('zones', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('api_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('username',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('password',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- for table in (zones, ):
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- zones = Table('zones', meta, autoload=True)
-
- for table in (zones, ):
- table.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- quotas = Table('quotas', meta, autoload=True)
-
- instance_metadata_table = Table('instance_metadata', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id'),
- nullable=False),
- Column('key',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('value',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)))
-
- for table in (instance_metadata_table, ):
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- raise
-
- quota_metadata_items = Column('metadata_items', Integer())
- quotas.create_column(quota_metadata_items)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- quotas = Table('quotas', meta, autoload=True)
-
- instance_metadata_table = Table('instance_metadata', meta, autoload=True)
-
- for table in (instance_metadata_table, ):
- table.drop()
-
- quotas.drop_column('metadata_items')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Justin Santa Barbara.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
-
- # Add columns to existing tables
- volumes_provider_location = Column('provider_location',
- String(length=256,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
-
- volumes_provider_auth = Column('provider_auth',
- String(length=256,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
- volumes.create_column(volumes_provider_location)
- volumes.create_column(volumes_provider_auth)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
-
- volumes.drop_column('provider_location')
- volumes.drop_column('provider_auth')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE volumes_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(256),
- provider_auth VARCHAR(256),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO volumes_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth
- FROM volumes;
-
- DROP TABLE volumes;
-
- CREATE TABLE volumes (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description
- FROM volumes_backup;
-
- DROP TABLE volumes_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- fixed_ips = Table('fixed_ips', meta, autoload=True)
-
- #
- # New Columns
- #
- fixed_ips_addressV6 = Column(
- "addressV6",
- String(
- length=255,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
-
- fixed_ips_netmaskV6 = Column(
- "netmaskV6",
- String(
- length=3,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
-
- fixed_ips_gatewayV6 = Column(
- "gatewayV6",
- String(
- length=255,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
- # Add columns to existing tables
- fixed_ips.create_column(fixed_ips_addressV6)
- fixed_ips.create_column(fixed_ips_netmaskV6)
- fixed_ips.create_column(fixed_ips_gatewayV6)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- fixed_ips = Table('fixed_ips', meta, autoload=True)
-
- fixed_ips.drop_column('addressV6')
- fixed_ips.drop_column('netmaskV6')
- fixed_ips.drop_column('gatewayV6')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE fixed_ips_backup (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN DEFAULT FALSE,
- leased BOOLEAN DEFAULT FALSE,
- reserved BOOLEAN DEFAULT FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- addressV6 VARCHAR(255),
- netmaskV6 VARCHAR(3),
- gatewayV6 VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (leased IN (0, 1)),
- CHECK (allocated IN (0, 1)),
- CHECK (deleted IN (0, 1)),
- CHECK (reserved IN (0, 1))
- );
-
- INSERT INTO fixed_ips_backup
- SELECT id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- created_at,
- updated_at,
- deleted_at,
- deleted,
- addressV6,
- netmaskV6,
- gatewayV6
- FROM fixed_ips;
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN DEFAULT FALSE,
- leased BOOLEAN DEFAULT FALSE,
- reserved BOOLEAN DEFAULT FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- PRIMARY KEY (id),
- CHECK (leased IN (0, 1)),
- CHECK (allocated IN (0, 1)),
- CHECK (deleted IN (0, 1)),
- CHECK (reserved IN (0, 1))
- );
-
- INSERT INTO fixed_ips
- SELECT id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- created_at,
- updated_at,
- deleted_at,
- deleted
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Ken Pepple
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here
- # Don't create your own engine; bind migrate_engine
- # to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- #
- # New Tables
- #
- instance_types = Table('instance_types', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- unique=True),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('memory_mb', Integer(), nullable=False),
- Column('vcpus', Integer(), nullable=False),
- Column('local_gb', Integer(), nullable=False),
- Column('flavorid', Integer(), nullable=False, unique=True),
- Column('swap', Integer(), nullable=False, default=0),
- Column('rxtx_quota', Integer(), nullable=False, default=0),
- Column('rxtx_cap', Integer(), nullable=False, default=0))
- try:
- instance_types.create()
- except Exception:
- LOG.info(repr(instance_types))
- LOG.exception('Exception while creating instance_types table')
- raise
-
- # Here are the old static instance types
- INSTANCE_TYPES = {
- 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
- 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
- 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
- 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
- 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
- try:
- i = instance_types.insert()
- for name, values in INSTANCE_TYPES.iteritems():
- # FIXME(kpepple) should we be seeding created_at / updated_at ?
- # now = datetime.datetime.utcnow()
- i.execute({'name': name, 'memory_mb': values["memory_mb"],
- 'vcpus': values["vcpus"], 'deleted': False,
- 'local_gb': values["local_gb"],
- 'flavorid': values["flavorid"]})
- except Exception:
- LOG.info(repr(instance_types))
- LOG.exception('Exception while seeding instance_types table')
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
- for table in (instance_types, ):
- table.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- #
- # New Tables
- #
- migrations = Table('migrations', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('source_compute', String(255)),
- Column('dest_compute', String(255)),
- Column('dest_host', String(255)),
- Column('instance_id', Integer, ForeignKey('instances.id'),
- nullable=True),
- Column('status', String(255)),
- )
-
- for table in (migrations, ):
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- migrations = Table('migrations', meta, autoload=True)
-
- for table in (migrations, ):
- table.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances_os_type = Column('os_type',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
- instances.create_column(instances_os_type)
- migrate_engine.execute(instances.update()
- .where(instances.c.os_type == None)
- .values(os_type='linux'))
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('os_type')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData
-from sqlalchemy import Table, Text
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- compute_nodes = Table('compute_nodes', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('service_id', Integer(), nullable=False),
-
- Column('vcpus', Integer(), nullable=False),
- Column('memory_mb', Integer(), nullable=False),
- Column('local_gb', Integer(), nullable=False),
- Column('vcpus_used', Integer(), nullable=False),
- Column('memory_mb_used', Integer(), nullable=False),
- Column('local_gb_used', Integer(), nullable=False),
- Column('hypervisor_type',
- Text(convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- Column('hypervisor_version', Integer(), nullable=False),
- Column('cpu_info',
- Text(convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- )
-
- try:
- compute_nodes.create()
- except Exception:
- LOG.info(repr(compute_nodes))
- LOG.exception('Exception while creating table')
- meta.drop_all(tables=[compute_nodes])
- raise
-
- instances_launched_on = Column(
- 'launched_on',
- Text(convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True)
- instances.create_column(instances_launched_on)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-
- compute_nodes.drop()
-
- instances.drop_column('launched_on')
+++ /dev/null
-# Copyright (c) 2011 NTT.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- networks = Table('networks', meta, autoload=True)
- fixed_ips = Table('fixed_ips', meta, autoload=True)
-
- # Alter column name
- networks.c.ra_server.alter(name='gateway_v6')
- # Add new column to existing table
- networks_netmask_v6 = Column(
- 'netmask_v6',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
- networks.create_column(networks_netmask_v6)
-
- # drop existing columns from table
- fixed_ips.c.addressV6.drop()
- fixed_ips.c.netmaskV6.drop()
- fixed_ips.c.gatewayV6.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- networks = Table('networks', meta, autoload=True)
- fixed_ips = Table('fixed_ips', meta, autoload=True)
-
- networks.c.gateway_v6.alter(name='ra_server')
- networks.drop_column('netmask_v6')
-
- fixed_ips_addressV6 = Column(
- "addressV6",
- String(
- length=255,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
-
- fixed_ips_netmaskV6 = Column(
- "netmaskV6",
- String(
- length=3,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
-
- fixed_ips_gatewayV6 = Column(
- "gatewayV6",
- String(
- length=255,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False))
-
- for column in (fixed_ips_addressV6,
- fixed_ips_netmaskV6,
- fixed_ips_gatewayV6):
- fixed_ips.create_column(column)
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE networks_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- ra_server VARCHAR(255),
- label VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (injected IN (0, 1)),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO networks_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- ra_server,
- label
- FROM networks;
-
- DROP TABLE networks;
-
- CREATE TABLE networks (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- gateway_v6 VARCHAR(255),
- label VARCHAR(255),
- netmask_v6 VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (injected IN (0, 1)),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO networks
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- ra_server AS gateway_v6,
- label,
- NULL AS netmask_v6
- FROM networks_backup;
-
- DROP TABLE networks_backup;
-
- CREATE TEMPORARY TABLE fixed_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- addressV6 VARCHAR(255),
- netmaskV6 VARCHAR(3),
- gatewayV6 VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (reserved IN (0, 1)),
- CHECK (allocated IN (0, 1)),
- CHECK (leased IN (0, 1)),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- FOREIGN KEY(network_id) REFERENCES networks (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- addressV6,
- netmaskV6,
- gatewayV6
- FROM fixed_ips;
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- PRIMARY KEY (id),
- CHECK (reserved IN (0, 1)),
- CHECK (allocated IN (0, 1)),
- CHECK (leased IN (0, 1)),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- FOREIGN KEY(network_id) REFERENCES networks (id)
- );
-
- INSERT INTO fixed_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- migrations = Table('migrations', meta, autoload=True)
-
- old_flavor_id = Column('old_flavor_id', Integer())
- new_flavor_id = Column('new_flavor_id', Integer())
-
- migrations.create_column(old_flavor_id)
- migrations.create_column(new_flavor_id)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- migrations = Table('migrations', meta, autoload=True)
-
- migrations.drop_column('old_flavor_id')
- migrations.drop_column('new_flavor_id')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE migrations_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- source_compute VARCHAR(255),
- dest_compute VARCHAR(255),
- dest_host VARCHAR(255),
- instance_id INTEGER,
- status VARCHAR(255),
- old_flavor_id INTEGER,
- new_flavor_id INTEGER,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO migrations_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- source_compute,
- dest_compute,
- dest_host,
- instance_id,
- status,
- old_flavor_id,
- new_flavor_id
- FROM migrations;
-
- DROP TABLE migrations;
-
- CREATE TABLE migrations (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- source_compute VARCHAR(255),
- dest_compute VARCHAR(255),
- dest_host VARCHAR(255),
- instance_id INTEGER,
- status VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO migrations
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- source_compute,
- dest_compute,
- dest_host,
- instance_id,
- status
- FROM migrations_backup;
-
- DROP TABLE migrations_backup;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
-
- c_instance_type_id = Column('instance_type_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
-
- instances.create_column(c_instance_type_id)
-
- type_names = {}
- recs = migrate_engine.execute(instance_types.select())
- for row in recs:
- type_names[row[0]] = row[1]
-
- for type_id, type_name in type_names.iteritems():
- migrate_engine.execute(instances.update()
- .where(instances.c.instance_type == type_name)
- .values(instance_type_id=type_id))
-
- instances.c.instance_type.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
-
- c_instance_type = Column('instance_type',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
- instances.create_column(c_instance_type)
-
- type_names = {}
- recs = migrate_engine.execute(instance_types.select())
- for row in recs:
- type_names[row[0]] = row[1]
-
- for type_id, type_name in type_names.iteritems():
- migrate_engine.execute(instances.update()
- .where(instances.c.instance_type_id == type_id)
- .values(instance_type=type_name))
-
- instances.c.instance_type_id.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2011 Grid Dynamics
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, MetaData, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- floating_ips = Table('floating_ips', meta, autoload=True)
- c_auto_assigned = Column('auto_assigned', Boolean, default=False)
- floating_ips.create_column(c_auto_assigned)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- floating_ips = Table('floating_ips', meta, autoload=True)
- floating_ips.drop_column('auto_assigned')
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE floating_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- auto_assigned BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (auto_assigned IN (0, 1)),
- FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
- );
-
- INSERT INTO floating_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- fixed_ip_id,
- project_id,
- host,
- auto_assigned
- FROM floating_ips;
-
- DROP TABLE floating_ips;
-
- CREATE TABLE floating_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
- );
-
- INSERT INTO floating_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- fixed_ip_id,
- project_id,
- host
- FROM floating_ips_backup;
-
- DROP TABLE floating_ips_backup;
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-from cinder import utils
-
-resources = [
- 'instances',
- 'cores',
- 'volumes',
- 'gigabytes',
- 'floating_ips',
- 'metadata_items',
-]
-
-
-def old_style_quotas_table(meta, name):
- return Table(name, meta,
- Column('id', Integer(), primary_key=True),
- Column('created_at', DateTime(),
- default=utils.utcnow),
- Column('updated_at', DateTime(),
- onupdate=utils.utcnow),
- Column('deleted_at', DateTime()),
- Column('deleted', Boolean(), default=False),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('instances', Integer()),
- Column('cores', Integer()),
- Column('volumes', Integer()),
- Column('gigabytes', Integer()),
- Column('floating_ips', Integer()),
- Column('metadata_items', Integer()),
- )
-
-
-def new_style_quotas_table(meta, name):
- return Table(name, meta,
- Column('id', Integer(), primary_key=True),
- Column('created_at', DateTime(),
- default=utils.utcnow),
- Column('updated_at', DateTime(),
- onupdate=utils.utcnow),
- Column('deleted_at', DateTime()),
- Column('deleted', Boolean(), default=False),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('resource',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=False),
- Column('hard_limit', Integer(), nullable=True),
- )
-
-
-def quotas_table(meta, name='quotas'):
- return Table(name, meta, autoload=True)
-
-
-def _assert_no_duplicate_project_ids(quotas):
- project_ids = set()
- message = ('There are multiple active quotas for project "%s" '
- '(among others, possibly). '
- 'Please resolve all ambiguous quotas before '
- 'reattempting the migration.')
- for quota in quotas:
- assert quota.project_id not in project_ids, message % quota.project_id
- project_ids.add(quota.project_id)
-
-
-def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
- """Ensure that there are no duplicate non-deleted quota entries."""
- select = quotas.select().where(quotas.c.deleted == False)
- results = migrate_engine.execute(select)
- _assert_no_duplicate_project_ids(list(results))
-
-
-def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
- """Ensure that there are no duplicate non-deleted quota entries."""
- for resource in resources:
- select = quotas.select().\
- where(quotas.c.deleted == False).\
- where(quotas.c.resource == resource)
- results = migrate_engine.execute(select)
- _assert_no_duplicate_project_ids(list(results))
-
-
-def convert_forward(migrate_engine, old_quotas, new_quotas):
- quotas = list(migrate_engine.execute(old_quotas.select()))
- for quota in quotas:
- for resource in resources:
- hard_limit = getattr(quota, resource)
- if hard_limit is None:
- continue
- insert = new_quotas.insert().values(
- created_at=quota.created_at,
- updated_at=quota.updated_at,
- deleted_at=quota.deleted_at,
- deleted=quota.deleted,
- project_id=quota.project_id,
- resource=resource,
- hard_limit=hard_limit)
- migrate_engine.execute(insert)
-
-
-def earliest(date1, date2):
- if date1 is None and date2 is None:
- return None
- if date1 is None:
- return date2
- if date2 is None:
- return date1
- if date1 < date2:
- return date1
- return date2
-
-
-def latest(date1, date2):
- if date1 is None and date2 is None:
- return None
- if date1 is None:
- return date2
- if date2 is None:
- return date1
- if date1 > date2:
- return date1
- return date2
-
-
-def convert_backward(migrate_engine, old_quotas, new_quotas):
- quotas = {}
- for quota in migrate_engine.execute(new_quotas.select()):
- if (quota.resource not in resources
- or quota.hard_limit is None or quota.deleted):
- continue
- if quota.project_id not in quotas:
- quotas[quota.project_id] = {
- 'project_id': quota.project_id,
- 'created_at': quota.created_at,
- 'updated_at': quota.updated_at,
- quota.resource: quota.hard_limit,
- }
- else:
- quotas[quota.project_id]['created_at'] = earliest(
- quota.created_at, quotas[quota.project_id]['created_at'])
- quotas[quota.project_id]['updated_at'] = latest(
- quota.updated_at, quotas[quota.project_id]['updated_at'])
- quotas[quota.project_id][quota.resource] = quota.hard_limit
-
- for quota in quotas.itervalues():
- insert = old_quotas.insert().values(**quota)
- migrate_engine.execute(insert)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- old_quotas = quotas_table(meta)
- assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)
-
- new_quotas = new_style_quotas_table(meta, 'quotas_new')
- new_quotas.create()
- convert_forward(migrate_engine, old_quotas, new_quotas)
- old_quotas.drop()
-
- # clear metadata to work around this:
- # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
- meta.clear()
- new_quotas = quotas_table(meta, 'quotas_new')
- new_quotas.rename('quotas')
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- new_quotas = quotas_table(meta)
- assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)
-
- old_quotas = old_style_quotas_table(meta, 'quotas_old')
- old_quotas.create()
- convert_backward(migrate_engine, old_quotas, new_quotas)
- new_quotas.drop()
-
- # clear metadata to work around this:
- # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128
- meta.clear()
- old_quotas = quotas_table(meta, 'quotas_old')
- old_quotas.rename('quotas')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- types = {}
- for instance in migrate_engine.execute(instances.select()):
- if instance.instance_type_id is None:
- types[instance.id] = None
- continue
- try:
- types[instance.id] = int(instance.instance_type_id)
- except ValueError:
- LOG.warn("Instance %s did not have instance_type_id "
- "converted to an integer because its value is %s" %
- (instance.id, instance.instance_type_id))
- types[instance.id] = None
-
- integer_column = Column('instance_type_id_int', Integer(), nullable=True)
- string_column = instances.c.instance_type_id
-
- integer_column.create(instances)
- for instance_id, instance_type_id in types.iteritems():
- update = instances.update().\
- where(instances.c.id == instance_id).\
- values(instance_type_id_int=instance_type_id)
- migrate_engine.execute(update)
-
- string_column.alter(name='instance_type_id_str')
- integer_column.alter(name='instance_type_id')
- string_column.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- integer_column = instances.c.instance_type_id
- string_column = Column('instance_type_id_str',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
-
- types = {}
- for instance in migrate_engine.execute(instances.select()):
- if instance.instance_type_id is None:
- types[instance.id] = None
- else:
- types[instance.id] = str(instance.instance_type_id)
-
- string_column.create(instances)
- for instance_id, instance_type_id in types.iteritems():
- update = instances.update().\
- where(instances.c.id == instance_id).\
- values(instance_type_id_str=instance_type_id)
- migrate_engine.execute(update)
-
- integer_column.alter(name='instance_type_id_int')
- string_column.alter(name='instance_type_id')
- integer_column.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- tokens = Table('auth_tokens', meta, autoload=True)
- c_manageent = tokens.c.server_manageent_url
- c_manageent.alter(name='server_management_url')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- tokens = Table('auth_tokens', meta, autoload=True)
- c_management = tokens.c.server_management_url
- c_management.alter(name='server_manageent_url')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 MORITA Kazutaka.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Table, MetaData
-from sqlalchemy import Integer, DateTime, Boolean, String
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- #
- # New Tables
- #
- snapshots = Table('snapshots', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('volume_id', Integer(), nullable=False),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('status',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('progress',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('volume_size', Integer()),
- Column('scheduled_at', DateTime(timezone=False)),
- Column('display_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('display_description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)))
- try:
- snapshots.create()
- except Exception:
- LOG.info(repr(snapshots))
- LOG.exception('Exception while creating table')
- meta.drop_all(tables=[snapshots])
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
- snapshots = Table('snapshots', meta, autoload=True)
- snapshots.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 MORITA Kazutaka.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Table, MetaData, Integer
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
-
- snapshot_id = Column('snapshot_id', Integer())
- # Add columns to existing tables
- volumes.create_column(snapshot_id)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
-
- volumes.drop_column('snapshot_id')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE volumes_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(256),
- provider_auth VARCHAR(256),
- snapshot_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- snapshot_id
- FROM volumes;
-
- DROP TABLE volumes;
-
- CREATE TABLE volumes (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(256),
- provider_auth VARCHAR(256),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth
- FROM volumes_backup;
-
- DROP TABLE volumes_backup;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- image_id_column = instances.c.image_id
- image_id_column.alter(name='image_ref')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- image_ref_column = instances.c.image_ref
- image_ref_column.alter(name='image_id')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE auth_tokens Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE certificates Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE compute_nodes Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE console_pools Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE consoles Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE export_devices Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE fixed_ips Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE floating_ips Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE instance_actions Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE instance_metadata Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE instance_types Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE instances Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE iscsi_targets Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE key_pairs Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE migrate_version Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE migrations Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE networks Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE projects Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE quotas Engine=InnoDB")
- migrate_engine.execute(
- "ALTER TABLE security_group_instance_association Engine=InnoDB")
- migrate_engine.execute(
- "ALTER TABLE security_group_rules Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE security_groups Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE services Engine=InnoDB")
- migrate_engine.execute(
- "ALTER TABLE user_project_association Engine=InnoDB")
- migrate_engine.execute(
- "ALTER TABLE user_project_role_association Engine=InnoDB")
- migrate_engine.execute(
- "ALTER TABLE user_role_association Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE users Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE volumes Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE zones Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE snapshots Engine=InnoDB")
-
-
-def downgrade(migrate_engine):
- pass
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances_vm_mode = Column('vm_mode',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
- instances.create_column(instances_vm_mode)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('vm_mode')
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# Copyright 2011 Isaku Yamahata
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table, Column
-from sqlalchemy import DateTime, Boolean, Integer, String
-from sqlalchemy import ForeignKey
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
-
- #
- # New Tables
- #
- block_device_mapping = Table('block_device_mapping', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, autoincrement=True),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id'),
- nullable=False),
- Column('device_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- Column('delete_on_termination',
- Boolean(create_constraint=True, name=None),
- default=False),
- Column('virtual_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True),
- Column('snapshot_id',
- Integer(),
- ForeignKey('snapshots.id'),
- nullable=True),
- Column('volume_id', Integer(), ForeignKey('volumes.id'),
- nullable=True),
- Column('volume_size', Integer(), nullable=True),
- Column('no_device',
- Boolean(create_constraint=True, name=None),
- nullable=True),
- )
- try:
- block_device_mapping.create()
- except Exception:
- LOG.info(repr(block_device_mapping))
- LOG.exception('Exception while creating table')
- meta.drop_all(tables=[block_device_mapping])
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
-
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- block_device_mapping.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-from cinder import utils
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- uuid_column = Column("uuid", String(36))
- instances.create_column(uuid_column)
-
- rows = migrate_engine.execute(instances.select())
- for row in rows:
- instance_uuid = str(utils.gen_uuid())
- migrate_engine.execute(instances.update()
- .where(instances.c.id == row[0])
- .values(uuid=instance_uuid))
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('uuid')
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- #
- # New Tables
- #
- builds = Table('agent_builds', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('hypervisor',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('os',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('architecture',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('version',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('url',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('md5hash',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-    for table in (builds, ):
-        try:
-            table.create()
-        except Exception:
-            LOG.info(repr(table))
-            LOG.exception('Exception while creating table')
-            raise
-
- instances = Table('instances', meta, autoload=True)
-
- #
- # New Columns
- #
- architecture = Column('architecture', String(length=255))
-
- # Add columns to existing tables
- instances.create_column(architecture)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- builds = Table('agent_builds', meta, autoload=True)
- for table in (builds, ):
- table.drop()
-
- instances = Table('instances', meta, autoload=True)
- instances.drop_column('architecture')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime
-from sqlalchemy import Integer, MetaData, String
-from sqlalchemy import Table
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- #
- # New Tables
- #
- provider_fw_rules = Table('provider_fw_rules', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('protocol',
- String(length=5, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('from_port', Integer()),
- Column('to_port', Integer()),
- Column('cidr',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)))
- for table in (provider_fw_rules,):
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- provider_fw_rules = Table('provider_fw_rules', meta, autoload=True)
- for table in (provider_fw_rules,):
- table.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instance_types = Table('instance_types', meta, autoload=True)
-
- #
- # New Tables
- #
- instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_type_id',
- Integer(),
- ForeignKey('instance_types.id'),
- nullable=False),
- Column('key',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('value',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)))
-
- for table in (instance_type_extra_specs_table, ):
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instance_types = Table('instance_types', meta, autoload=True)
-
- instance_type_extra_specs_table = Table('instance_type_extra_specs',
- meta,
- autoload=True)
- for table in (instance_type_extra_specs_table, ):
- table.drop()
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Float, Integer, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- zones = Table('zones', meta, autoload=True)
-
- #
- # New Columns
- #
- weight_offset = Column('weight_offset', Float(), default=0.0)
- weight_scale = Column('weight_scale', Float(), default=1.0)
-
- zones.create_column(weight_offset)
- zones.create_column(weight_scale)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- zones = Table('zones', meta, autoload=True)
-
- zones.drop_column('weight_offset')
- zones.drop_column('weight_scale')
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import select, Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import Integer, MetaData, String
-from sqlalchemy import Table
-
-from cinder import log as logging
-from cinder import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # grab tables and (column for dropping later)
- instances = Table('instances', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- c = instances.columns['mac_address']
-
- interface = Column('bridge_interface',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
-
- virtual_interface_id = Column('virtual_interface_id',
- Integer())
- # add interface column to networks table
- # values will have to be set manually before running cinder
- try:
- networks.create_column(interface)
- except Exception:
- LOG.error(_("interface column not added to networks table"))
- raise
-
- #
- # New Tables
- #
- virtual_interfaces = Table('virtual_interfaces', meta,
- Column('created_at', DateTime(timezone=False),
- default=utils.utcnow()),
- Column('updated_at', DateTime(timezone=False),
- onupdate=utils.utcnow()),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- unique=True),
- Column('network_id',
- Integer(),
- ForeignKey('networks.id')),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id'),
- nullable=False),
- mysql_engine='InnoDB')
-
- # create virtual_interfaces table
- try:
- virtual_interfaces.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(virtual_interfaces))
- raise
-
- # add virtual_interface_id column to fixed_ips table
- try:
- fixed_ips.create_column(virtual_interface_id)
- except Exception:
- LOG.error(_("VIF column not added to fixed_ips table"))
- raise
-
- # populate the virtual_interfaces table
- # extract data from existing instance and fixed_ip tables
- s = select([instances.c.id, instances.c.mac_address,
- fixed_ips.c.network_id],
- fixed_ips.c.instance_id == instances.c.id)
- keys = ('instance_id', 'address', 'network_id')
- join_list = [dict(zip(keys, row)) for row in s.execute()]
- LOG.debug(_("join list for moving mac_addresses |%s|"), join_list)
-
- # insert data into the table
- if join_list:
- i = virtual_interfaces.insert()
- i.execute(join_list)
-
- # populate the fixed_ips virtual_interface_id column
-    # use != None so SQLAlchemy renders "IS NOT NULL"; a Python
-    # "is not None" test against a Column is always True and filters nothing
-    s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
-               fixed_ips.c.instance_id != None)
-
- for row in s.execute():
- m = select([virtual_interfaces.c.id]).\
- where(virtual_interfaces.c.instance_id == row['instance_id']).\
- as_scalar()
- u = fixed_ips.update().values(virtual_interface_id=m).\
- where(fixed_ips.c.id == row['id'])
- u.execute()
-
- # drop the mac_address column from instances
- c.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # grab tables and (column for dropping later)
- instances = Table('instances', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
-
- mac_address = Column('mac_address',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
-
- instances.create_column(mac_address)
-
- s = select([instances.c.id, virtual_interfaces.c.address],
- virtual_interfaces.c.instance_id == instances.c.id)
-
-    for row in s.execute():
-        u = instances.update().values(mac_address=row['address']).\
-            where(instances.c.id == row['id'])
-        u.execute()
-
- networks.drop_column('bridge_interface')
- virtual_interfaces.drop()
- fixed_ips.drop_column('virtual_interface_id')
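The upgrade above populates fixed_ips.virtual_interface_id one row at a time with a correlated subquery. For illustration only (not the original migration), the same backfill can be sketched with two bulk reads and an in-memory map, assuming the instance-to-interface mapping fits in memory:

from sqlalchemy import MetaData, Table, select


def backfill_virtual_interface_ids(migrate_engine):
    """Sketch: point each fixed IP at its instance's virtual interface."""
    meta = MetaData(bind=migrate_engine)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)

    # one round trip to build an instance_id -> virtual interface id map
    vif_by_instance = dict(
        migrate_engine.execute(select([virtual_interfaces.c.instance_id,
                                       virtual_interfaces.c.id])))

    for fip in migrate_engine.execute(select([fixed_ips.c.id,
                                              fixed_ips.c.instance_id])):
        vif_id = vif_by_instance.get(fip['instance_id'])
        if vif_id is not None:
            migrate_engine.execute(
                fixed_ips.update()
                .where(fixed_ips.c.id == fip['id'])
                .values(virtual_interface_id=vif_id))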
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE instances_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- admin_pass VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- state INTEGER,
- state_description VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (locked IN (0, 1)),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO instances_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- admin_pass,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- state,
- state_description,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture
- FROM instances;
-
- DROP TABLE instances;
-
- CREATE TABLE instances (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- admin_pass VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- state INTEGER,
- state_description VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- mac_address VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (locked IN (0, 1)),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO instances
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- admin_pass,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- state,
- state_description,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture,
- NULL AS mac_address
- FROM instances_backup;
-
- DROP TABLE instances_backup;
-
- UPDATE instances SET mac_address=(SELECT address
- FROM virtual_interfaces
- WHERE virtual_interfaces.instance_id = instances.id);
-
- CREATE TEMPORARY TABLE networks_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- gateway_v6 VARCHAR(255),
- label VARCHAR(255),
- netmask_v6 VARCHAR(255),
- bridge_interface VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (injected IN (0, 1)),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO networks_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- gateway_v6,
- label,
- netmask_v6,
- bridge_interface
- FROM networks;
-
- DROP TABLE networks;
-
- CREATE TABLE networks (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- gateway_v6 VARCHAR(255),
- label VARCHAR(255),
- netmask_v6 VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (injected IN (0, 1)),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO networks
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- gateway_v6,
- label,
- netmask_v6
- FROM networks_backup;
-
- DROP TABLE networks_backup;
-
- DROP TABLE virtual_interfaces;
-
- CREATE TEMPORARY TABLE fixed_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- PRIMARY KEY (id),
- CHECK (reserved IN (0, 1)),
- CHECK (allocated IN (0, 1)),
- CHECK (leased IN (0, 1)),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- FOREIGN KEY(network_id) REFERENCES networks (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- virtual_interface_id
- FROM fixed_ips;
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- PRIMARY KEY (id),
- CHECK (reserved IN (0, 1)),
- CHECK (allocated IN (0, 1)),
- CHECK (leased IN (0, 1)),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- FOREIGN KEY(network_id) REFERENCES networks (id)
- );
-
- INSERT INTO fixed_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT;
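This SQLite-only downgrade follows the usual rebuild pattern, since SQLite of this vintage cannot drop columns or constraints with ALTER TABLE: copy the rows into a temporary backup table, drop and recreate the table with the desired schema, copy the rows back, then drop the backup. A hypothetical helper sketching that pattern (not code from this tree; it stashes the rows with CREATE TABLE ... AS SELECT instead of an explicit backup schema):

def rebuild_sqlite_table(connection, table, create_sql, columns):
    """Sketch: recreate `table` from `create_sql`, keeping the listed columns."""
    backup = '%s_backup' % table
    cols = ', '.join(columns)
    # stash the current rows
    connection.execute('CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s'
                       % (backup, cols, table))
    connection.execute('DROP TABLE %s' % table)
    # recreate the table with its new definition
    connection.execute(create_sql)
    # copy the rows back from the backup, then discard it
    connection.execute('INSERT INTO %s (%s) SELECT %s FROM %s'
                       % (table, cols, cols, backup))
    connection.execute('DROP TABLE %s' % backup)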
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
-
- # grab tables
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
-
- # add foreignkey if not sqlite
- try:
- if not dialect.startswith('sqlite'):
- ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
- refcolumns=[virtual_interfaces.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be added"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
-
- # grab tables
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
-
- # drop foreignkey if not sqlite
- try:
- if not dialect.startswith('sqlite'):
- ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
- refcolumns=[virtual_interfaces.c.id]).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be dropped"))
- raise
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE fixed_ips_backup (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
- FROM fixed_ips;
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips
- SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
-    FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE fixed_ips_backup (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
- FROM fixed_ips;
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
- );
-
- INSERT INTO fixed_ips
- SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
-    FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# Copyright 2011 Isaku Yamahata
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table, String
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- root_device_name = Column(
- 'root_device_name',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True)
- instances.create_column(root_device_name)
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('root_device_name')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Table, MetaData, Boolean, String
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- fixed_ips_host = Column('host', String(255))
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- fixed_ips.create_column(fixed_ips_host)
-
- networks_multi_host = Column('multi_host', Boolean, default=False)
- networks = Table('networks', meta, autoload=True)
- networks.create_column(networks_multi_host)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- fixed_ips.drop_column('host')
-
- networks = Table('networks', meta, autoload=True)
- networks.drop_column('multi_host')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE fixed_ips_backup (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- host VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT id,
- address,
- virtual_interface_id,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- created_at,
- updated_at,
- deleted_at,
- deleted,
- host
- FROM fixed_ips;
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
- );
-
- INSERT INTO fixed_ips
- SELECT id,
- address,
- virtual_interface_id,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- created_at,
- updated_at,
- deleted_at,
- deleted
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
- CREATE TEMPORARY TABLE networks_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- gateway_v6 VARCHAR(255),
- label VARCHAR(255),
- netmask_v6 VARCHAR(255),
- bridge_interface VARCHAR(255),
- multi_host BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (injected IN (0, 1)),
- CHECK (multi_host IN (0, 1))
- );
-
- INSERT INTO networks_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- gateway_v6,
- label,
- netmask_v6,
- bridge_interface,
- multi_host
- FROM networks;
-
- DROP TABLE networks;
-
- CREATE TABLE networks(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- gateway_v6 VARCHAR(255),
- label VARCHAR(255),
- netmask_v6 VARCHAR(255),
- bridge_interface VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (injected IN (0, 1))
- );
-
- INSERT INTO networks
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- gateway_v6,
- label,
- netmask_v6,
- bridge_interface
- FROM networks_backup;
-
- DROP TABLE networks_backup;
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, String, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- migrations = Table('migrations', meta, autoload=True)
- instance_uuid = Column('instance_uuid', String(255))
- migrations.create_column(instance_uuid)
-
- if migrate_engine.name == "mysql":
- try:
- migrate_engine.execute("ALTER TABLE migrations DROP FOREIGN KEY "
- "`migrations_ibfk_1`;")
- except Exception: # Don't care, just fail silently.
- pass
-
- migrations.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- migrations = Table('migrations', meta, autoload=True)
- migrations.c.instance_uuid.drop()
- instance_id = Column('instance_id', Integer())
- migrations.create_column(instance_id)
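The upgrade above hard-codes the MySQL constraint name migrations_ibfk_1 and silently swallows any failure. As an illustration only (not the original code), the constraint name can instead be discovered by reflection, mirroring the approach the virtual_interfaces foreign-key migration later in this patch uses:

from sqlalchemy import MetaData, Table
from migrate import ForeignKeyConstraint


def drop_migrations_instance_fkey(migrate_engine):
    """Sketch: drop the migrations.instance_id FK without naming it by hand."""
    meta = MetaData(bind=migrate_engine)
    migrations = Table('migrations', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    fkeys = list(migrations.c.instance_id.foreign_keys)
    if fkeys:  # no-op on backends (e.g. sqlite) where no FK was reflected
        ForeignKeyConstraint(columns=[migrations.c.instance_id],
                             refcolumns=[instances.c.id],
                             name=fkeys[0].constraint.name).drop()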
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Table, MetaData, String
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- networks.c.dns.alter(name='dns1')
- dns2 = Column('dns2', String(255))
- networks.create_column(dns2)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- networks.c.dns1.alter(name='dns')
- networks.drop_column('dns2')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- migrations = Table('migrations', meta, autoload=True)
-
- old_instance_type_id = Column('old_instance_type_id', Integer())
- new_instance_type_id = Column('new_instance_type_id', Integer())
- migrations.create_column(old_instance_type_id)
- migrations.create_column(new_instance_type_id)
-
- # Convert flavor_id to instance_type_id
- itypes = {}
- for instance_type in migrate_engine.execute(instance_types.select()):
- itypes[instance_type.id] = instance_type.flavorid
-
- for instance_type_id in itypes.keys():
- migrate_engine.execute(migrations.update()
- .where(migrations.c.old_flavor_id == itypes[instance_type_id])
- .values(old_instance_type_id=instance_type_id))
- migrate_engine.execute(migrations.update()
- .where(migrations.c.new_flavor_id == itypes[instance_type_id])
- .values(new_instance_type_id=instance_type_id))
-
- migrations.c.old_flavor_id.drop()
- migrations.c.new_flavor_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- migrations = Table('migrations', meta, autoload=True)
-
- old_flavor_id = Column('old_flavor_id', Integer())
- new_flavor_id = Column('new_flavor_id', Integer())
-
- migrations.create_column(old_flavor_id)
- migrations.create_column(new_flavor_id)
-
- # Convert instance_type_id to flavor_id
- itypes = {}
- for instance_type in migrate_engine.execute(instance_types.select()):
- itypes[instance_type.flavorid] = instance_type.id
-
- for instance_type_flavorid in itypes.keys():
- migrate_engine.execute(migrations.update()
- .where(migrations.c.old_instance_type_id ==
- itypes[instance_type_flavorid])
- .values(old_flavor_id=instance_type_flavorid))
- migrate_engine.execute(migrations.update()
- .where(migrations.c.new_instance_type_id ==
- itypes[instance_type_flavorid])
- .values(new_flavor_id=instance_type_flavorid))
-
- migrations.c.old_instance_type_id.drop()
- migrations.c.new_instance_type_id.drop()
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, MetaData, Table, String
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('admin_pass')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- #
- # New Columns
- #
- admin_pass = Column(
- 'admin_pass',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True)
-
- instances.create_column(admin_pass)
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (C) 2011 Midokura KK
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-from cinder import utils
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
-
- uuid_column = Column('uuid', String(36))
- virtual_interfaces.create_column(uuid_column)
-
- rows = migrate_engine.execute(virtual_interfaces.select())
- for row in rows:
- vif_uuid = str(utils.gen_uuid())
- migrate_engine.execute(virtual_interfaces.update()
- .where(virtual_interfaces.c.id == row[0])
- .values(uuid=vif_uuid))
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
-
- virtual_interfaces.drop_column('uuid')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(network_id) REFERENCES networks (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (address),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- uuid
- FROM virtual_interfaces;
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(network_id) REFERENCES networks (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (address),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO virtual_interfaces
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table, String
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- accessIPv4 = Column(
- 'access_ip_v4',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True)
-
- accessIPv6 = Column(
- 'access_ip_v6',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True)
- instances.create_column(accessIPv4)
- instances.create_column(accessIPv6)
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('access_ip_v4')
- instances.drop_column('access_ip_v6')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-from cinder import utils
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- uuid_column = Column("uuid", String(36))
- networks.create_column(uuid_column)
-
- rows = migrate_engine.execute(networks.select())
- for row in rows:
- networks_uuid = str(utils.gen_uuid())
- migrate_engine.execute(networks.update()
- .where(networks.c.id == row[0])
- .values(uuid=networks_uuid))
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- networks.drop_column('uuid')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2011 Piston Cloud Computing, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table("instances", meta, autoload=True)
-
- config_drive_column = Column("config_drive", String(255), nullable=True)
- instances.create_column(config_drive_column)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table("instances", meta, autoload=True)
-
- instances.drop_column('config_drive')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
-from sqlalchemy import Boolean, ForeignKey
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
-
- #
- # New Tables
- #
- volume_types = Table('volume_types', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- unique=True))
-
- volume_type_extra_specs_table = Table('volume_type_extra_specs', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('volume_type_id',
- Integer(),
- ForeignKey('volume_types.id'),
- nullable=False),
- Column('key',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('value',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)))
-
- volume_metadata_table = Table('volume_metadata', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('volume_id',
- Integer(),
- ForeignKey('volumes.id'),
- nullable=False),
- Column('key',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('value',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)))
-
- new_tables = (volume_types,
- volume_type_extra_specs_table,
- volume_metadata_table)
-
- for table in new_tables:
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- raise
-
- #
- # New Columns
- #
- volume_type_id = Column('volume_type_id', Integer(), nullable=True)
- volumes.create_column(volume_type_id)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
-
- volumes.drop_column('volume_type_id')
-
- volume_types = Table('volume_types', meta, autoload=True)
- volume_type_extra_specs_table = Table('volume_type_extra_specs',
- meta,
- autoload=True)
- volume_metadata_table = Table('volume_metadata', meta, autoload=True)
-
- # table order matters, don't change
- for table in (volume_type_extra_specs_table,
- volume_types,
- volume_metadata_table):
- table.drop()
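The drop order above is pinned by hand because volume_type_extra_specs carries a foreign key into volume_types. For illustration (not the original code), the same ordering can be derived from SQLAlchemy's dependency sort:

from sqlalchemy import MetaData, Table


def drop_volume_type_tables(migrate_engine):
    """Sketch: drop the volume-type tables in foreign-key-safe order."""
    meta = MetaData(bind=migrate_engine)
    names = set(['volume_type_extra_specs', 'volume_types', 'volume_metadata'])
    for name in names:
        Table(name, meta, autoload=True)

    # sorted_tables lists referenced tables before their referrers, so a
    # reverse walk drops volume_type_extra_specs before volume_types
    for table in reversed(meta.sorted_tables):
        if table.name in names:
            table.drop()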
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE volumes_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(256),
- provider_auth VARCHAR(256),
- snapshot_id INTEGER,
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- snapshot_id,
- volume_type_id
- FROM volumes;
-
- DROP TABLE volumes;
-
- CREATE TABLE volumes (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id VARCHAR(255),
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(256),
- provider_auth VARCHAR(256),
- snapshot_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- snapshot_id
- FROM volumes_backup;
-
- DROP TABLE volumes_backup;
-
- DROP TABLE volume_type_extra_specs;
-
- DROP TABLE volume_types;
-
- DROP TABLE volume_metadata;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Zadara Storage Inc.
-# Copyright (c) 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
-from sqlalchemy import Boolean
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- #
- # New Tables
- #
- virtual_storage_arrays = Table('virtual_storage_arrays', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('display_name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('display_description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('availability_zone',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('instance_type_id', Integer(), nullable=False),
- Column('image_ref',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('vc_count', Integer(), nullable=False),
- Column('vol_count', Integer(), nullable=False),
- Column('status',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- try:
- virtual_storage_arrays.create()
- except Exception:
- LOG.info(repr(virtual_storage_arrays))
- LOG.exception('Exception while creating table')
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- virtual_storage_arrays = Table('virtual_storage_arrays',
- meta,
- autoload=True)
- virtual_storage_arrays.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table, Column, String
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_table = Table('instances', meta, autoload=True)
-
- c_state = instance_table.c.state
- c_state.alter(name='power_state')
-
- c_vm_state = instance_table.c.state_description
- c_vm_state.alter(name='vm_state')
-
- c_task_state = Column('task_state',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
- instance_table.create_column(c_task_state)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_table = Table('instances', meta, autoload=True)
-
- c_state = instance_table.c.power_state
- c_state.alter(name='state')
-
- c_vm_state = instance_table.c.vm_state
- c_vm_state.alter(name='state_description')
-
- instance_table.drop_column('task_state')
+++ /dev/null
-# Copyright 2011 Nicira, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table
-
-from cinder import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- priority = Column('priority', Integer())
- try:
- networks.create_column(priority)
- except Exception:
- LOG.error(_("priority column not added to networks table"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- networks = Table('networks', meta, autoload=True)
-
- networks.drop_column('priority')
+++ /dev/null
-# Copyright 2011 Isaku Yamahata
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table, String
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- default_local_device = Column(
- 'default_local_device',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True)
-
- default_swap_device = Column(
- 'default_swap_device',
- String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=True)
- instances.create_column(default_local_device)
- instances.create_column(default_swap_device)
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('default_swap_device')
- instances.drop_column('default_local_device')
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- if dialect.startswith('sqlite'):
- return
-
- instances = Table('instances', meta, autoload=True)
- vifs = Table('virtual_interfaces', meta, autoload=True)
-
- try:
- fkey_name = list(vifs.c.instance_id.foreign_keys)[0].constraint.name
- ForeignKeyConstraint(columns=[vifs.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- if dialect.startswith('sqlite'):
- return
-
- instances = Table('instances', meta, autoload=True)
- vifs = Table('virtual_interfaces', meta, autoload=True)
-
- try:
- ForeignKeyConstraint(columns=[vifs.c.instance_id],
- refcolumns=[instances.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be added"))
- raise
+++ /dev/null
-BEGIN TRANSACTION;
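- -- Copy the rows aside, recreate virtual_interfaces with the foreign key on
- -- instance_id restored, and copy the rows back (SQLite cannot alter constraints in place).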
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces;
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(network_id) REFERENCES networks (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (address),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO virtual_interfaces
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces;
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(network_id) REFERENCES networks (id),
- UNIQUE (address),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO virtual_interfaces
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- zones = Table('zones', meta, autoload=True)
- name = Column('name', String(255))
- zones.create_column(name)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- zones = Table('zones', meta, autoload=True)
-
- zones.drop_column('name')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- progress = Column('progress', Integer())
- try:
- instances.create_column(progress)
- except Exception:
- LOG.error(_("progress column not added to instances table"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('progress')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, Integer, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- managed_disk = Column("managed_disk", Boolean(create_constraint=False,
- name=None))
- instances.create_column(managed_disk)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
-
- instances.drop_column('managed_disk')
+++ /dev/null
-BEGIN TRANSACTION;
-
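- -- Stage the instance rows in a temporary copy, then recreate the instances
- -- table without the managed_disk column and copy the rows back.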
- CREATE TEMPORARY TABLE instances_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- power_state INTEGER,
- vm_state VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- root_device_name VARCHAR(255),
- access_ip_v4 VARCHAR(255),
- access_ip_v6 VARCHAR(255),
- config_drive VARCHAR(255),
- task_state VARCHAR(255),
- default_local_device VARCHAR(255),
- default_swap_device VARCHAR(255),
- progress INTEGER,
- managed_disk BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (locked IN (0, 1)),
- CHECK (managed_disk IN (0, 1))
- );
-
- INSERT INTO instances_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- power_state,
- vm_state,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture,
- root_device_name,
- access_ip_v4,
- access_ip_v6,
- config_drive,
- task_state,
- default_local_device,
- default_swap_device,
- progress,
- managed_disk
- FROM instances;
-
- DROP TABLE instances;
-
- CREATE TABLE instances (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- power_state INTEGER,
- vm_state VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- root_device_name VARCHAR(255),
- access_ip_v4 VARCHAR(255),
- access_ip_v6 VARCHAR(255),
- config_drive VARCHAR(255),
- task_state VARCHAR(255),
- default_local_device VARCHAR(255),
- default_swap_device VARCHAR(255),
- progress INTEGER,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (locked IN (0, 1))
- );
-
- INSERT INTO instances
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- power_state,
- vm_state,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture,
- root_device_name,
- access_ip_v4,
- access_ip_v6,
- config_drive,
- task_state,
- default_local_device,
- default_swap_device,
- progress
- FROM instances_backup;
-
- DROP TABLE instances_backup;
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
-
- vcpu_weight = Column("vcpu_weight", Integer())
- instance_types.create_column(vcpu_weight)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
-
- instance_types.drop_column('vcpu_weight')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
-from sqlalchemy import MetaData, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- volumes = Table('volumes', meta, autoload=True)
-
- #
- # New Tables
- #
- export_devices = Table('export_devices', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('shelf_id', Integer()),
- Column('blade_id', Integer()),
- Column('volume_id',
- Integer(),
- ForeignKey('volumes.id'),
- nullable=True),
- )
-
- try:
- export_devices.create()
- except Exception:
- LOG.info(repr(export_devices))
- LOG.exception('Exception while creating table')
- raise
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- volumes = Table('volumes', meta, autoload=True)
-
- export_devices = Table('export_devices', meta, autoload=True)
-
- export_devices.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, MetaData, Table, Text
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- table = Table('block_device_mapping', meta, autoload=True)
-
- new_column = Column('connection_info', Text())
-
- table.create_column(new_column)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- table = Table('block_device_mapping', meta, autoload=True)
-
- table.c.connection_info.drop()
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping;
-
- DROP TABLE block_device_mapping;
-
- CREATE TABLE block_device_mapping (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device
- FROM block_device_mapping_backup;
-
- DROP TABLE block_device_mapping_backup;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 MORITA Kazutaka.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Table, MetaData
-from sqlalchemy import Integer, BigInteger, DateTime, Boolean, String
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- #
- # New Tables
- #
- bw_cache = Table('bw_usage_cache', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id', Integer(), nullable=False),
- Column('network_label',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('start_period', DateTime(timezone=False), nullable=False),
- Column('last_refreshed', DateTime(timezone=False)),
- Column('bw_in', BigInteger()),
- Column('bw_out', BigInteger()))
- try:
- bw_cache.create()
- except Exception:
- LOG.info(repr(bw_cache))
- LOG.exception('Exception while creating table')
- meta.drop_all(tables=[bw_cache])
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
- bw_cache = Table('bw_usage_cache', meta, autoload=True)
- bw_cache.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import migrate.changeset
-from sqlalchemy import Column, Integer, String, MetaData, Table
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
-
- string_column = Column('flavorid_str', String(255))
-
- string_column.create(instance_types)
-
- try:
- # NOTE(bcwaldon): This catches a bug with python-migrate
- # failing to add the unique constraint
- try:
- migrate.UniqueConstraint(string_column).create()
- except migrate.changeset.NotSupportedError:
- LOG.error("Failed to add unique constraint on flavorid")
- pass
-
- # NOTE(bcwaldon): this is a hack to preserve uniqueness constraint
- # on existing 'name' column
- try:
- migrate.UniqueConstraint(instance_types.c.name).create()
- except Exception:
- pass
-
- integer_column = instance_types.c.flavorid
-
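- # Copy every existing integer flavorid into the new string column.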
- instance_type_rows = list(instance_types.select().execute())
- for instance_type in instance_type_rows:
- flavorid_int = instance_type.flavorid
- instance_types.update()\
- .where(integer_column == flavorid_int)\
- .values(flavorid_str=str(flavorid_int))\
- .execute()
- except Exception:
- string_column.drop()
- raise
-
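- # Swap the columns: the old integer column is renamed aside, the string
- # column takes over the 'flavorid' name, and the integer column is dropped.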
- integer_column.alter(name='flavorid_int')
- string_column.alter(name='flavorid')
- integer_column.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
-
- integer_column = Column('flavorid_int', Integer())
-
- integer_column.create(instance_types)
-
- try:
- # NOTE(bcwaldon): This catches a bug with python-migrate
- # failing to add the unique constraint
- try:
- migrate.UniqueConstraint(integer_column).create()
- except migrate.changeset.NotSupportedError:
- LOG.info("Failed to add unique constraint on flavorid")
- pass
-
- string_column = instance_types.c.flavorid
-
- instance_types_rows = list(instance_types.select().execute())
- for instance_type in instance_types_rows:
- flavorid_str = instance_type.flavorid
- try:
- flavorid_int = int(instance_type.flavorid)
- except ValueError:
- msg = _('Could not cast flavorid to integer: %s. '
- 'Set flavorid to an integer-like string to downgrade.')
- LOG.error(msg % instance_type.flavorid)
- raise
-
- instance_types.update()\
- .where(string_column == flavorid_str)\
- .values(flavorid_int=flavorid_int)\
- .execute()
- except Exception:
- integer_column.drop()
- raise
-
- string_column.alter(name='flavorid_str')
- integer_column.alter(name='flavorid')
- string_column.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- #
- # New Tables
- #
- s3_images = Table('s3_images', meta,
- Column('created_at',
- DateTime(timezone=False)),
- Column('updated_at',
- DateTime(timezone=False)),
- Column('deleted_at',
- DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36),
- nullable=False))
- try:
- s3_images.create()
- except Exception:
- LOG.exception("Exception while creating table 's3_images'")
- meta.drop_all(tables=[s3_images])
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- s3_images = Table('s3_images', meta, autoload=True)
- s3_images.drop()
+++ /dev/null
-# Copyright (c) 2011 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import Integer, MetaData, String
-from sqlalchemy import Table
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- volumes = Table('volumes', meta, autoload=True)
-
- #
- # New Tables
- #
- flavors = Table('sm_flavors', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('label',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('description',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
-
- backend = Table('sm_backend_config', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'),
- nullable=False),
- Column('sr_uuid',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('sr_type',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('config_params',
- String(length=2047,
- convert_unicode=False,
- assert_unicode=None,
- unicode_error=None,
- _warn_on_bytestring=False)),
- )
-
- sm_vol = Table('sm_volume', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), ForeignKey('volumes.id'),
- primary_key=True, nullable=False),
- Column('backend_id', Integer(),
- ForeignKey('sm_backend_config.id'),
- nullable=False),
- Column('vdi_uuid',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
- for table in (flavors, backend, sm_vol):
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table')
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- volumes = Table('volumes', meta, autoload=True)
-
- flavors = Table('sm_flavors', meta, autoload=True)
- backend = Table('sm_backend_config', meta, autoload=True)
- sm_vol = Table('sm_volume', meta, autoload=True)
-
- for table in (flavors, backend, sm_vol):
- try:
- table.drop()
- except Exception:
- LOG.info(repr(table))
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- managed_disk = instances.c.managed_disk
- managed_disk.alter(name='auto_disk_config')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- auto_disk_config_column = instances.c.auto_disk_config
- auto_disk_config_column.alter(name='managed_disk')
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, Float, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
-
- rxtx_base = Column('rxtx_base', Integer)
- rxtx_factor = Column('rxtx_factor', Float, default=1)
- instance_types.create_column(rxtx_factor)
- networks.create_column(rxtx_base)
-
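- # Use the smallest positive rxtx_cap as the shared base rate; each flavor's
- # rxtx_factor becomes its old cap divided by that base, and every network
- # records the base in rxtx_base.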
- base = migrate_engine.execute("select min(rxtx_cap) as min_rxtx from "
- "instance_types where rxtx_cap > 0").scalar()
- base = base if base > 1 else 1
- update_i_type_sql = ("update instance_types set rxtx_factor = rxtx_cap"
- "/%s where rxtx_cap > 0" % base)
- migrate_engine.execute(update_i_type_sql)
- migrate_engine.execute("update networks set rxtx_base = %s" % base)
-
- instance_types.c.rxtx_quota.drop()
- instance_types.c.rxtx_cap.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
-
- rxtx_quota = Column('rxtx_quota', Integer)
- rxtx_cap = Column('rxtx_cap', Integer)
- instance_types.create_column(rxtx_quota)
- instance_types.create_column(rxtx_cap)
-
- base = migrate_engine.execute("select min(rxtx_base) from networks "
- "where rxtx_base > 0").scalar()
- base = base if base > 1 else 1
-
- update_i_type_sql = ("update instance_types set rxtx_cap = "
- "rxtx_factor * %s" % base)
- migrate_engine.execute(update_i_type_sql)
-
- instance_types.c.rxtx_factor.drop()
- networks.c.rxtx_base.drop()
+++ /dev/null
-BEGIN TRANSACTION;
-
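- -- Restore rxtx_quota/rxtx_cap on instance_types (converting rxtx_factor back
- -- into an absolute cap) and rebuild networks without the rxtx_base column.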
- CREATE TEMPORARY TABLE instance_types_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- memory_mb INTEGER NOT NULL,
- vcpus INTEGER NOT NULL,
- local_gb INTEGER NOT NULL,
- swap INTEGER NOT NULL,
- rxtx_quota INTEGER NOT NULL,
- rxtx_cap INTEGER NOT NULL,
- vcpu_weight INTEGER,
- flavorid VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- UNIQUE (flavorid),
- UNIQUE (name)
- );
-
- INSERT INTO instance_types_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- name,
- id,
- memory_mb,
- vcpus,
- local_gb,
- swap,
- 0 as rxtx_quota,
- COALESCE(rxtx_factor, 1) * COALESCE ((SELECT MIN(rxtx_base)
- FROM networks
- WHERE rxtx_base > 0), 1)
- as rxtx_cap,
- vcpu_weight,
- flavorid FROM instance_types;
-
- DROP TABLE instance_types;
-
- CREATE TABLE instance_types (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- memory_mb INTEGER NOT NULL,
- vcpus INTEGER NOT NULL,
- local_gb INTEGER NOT NULL,
- swap INTEGER NOT NULL,
- rxtx_quota INTEGER NOT NULL,
- rxtx_cap INTEGER NOT NULL,
- vcpu_weight INTEGER,
- flavorid VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (flavorid),
- CHECK (deleted IN (0, 1)),
- UNIQUE (name)
- );
-
- INSERT INTO instance_types SELECT * FROM instance_types_backup;
- DROP TABLE instance_types_backup;
-
- CREATE TABLE networks_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- injected BOOLEAN,
- cidr VARCHAR(255),
- netmask VARCHAR(255),
- bridge VARCHAR(255),
- gateway VARCHAR(255),
- broadcast VARCHAR(255),
- dns1 VARCHAR(255),
- vlan INTEGER,
- vpn_public_address VARCHAR(255),
- vpn_public_port INTEGER,
- vpn_private_address VARCHAR(255),
- dhcp_start VARCHAR(255),
- project_id VARCHAR(255),
- host VARCHAR(255),
- cidr_v6 VARCHAR(255),
- gateway_v6 VARCHAR(255),
- label VARCHAR(255),
- netmask_v6 VARCHAR(255),
- bridge_interface VARCHAR(255),
- multi_host BOOLEAN,
- dns2 VARCHAR(255),
- uuid VARCHAR(36),
- priority INTEGER,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (injected IN (0, 1)),
- CHECK (multi_host IN (0, 1))
- );
-
- INSERT INTO networks_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- injected,
- cidr,
- netmask,
- bridge,
- gateway,
- broadcast,
- dns1,
- vlan,
- vpn_public_address,
- vpn_public_port,
- vpn_private_address,
- dhcp_start,
- project_id,
- host,
- cidr_v6,
- gateway_v6,
- label,
- netmask_v6,
- bridge_interface,
- multi_host,
- dns2,
- uuid,
- priority
- FROM networks;
-
- DROP TABLE networks;
- ALTER TABLE networks_backup RENAME TO networks;
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
-
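- -- Replace rxtx_quota/rxtx_cap on instance_types with rxtx_factor (each cap
- -- scaled by the smallest positive cap) and add an rxtx_base column to networks.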
- CREATE TEMPORARY TABLE instance_types_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- memory_mb INTEGER NOT NULL,
- vcpus INTEGER NOT NULL,
- local_gb INTEGER NOT NULL,
- swap INTEGER NOT NULL,
- rxtx_factor FLOAT,
- vcpu_weight INTEGER,
- flavorid VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (flavorid),
- CHECK (deleted IN (0, 1)),
- UNIQUE (name)
- );
-
- INSERT INTO instance_types_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- name,
- id,
- memory_mb,
- vcpus,
- local_gb,
- swap,
- COALESCE(rxtx_cap, 1) / COALESCE ((SELECT MIN(rxtx_cap)
- FROM instance_types
- WHERE rxtx_cap > 0), 1) as rxtx_factor,
- vcpu_weight,
- flavorid
- FROM instance_types;
-
- ALTER TABLE networks ADD COLUMN rxtx_base INTEGER DEFAULT 1;
-
- UPDATE networks SET rxtx_base = COALESCE((SELECT MIN(rxtx_cap)
- FROM instance_types
- WHERE rxtx_cap>0), 1);
-
- DROP TABLE instance_types;
-
- CREATE TABLE instance_types (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- memory_mb INTEGER NOT NULL,
- vcpus INTEGER NOT NULL,
- local_gb INTEGER NOT NULL,
- swap INTEGER NOT NULL,
- rxtx_factor FLOAT,
- vcpu_weight INTEGER,
- flavorid VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (flavorid),
- CHECK (deleted IN (0, 1)),
- UNIQUE (name)
- );
-
- INSERT INTO instance_types
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- name,
- id,
- memory_mb,
- vcpus,
- local_gb,
- swap,
- rxtx_factor,
- vcpu_weight,
- flavorid
- FROM instance_types_backup;
-
- DROP TABLE instance_types_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- if dialect.startswith('sqlite'):
- return
-
- networks = Table('networks', meta, autoload=True)
- vifs = Table('virtual_interfaces', meta, autoload=True)
-
- try:
- fkey_name = list(vifs.c.network_id.foreign_keys)[0].constraint.name
- ForeignKeyConstraint(columns=[vifs.c.network_id],
- refcolumns=[networks.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- if dialect.startswith('sqlite'):
- return
-
- networks = Table('networks', meta, autoload=True)
- vifs = Table('virtual_interfaces', meta, autoload=True)
-
- try:
- ForeignKeyConstraint(columns=[vifs.c.network_id],
- refcolumns=[networks.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be added"))
- raise
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces;
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(network_id) REFERENCES networks (id),
- UNIQUE (address),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO virtual_interfaces
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces;
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- UNIQUE (address),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO virtual_interfaces
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- network_id, instance_id, uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- Index('uuid', instances.c.uuid, unique=True).create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- Index('uuid', instances.c.uuid, unique=True).drop(migrate_engine)
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import Integer, MetaData, String
-from sqlalchemy import Table, Text
-
-from cinder import log as logging
-from cinder import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- #
- # New Tables
- #
- instance_info_caches = Table('instance_info_caches', meta,
- Column('created_at', DateTime(timezone=False),
- default=utils.utcnow),
- Column('updated_at', DateTime(timezone=False),
- onupdate=utils.utcnow),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('network_info', Text()),
- Column('instance_id', String(36),
- ForeignKey('instances.uuid'),
- nullable=False,
- unique=True),
- mysql_engine='InnoDB')
- # create instance_info_caches table
- try:
- instance_info_caches.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(instance_info_caches))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- instance_info_caches = Table('instance_info_caches', meta, autoload=True)
- try:
- instance_info_caches.drop()
- except Exception:
- LOG.error(_("instance_info_caches table not dropped"))
- raise
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer, ForeignKey
-from sqlalchemy import MetaData, String, Table, Text
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
- #
- # New Tables
- #
- instance_faults = Table('instance_faults', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None),
- default=False),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_uuid', String(36), ForeignKey('instances.uuid')),
- Column('code', Integer(), nullable=False),
- Column('message',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('details',
- Text(length=None, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
- try:
- instance_faults.create()
- except Exception:
- LOG.info(repr(instance_faults))
- LOG.exception('Exception while creating table')
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
- instance_faults = Table('instance_faults', meta, autoload=True)
- instance_faults.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import select, Column, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-from migrate import ForeignKeyConstraint
-
-from cinder import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- instance_actions = Table('instance_actions', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(instance_actions)
-
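- # Backfill the new instance_uuid column from the instances table, matching
- # rows on the old integer instance_id.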
- try:
- instance_actions.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == instance_actions.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
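- # Outside SQLite, drop the foreign key that still points at instances.id
- # before removing the old instance_id column.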
- if not dialect.startswith('sqlite'):
- fkeys = list(instance_actions.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[instance_actions.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- instance_actions.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_actions = Table('instance_actions', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(instance_actions)
-
- try:
- instance_actions.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == instance_actions.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- instance_actions.c.instance_uuid.drop()
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- index = Index('project_id', instances.c.project_id)
- index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- index = Index('project_id', instances.c.project_id)
- index.drop(migrate_engine)
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-from sqlalchemy import select, MetaData, Table
-
-from cinder import log as logging
-from cinder import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- pass
-
-
-def downgrade(migrate_engine):
- pass
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, MetaData, String, Table
-
-from cinder import flags
-
-FLAGS = flags.FLAGS
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- table = Table('floating_ips', meta, autoload=True)
-
- pool_column = Column('pool', String(255))
- interface_column = Column('interface', String(255))
- table.create_column(pool_column)
- table.create_column(interface_column)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- table = Table('floating_ips', meta, autoload=True)
- table.c.pool.drop()
- table.c.interface.drop()
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE floating_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- auto_assigned BOOLEAN,
- pool VARCHAR(255),
- interface VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
- );
-
- INSERT INTO floating_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- fixed_ip_id,
- project_id,
- host,
- auto_assigned,
- pool,
- interface
- FROM floating_ips;
-
- DROP TABLE floating_ips;
-
- CREATE TABLE floating_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- auto_assigned BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
- );
-
- INSERT INTO floating_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- fixed_ip_id,
- project_id,
- host,
- auto_assigned
- FROM floating_ips_backup;
-
- DROP TABLE floating_ips_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 Isaku Yamahata
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean
-from sqlalchemy import Column, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- shutdown_terminate = Column(
- 'shutdown_terminate', Boolean(), default=True)
- disable_terminate = Column(
- 'disable_terminate', Boolean(), default=False)
- instances.create_column(shutdown_terminate)
- instances.create_column(disable_terminate)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- instances.drop_column('shutdown_terminate')
- instances.drop_column('disable_terminate')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE instances_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- power_state INTEGER,
- vm_state VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- root_device_name VARCHAR(255),
- access_ip_v4 VARCHAR(255),
- access_ip_v6 VARCHAR(255),
- config_drive VARCHAR(255),
- task_state VARCHAR(255),
- default_local_device VARCHAR(255),
- default_swap_device VARCHAR(255),
- progress INTEGER,
- auto_disk_config BOOLEAN,
- shutdown_terminate BOOLEAN,
- disable_terminate BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (locked IN (0, 1)),
- CHECK (auto_disk_config IN (0, 1)),
- CHECK (shutdown_terminate IN (0, 1)),
- CHECK (disable_terminate IN (0, 1))
- );
-
- INSERT INTO instances_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- power_state,
- vm_state,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture,
- root_device_name,
- access_ip_v4,
- access_ip_v6,
- config_drive,
- task_state,
- default_local_device,
- default_swap_device,
- progress,
- auto_disk_config,
- shutdown_terminate,
- disable_terminate
- FROM instances;
-
- DROP TABLE instances;
-
-    CREATE TABLE instances (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- power_state INTEGER,
- vm_state VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- root_device_name VARCHAR(255),
- access_ip_v4 VARCHAR(255),
- access_ip_v6 VARCHAR(255),
- config_drive VARCHAR(255),
- task_state VARCHAR(255),
- default_local_device VARCHAR(255),
- default_swap_device VARCHAR(255),
- progress INTEGER,
- auto_disk_config BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (locked IN (0, 1)),
- CHECK (auto_disk_config IN (0, 1))
- );
-
- CREATE INDEX project_id ON instances (project_id);
- CREATE UNIQUE INDEX uuid ON instances (uuid);
-
- INSERT INTO instances
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- power_state,
- vm_state,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture,
- root_device_name,
- access_ip_v4,
- access_ip_v6,
- config_drive,
- task_state,
- default_local_device,
- default_swap_device,
- progress,
- auto_disk_config
- FROM instances_backup;
-
- DROP TABLE instances_backup;
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- disk_available_least = Column('disk_available_least', Integer(), default=0)
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-    # Add the disk_available_least column to the compute_nodes table.
-    # With qcow2 image support in mind, both the compressed and the virtual
-    # disk size have to be considered.
-    # disk_available stores "total disk size - used disk (compressed disk size)"
-    # while disk_available_least stores
-    # "total disk size - used disk (virtual disk size)".
-    # The virtual disk size is what matters for kvm block migration.
- try:
- compute_nodes.create_column(disk_available_least)
- except Exception:
-        LOG.error(_("disk_available_least column not added to "
-                    "compute_nodes table"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- compute_nodes = Table('compute_nodes', meta, autoload=True)
- compute_nodes.drop_column('disk_available_least')
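
A quick illustration of the distinction drawn in the comment above: the two counters subtract different notions of "used" space from the same total. This is a standalone sketch, not code from the migration; the helper name and inputs are invented for the example (for qcow2 the two sizes per image could be obtained from qemu-img info).

def disk_accounting(total_gb, images):
    """images: list of (compressed_gb, virtual_gb) tuples, one per qcow2 disk."""
    used_compressed = sum(c for c, _ in images)
    used_virtual = sum(v for _, v in images)
    return {'disk_available': total_gb - used_compressed,
            'disk_available_least': total_gb - used_virtual}

# A 100 GB node with one qcow2 disk that is 2 GB on disk but 20 GB virtual:
# disk_available = 98, disk_available_least = 80, the conservative bound
# that kvm block migration needs.
print(disk_accounting(100, [(2, 20)]))
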
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TABLE fixed_ips_backup (
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- host VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- CREATE TABLE floating_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- auto_assigned BOOLEAN,
- pool VARCHAR(255),
- interface VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- virtual_interface_id, network_id, instance_id, allocated,
- leased, reserved, host
- FROM fixed_ips;
-
- INSERT INTO floating_ips_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- fixed_ip_id, project_id, host, auto_assigned, pool,
- interface
- FROM floating_ips;
-
- DROP TABLE fixed_ips;
- DROP TABLE floating_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- host VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id),
- FOREIGN KEY(network_id) REFERENCES networks (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE TABLE floating_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- auto_assigned BOOLEAN,
- pool VARCHAR(255),
- interface VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id)
- );
-
- INSERT INTO fixed_ips
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- virtual_interface_id, network_id, instance_id, allocated,
- leased, reserved, host
- FROM fixed_ips_backup;
-
- INSERT INTO floating_ips
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- fixed_ip_id, project_id, host, auto_assigned, pool,
- interface
- FROM floating_ips_backup;
-
- DROP TABLE fixed_ips_backup;
- DROP TABLE floating_ips_backup;
-
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TABLE fixed_ips_backup (
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- host VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- CREATE TABLE floating_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- auto_assigned BOOLEAN,
- pool VARCHAR(255),
- interface VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- virtual_interface_id, network_id, instance_id, allocated,
- leased, reserved, host
- FROM fixed_ips;
-
- INSERT INTO floating_ips_backup
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- fixed_ip_id, project_id, host, auto_assigned, pool,
- interface
- FROM floating_ips;
-
- DROP TABLE fixed_ips;
- DROP TABLE floating_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME NOT NULL,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN NOT NULL,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- virtual_interface_id INTEGER,
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN default FALSE,
- leased BOOLEAN default FALSE,
- reserved BOOLEAN default FALSE,
- host VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- CREATE TABLE floating_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- fixed_ip_id INTEGER,
- project_id VARCHAR(255),
- host VARCHAR(255),
- auto_assigned BOOLEAN,
- pool VARCHAR(255),
- interface VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO fixed_ips
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- virtual_interface_id, network_id, instance_id, allocated,
- leased, reserved, host
- FROM fixed_ips_backup;
-
- INSERT INTO floating_ips
- SELECT created_at, updated_at, deleted_at, deleted, id, address,
- fixed_ip_id, project_id, host, auto_assigned, pool,
- interface
- FROM floating_ips_backup;
-
- DROP TABLE fixed_ips_backup;
- DROP TABLE floating_ips_backup;
-
-COMMIT;
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- if dialect.startswith('sqlite'):
- return
-
- instances = Table('instances', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
- vifs = Table('virtual_interfaces', meta, autoload=True)
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- floating_ips = Table('floating_ips', meta, autoload=True)
-
- try:
- fkeys = list(fixed_ips.c.network_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[fixed_ips.c.network_id],
- refcolumns=[networks.c.id],
- name=fkey_name).drop()
-
- fkeys = list(fixed_ips.c.virtual_interface_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
- refcolumns=[vifs.c.id],
- name=fkey_name).drop()
-
- fkeys = list(fixed_ips.c.instance_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[fixed_ips.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
-
- fkeys = list(floating_ips.c.fixed_ip_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id],
- refcolumns=[fixed_ips.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- if dialect.startswith('sqlite'):
- return
-
- instances = Table('instances', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
- vifs = Table('virtual_interfaces', meta, autoload=True)
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- floating_ips = Table('floating_ips', meta, autoload=True)
-
- try:
- ForeignKeyConstraint(columns=[fixed_ips.c.network_id],
- refcolumns=[networks.c.id]).create()
-
- ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
- refcolumns=[vifs.c.id]).create()
-
- ForeignKeyConstraint(columns=[fixed_ips.c.instance_id],
- refcolumns=[instances.c.id]).create()
-
- ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id],
- refcolumns=[fixed_ips.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be added"))
- raise
+++ /dev/null
-# Copyright (c) 2011 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, String, DateTime, Integer
-from sqlalchemy import MetaData, Column, ForeignKey, Table
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- #
- # New Tables
- #
- aggregates = Table('aggregates', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True, nullable=False, autoincrement=True),
- Column('name',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- unique=True),
- Column('operational_state',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- Column('availability_zone',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- )
-
- hosts = Table('aggregate_hosts', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('host',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- unique=True),
- Column('aggregate_id', Integer(), ForeignKey('aggregates.id'),
- nullable=False),
- )
-
- metadata = Table('aggregate_metadata', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('aggregate_id',
- Integer(),
- ForeignKey('aggregates.id'),
- nullable=False),
- Column('key',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- Column('value',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False))
- tables = (aggregates, hosts, metadata)
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.exception(repr(table))
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- aggregates = Table('aggregates', meta, autoload=True)
- hosts = Table('aggregate_hosts', meta, autoload=True)
- metadata = Table('aggregate_metadata', meta, autoload=True)
- # table order matters, don't change
- for table in (hosts, metadata, aggregates):
- try:
- table.drop()
- except Exception:
- LOG.exception(repr(table))
+++ /dev/null
-# Copyright 2012 Andrew Bogott for The Wikimedia Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- projects = Table('projects', meta, autoload=True)
-
- #
- # New Tables
- #
- dns_domains = Table('dns_domains', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('domain',
- String(length=512, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- primary_key=True, nullable=False),
- Column('scope',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('availability_zone',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- ForeignKey('projects.id'))
- )
- # create dns_domains table
- try:
- dns_domains.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(dns_domains))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- projects = Table('projects', meta, autoload=True)
-
- dns_domains = Table('dns_domains', meta, autoload=True)
- try:
- dns_domains.drop()
- except Exception:
- LOG.error(_("dns_domains table not dropped"))
- raise
+++ /dev/null
-CREATE TABLE dns_domains (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) CHARACTER SET latin1 NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain),
- CHECK (deleted IN (0, 1)),
- FOREIGN KEY(project_id) REFERENCES projects (id)
-);
+++ /dev/null
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, Integer, MetaData, Table
-
-from cinder import log as logging
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-
- #
- # New Columns
- #
- new_columns = [
- Column('free_ram_mb', Integer()),
- Column('free_disk_gb', Integer()),
- Column('current_workload', Integer()),
- Column('running_vms', Integer()),
- ]
- for column in new_columns:
- compute_nodes.create_column(column)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-
- for column in ('free_ram_mb',
- 'free_disk_gb',
- 'current_workload',
- 'running_vms'):
- compute_nodes.drop_column(column)
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import select, Column, Integer, MetaData, Table
-
-from cinder import exception
-from cinder import flags
-
-FLAGS = flags.FLAGS
-
-
-def upgrade_libvirt(instances, instance_types):
- # Update instance_types first
- tiny = None
- for inst_type in instance_types.select().execute():
- if inst_type['name'] == 'm1.tiny':
- tiny = inst_type['id']
- root_gb = 0
- else:
- root_gb = 10
-
- instance_types.update()\
- .values(root_gb=root_gb,
- ephemeral_gb=inst_type['local_gb'])\
- .where(instance_types.c.id == inst_type['id'])\
- .execute()
-
-    # then update instances following the same pattern
- instances.update()\
- .values(root_gb=10,
- ephemeral_gb=instances.c.local_gb)\
- .execute()
-
- if tiny is not None:
- instances.update()\
- .values(root_gb=0,
- ephemeral_gb=instances.c.local_gb)\
- .where(instances.c.instance_type_id == tiny)\
- .execute()
-
-
-def upgrade_other(instances, instance_types):
- for table in (instances, instance_types):
- table.update().values(root_gb=table.c.local_gb,
- ephemeral_gb=0).execute()
-
-
-def check_instance_presence(migrate_engine, instances_table):
- result = migrate_engine.execute(instances_table.select().limit(1))
- return result.fetchone() is not None
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- data_present = check_instance_presence(migrate_engine, instances)
-
- if data_present and not FLAGS.connection_type:
-        msg = ("Found instance records in the database. You must specify "
-               "connection_type to run this migration")
- raise exception.Error(msg)
-
- instance_types = Table('instance_types', meta, autoload=True)
-
- for table in (instances, instance_types):
- root_gb = Column('root_gb', Integer)
- root_gb.create(table)
- ephemeral_gb = Column('ephemeral_gb', Integer)
- ephemeral_gb.create(table)
-
- # Since this migration is part of the work to get all drivers
- # working the same way, we need to treat the new root_gb and
-    # ephemeral_gb columns differently depending on how the
-    # driver implementation used to behave.
- if FLAGS.connection_type == 'libvirt':
- upgrade_libvirt(instances, instance_types)
- else:
- upgrade_other(instances, instance_types)
-
- default_local_device = instances.c.default_local_device
- default_local_device.alter(name='default_ephemeral_device')
-
- for table in (instances, instance_types):
- table.drop_column('local_gb')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- instance_types = Table('instance_types', meta, autoload=True)
-
- for table in (instances, instance_types):
- local_gb = Column('local_gb', Integer)
- local_gb.create(table)
-
- try:
- for table in (instances, instance_types):
- if FLAGS.connection_type == 'libvirt':
- column = table.c.ephemeral_gb
- else:
- column = table.c.root_gb
- table.update().values(local_gb=column).execute()
- except Exception:
- for table in (instances, instance_types):
- table.drop_column('local_gb')
- raise
-
- default_ephemeral_device = instances.c.default_ephemeral_device
- default_ephemeral_device.alter(name='default_local_device')
-
- for table in (instances, instance_types):
- table.drop_column('root_gb')
- table.drop_column('ephemeral_gb')
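
To make the driver-dependent split above easier to follow, here is the same mapping condensed into one illustrative helper; the function name and the example calls are invented, and the values simply mirror upgrade_libvirt and upgrade_other.

def split_local_gb(local_gb, connection_type, is_tiny=False):
    if connection_type == 'libvirt':
        # libvirt guests kept a fixed 10 GB root disk (0 for m1.tiny) and
        # treated local_gb as ephemeral space.
        return {'root_gb': 0 if is_tiny else 10, 'ephemeral_gb': local_gb}
    # other drivers treated local_gb as the root disk.
    return {'root_gb': local_gb, 'ephemeral_gb': 0}

print(split_local_gb(20, 'libvirt'))   # {'root_gb': 10, 'ephemeral_gb': 20}
print(split_local_gb(20, 'xenapi'))    # {'root_gb': 20, 'ephemeral_gb': 0}
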
+++ /dev/null
--- sqlalchemy-migrate is surprisingly broken when it comes to migrations
--- for sqlite. As a result, we have to do much of the work manually here
-
-BEGIN TRANSACTION;
- -- make backup of instance_types
- CREATE TEMPORARY TABLE instance_types_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- memory_mb INTEGER NOT NULL,
- vcpus INTEGER NOT NULL,
- local_gb INTEGER NOT NULL,
- swap INTEGER NOT NULL,
- rxtx_factor FLOAT,
- vcpu_weight INTEGER,
- flavorid VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (flavorid),
- CHECK (deleted IN (0, 1)),
- UNIQUE (name)
- );
-
- INSERT INTO instance_types_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- name,
- id,
- memory_mb,
- vcpus,
- local_gb,
- swap,
- rxtx_factor,
- vcpu_weight,
- flavorid
- FROM instance_types;
-
- DROP TABLE instance_types;
-
- CREATE TABLE instance_types (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- memory_mb INTEGER NOT NULL,
- vcpus INTEGER NOT NULL,
- root_gb INTEGER NOT NULL,
- ephemeral_gb INTEGER NOT NULL,
- swap INTEGER NOT NULL,
- rxtx_factor FLOAT,
- vcpu_weight INTEGER,
- flavorid VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (flavorid),
- CHECK (deleted IN (0, 1)),
- UNIQUE (name)
- );
-
- -- copy from backup to new table with root_gb set to local_gb and
-    -- ephemeral_gb set to 0
- INSERT INTO instance_types
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- name,
- id,
- memory_mb,
- vcpus,
- local_gb,
- 0,
- swap,
- rxtx_factor,
- vcpu_weight,
- flavorid
- FROM instance_types_backup;
-
- DROP TABLE instance_types_backup;
-
- -- make backup of instances
- CREATE TEMPORARY TABLE instances_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- power_state INTEGER,
- vm_state VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- local_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- root_device_name VARCHAR(255),
- access_ip_v4 VARCHAR(255),
- access_ip_v6 VARCHAR(255),
- config_drive VARCHAR(255),
- task_state VARCHAR(255),
- default_local_device VARCHAR(255),
- default_swap_device VARCHAR(255),
- progress INTEGER,
- auto_disk_config BOOLEAN,
- shutdown_terminate BOOLEAN,
- disable_terminate BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (locked IN (0, 1)),
- CHECK (auto_disk_config IN (0, 1)),
- CHECK (shutdown_terminate IN (0, 1)),
- CHECK (disable_terminate IN (0, 1))
- );
-
- INSERT INTO instances_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- power_state,
- vm_state,
- memory_mb,
- vcpus,
- local_gb,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture,
- root_device_name,
- access_ip_v4,
- access_ip_v6,
- config_drive,
- task_state,
- default_local_device,
- default_swap_device,
- progress,
- auto_disk_config,
- shutdown_terminate,
- disable_terminate
- FROM instances;
-
- DROP TABLE instances;
-
- CREATE TABLE instances (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- internal_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- image_ref VARCHAR(255),
- kernel_id VARCHAR(255),
- ramdisk_id VARCHAR(255),
- server_name VARCHAR(255),
- launch_index INTEGER,
- key_name VARCHAR(255),
- key_data TEXT,
- power_state INTEGER,
- vm_state VARCHAR(255),
- memory_mb INTEGER,
- vcpus INTEGER,
- root_gb INTEGER,
- ephemeral_gb INTEGER,
- hostname VARCHAR(255),
- host VARCHAR(255),
- user_data TEXT,
- reservation_id VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- availability_zone VARCHAR(255),
- locked BOOLEAN,
- os_type VARCHAR(255),
- launched_on TEXT,
- instance_type_id INTEGER,
- vm_mode VARCHAR(255),
- uuid VARCHAR(36),
- architecture VARCHAR(255),
- root_device_name VARCHAR(255),
- access_ip_v4 VARCHAR(255),
- access_ip_v6 VARCHAR(255),
- config_drive VARCHAR(255),
- task_state VARCHAR(255),
- default_ephemeral_device VARCHAR(255),
- default_swap_device VARCHAR(255),
- progress INTEGER,
- auto_disk_config BOOLEAN,
- shutdown_terminate BOOLEAN,
- disable_terminate BOOLEAN,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1)),
- CHECK (locked IN (0, 1)),
- CHECK (auto_disk_config IN (0, 1)),
- CHECK (shutdown_terminate IN (0, 1)),
- CHECK (disable_terminate IN (0, 1))
- );
-
- CREATE INDEX project_id ON instances (project_id);
- CREATE UNIQUE INDEX uuid ON instances (uuid);
-
- -- copy from backup to new table with root_gb set to local_gb and
-    -- ephemeral_gb set to 0
- INSERT INTO instances
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- internal_id,
- user_id,
- project_id,
- image_ref,
- kernel_id,
- ramdisk_id,
- server_name,
- launch_index,
- key_name,
- key_data,
- power_state,
- vm_state,
- memory_mb,
- vcpus,
- local_gb,
- 0,
- hostname,
- host,
- user_data,
- reservation_id,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- availability_zone,
- locked,
- os_type,
- launched_on,
- instance_type_id,
- vm_mode,
- uuid,
- architecture,
- root_device_name,
- access_ip_v4,
- access_ip_v6,
- config_drive,
- task_state,
- default_local_device,
- default_swap_device,
- progress,
- auto_disk_config,
- shutdown_terminate,
- disable_terminate
- FROM instances_backup;
-
- DROP TABLE instances_backup;
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import and_, select
-from sqlalchemy import BigInteger, Boolean, Column, DateTime
-from sqlalchemy import Integer, MetaData, String
-from sqlalchemy import Table
-
-from cinder import utils
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- vifs = Table('virtual_interfaces', meta, autoload=True)
- networks = Table('networks', meta, autoload=True)
-
- bw_usage_cache = Table('bw_usage_cache', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id', Integer(), nullable=False),
- Column('network_label',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('start_period', DateTime(timezone=False),
- nullable=False),
- Column('last_refreshed', DateTime(timezone=False)),
- Column('bw_in', BigInteger()),
- Column('bw_out', BigInteger()),
- useexisting=True)
- mac_column = Column('mac', String(255))
- bw_usage_cache.create_column(mac_column)
-
- bw_usage_cache.update()\
- .values(mac=select([vifs.c.address])
- .where(and_(
- networks.c.label == bw_usage_cache.c.network_label,
- networks.c.id == vifs.c.network_id,
- bw_usage_cache.c.instance_id == vifs.c.instance_id))
- .as_scalar()).execute()
-
- bw_usage_cache.c.network_label.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- vifs = Table('virtual_interfaces', meta, autoload=True)
- network = Table('networks', meta, autoload=True)
-
- bw_usage_cache = Table('bw_usage_cache', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id', Integer(), nullable=False),
- Column('mac', String(255)),
- Column('start_period', DateTime(timezone=False),
- nullable=False),
- Column('last_refreshed', DateTime(timezone=False)),
- Column('bw_in', BigInteger()),
- Column('bw_out', BigInteger()),
- useexisting=True)
-
- network_label_column = Column('network_label', String(255))
- bw_usage_cache.create_column(network_label_column)
-
- bw_usage_cache.update()\
- .values(network_label=select([network.c.label])
- .where(and_(
- network.c.id == vifs.c.network_id,
- vifs.c.address == bw_usage_cache.c.mac,
- bw_usage_cache.c.instance_id == vifs.c.instance_id))
- .as_scalar()).execute()
-
- bw_usage_cache.c.mac.drop()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate.changeset.constraint import UniqueConstraint
-
-
-def _get_constraint_names(engine_name):
-
- # NOTE(vish): These constraint names may be dependent on the backend, but
-    #             there doesn't seem to be a way to determine the proper
- # name for existing constraints. These names are correct for
- # mysql and postgres.
- if engine_name == "mysql":
- return {
- "instance_types_name": ("name", "instance_types_name_key"),
- "instance_types_flavorid": "instance_types_flavorid_str_key",
- "volume_types_name": "name",
- }
- else:
- return {
- "instance_types_name": ("instance_types_name_key",),
- "instance_types_flavorid": "instance_types_flavorid_str_key",
- "volume_types_name": "volume_types_name_key",
- }
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- c_names = _get_constraint_names(migrate_engine.name)
-
- table = Table('instance_types', meta, autoload=True)
- for constraint_name in c_names['instance_types_name']:
- cons = UniqueConstraint('name',
- name=constraint_name,
- table=table)
- cons.drop()
- cons = UniqueConstraint('flavorid',
- name=c_names['instance_types_flavorid'],
- table=table)
- cons.drop()
- table = Table('volume_types', meta, autoload=True)
- cons = UniqueConstraint('name',
- name=c_names['volume_types_name'],
- table=table)
- cons.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- c_names = _get_constraint_names(migrate_engine.name)
-
- table = Table('instance_types', meta, autoload=True)
- for constraint_name in c_names['instance_types_name']:
- cons = UniqueConstraint('name',
- name=constraint_name,
- table=table)
- cons.create()
- table = Table('instance_types', meta, autoload=True)
- cons = UniqueConstraint('flavorid',
- name=c_names['instance_types_flavorid'],
- table=table)
- cons.create()
- table = Table('volume_types', meta, autoload=True)
- cons = UniqueConstraint('name',
- name=c_names['volume_types_name'],
- table=table)
- cons.create()
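
The NOTE above hardcodes constraint names because the tooling of the time could not discover them. As a hedged aside, newer SQLAlchemy releases can reflect unique constraints on MySQL and PostgreSQL, so a rough alternative sketch (not something this migration could have used) looks like:

from sqlalchemy import create_engine, inspect

def unique_constraint_names(db_url, table_name):
    # Return the unique constraint names as reported by the backend.
    inspector = inspect(create_engine(db_url))
    return [uc['name'] for uc in inspector.get_unique_constraints(table_name)]

# e.g. unique_constraint_names('mysql://user:pass@host/nova', 'instance_types')
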
+++ /dev/null
--- sqlalchemy-migrate is surprisingly broken when it comes to migrations
--- for sqlite. As a result, we have to do much of the work manually here
-
-BEGIN TRANSACTION;
- CREATE TABLE instance_types_temp (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- memory_mb INTEGER NOT NULL,
- vcpus INTEGER NOT NULL,
- root_gb INTEGER NOT NULL,
- ephemeral_gb INTEGER NOT NULL,
- swap INTEGER NOT NULL,
- rxtx_factor FLOAT,
- vcpu_weight INTEGER,
- flavorid VARCHAR(255),
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO instance_types_temp SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- name,
- id,
- memory_mb,
- vcpus,
- root_gb,
- ephemeral_gb,
- swap,
- rxtx_factor,
- vcpu_weight,
- flavorid
- FROM instance_types;
- DROP TABLE instance_types;
- ALTER TABLE instance_types_temp RENAME TO instance_types;
- CREATE TABLE volume_types_temp (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- name VARCHAR(255),
- id INTEGER NOT NULL,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO volume_types_temp SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- name,
- id
- FROM volume_types;
- DROP TABLE volume_types;
- ALTER TABLE volume_types_temp RENAME TO volume_types;
-COMMIT;
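
As the comment at the top of this script notes, sqlite cannot drop columns or constraints in place, so every sqlite variant in this diff follows the same copy/drop/rename shape. Purely for reference, the skeleton they all share, with illustrative placeholder names only:

SQLITE_REBUILD_TEMPLATE = """
BEGIN TRANSACTION;
    CREATE TABLE {table}_temp ( {schema_without_the_dropped_constraint} );
    INSERT INTO {table}_temp SELECT {surviving_columns} FROM {table};
    DROP TABLE {table};
    ALTER TABLE {table}_temp RENAME TO {table};
COMMIT;
"""
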
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # NOTE (ironcamel): The only table we are not converting to utf8 here is
- # dns_domains. This table has a primary key that is 512 characters wide.
- # When the mysql engine attempts to convert it to utf8, it complains about
- # not supporting key columns larger than 1000.
-
- if migrate_engine.name == "mysql":
- tables = [
- # tables that are FK parents, must be converted early
- "aggregates", "console_pools", "instance_types", "instances",
- "projects", "security_groups", "sm_backend_config", "sm_flavors",
- "snapshots", "user_project_association", "users", "volume_types",
- "volumes",
- # those that are children and others later
- "agent_builds", "aggregate_hosts", "aggregate_metadata",
- "auth_tokens", "block_device_mapping", "bw_usage_cache",
- "certificates", "compute_nodes", "consoles", "fixed_ips",
- "floating_ips", "instance_actions", "instance_faults",
- "instance_info_caches", "instance_metadata",
- "instance_type_extra_specs", "iscsi_targets", "key_pairs",
- "migrate_version", "migrations", "networks", "provider_fw_rules",
- "quotas", "s3_images", "security_group_instance_association",
- "security_group_rules", "services", "sm_volume",
- "user_project_role_association", "user_role_association",
- "virtual_interfaces", "virtual_storage_arrays", "volume_metadata",
- "volume_type_extra_specs", "zones"]
- sql = "SET foreign_key_checks = 0;"
- for table in tables:
- sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table
- sql += "SET foreign_key_checks = 1;"
- sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
- % migrate_engine.url.database
- migrate_engine.execute(sql)
-
-
-def downgrade(migrate_engine):
- # utf8 tables should be backwards compatible, so lets leave it alone
- pass
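
A small, optional companion to the conversion above: after running it on MySQL one can list any tables in the current schema that are still not utf8 (dns_domains is expected to remain, per the NOTE). This is a sketch, not part of the migration; it relies only on standard information_schema tables.

def non_utf8_tables(migrate_engine):
    # List table names in the current database whose collation does not
    # belong to the utf8 character set.
    rows = migrate_engine.execute(
        "SELECT t.table_name, c.character_set_name "
        "FROM information_schema.tables t "
        "JOIN information_schema.collation_character_set_applicability c "
        "ON t.table_collation = c.collation_name "
        "WHERE t.table_schema = DATABASE() "
        "AND c.character_set_name != 'utf8'")
    return [row[0] for row in rows]
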
+++ /dev/null
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column
-from sqlalchemy import Integer, MetaData, String
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- zones = Table('zones', meta, autoload=True)
-
- is_parent = Column('is_parent', Boolean(), default=False)
- rpc_host = Column('rpc_host', String(255))
- rpc_port = Column('rpc_port', Integer())
- rpc_virtual_host = Column('rpc_virtual_host', String(255))
-
- zones.create_column(is_parent)
- zones.create_column(rpc_host)
- zones.create_column(rpc_port)
- zones.create_column(rpc_virtual_host)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- zones = Table('zones', meta, autoload=True)
-
- zones.drop_column('rpc_virtual_host')
- zones.drop_column('rpc_port')
- zones.drop_column('rpc_host')
- zones.drop_column('is_parent')
+++ /dev/null
-BEGIN TRANSACTION;
-
- CREATE TEMPORARY TABLE zones_temp (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- name VARCHAR(255),
- api_url VARCHAR(255),
- username VARCHAR(255),
- password VARCHAR(255),
- weight_offset FLOAT,
- weight_scale FLOAT,
- PRIMARY KEY (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO zones_temp
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- name,
- api_url,
- username,
- password,
- weight_offset,
- weight_scale FROM zones;
-
- DROP TABLE zones;
-
- ALTER TABLE zones_temp RENAME TO zones;
-COMMIT;
+++ /dev/null
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- zone_name = Column('zone_name', String(255))
- instances.create_column(zone_name)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- instances.drop_column('zone_name')
+++ /dev/null
-# Copyright 2012 OpenStack, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from sqlalchemy import Column, MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- compute_nodes = Table("compute_nodes", meta, autoload=True)
- hypervisor_hostname = Column("hypervisor_hostname", String(255))
- compute_nodes.create_column(hypervisor_hostname)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- compute_nodes = Table("compute_nodes", meta, autoload=True)
- compute_nodes.drop_column('hypervisor_hostname')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import json
-
-from sqlalchemy import Column, Table, MetaData, Integer, Boolean, String
-from sqlalchemy import DateTime, BigInteger
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- bw_usage_cache = Table('bw_usage_cache', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id', Integer(), nullable=False),
- Column('mac', String(255)),
- Column('start_period', DateTime(timezone=False),
- nullable=False),
- Column('last_refreshed', DateTime(timezone=False)),
- Column('bw_in', BigInteger()),
- Column('bw_out', BigInteger()),
- useexisting=True)
-
- bw_usage_cache.drop_column('instance_id')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_info_caches = Table('instance_info_caches', meta, autoload=True)
- bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
-
- instance_id = Column('instance_id', Integer)
- bw_usage_cache.create_column(instance_id)
-
- cache = {}
- for row in migrate_engine.execute(instance_info_caches.select()):
-        instance_id = row['instance_id']
- if not row['network_info']:
- continue
-
- nw_info = json.loads(row['network_info'])
- for vif in nw_info:
- cache[vif['address']] = instance_id
-
- for row in migrate_engine.execute(bw_usage_cache.select()):
- instance_id = cache[row['mac']]
- migrate_engine.execute(bw_usage_cache.update()
- .where(bw_usage_cache.c.id == row['id'])
- .values(instance_id=instance_id))
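
For readers unfamiliar with the instance_info_caches payload the downgrade above walks: network_info is a JSON list of vifs, and the only field the code relies on is each vif's MAC in 'address'. A minimal illustration follows; the MAC values are made up.

import json

sample_network_info = json.dumps([{'address': 'fa:16:3e:00:00:01'},
                                  {'address': 'fa:16:3e:00:00:02'}])
print([vif['address'] for vif in json.loads(sample_network_info)])
# ['fa:16:3e:00:00:01', 'fa:16:3e:00:00:02']
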
+++ /dev/null
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- zone_name = instances.c.zone_name
- zone_name.alter(name='cell_name')
- zones = Table('zones', meta, autoload=True)
- zones.rename('cells')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- cell_name = instances.c.cell_name
- cell_name.alter(name='zone_name')
- cells = Table('cells', meta, autoload=True)
- cells.rename('zones')
+++ /dev/null
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime
-from sqlalchemy import MetaData, Integer, String, Table
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # New table
- quota_classes = Table('quota_classes', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('class_name',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('hard_limit', Integer(), nullable=True),
- )
-
- try:
- quota_classes.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(quota_classes))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- quota_classes = Table('quota_classes', meta, autoload=True)
- try:
- quota_classes.drop()
- except Exception:
- LOG.error(_("quota_classes table not dropped"))
- raise
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- """Map quotas hard_limit from NULL to -1"""
- _migrate_unlimited(migrate_engine, None, -1)
-
-
-def downgrade(migrate_engine):
- """Map quotas hard_limit from -1 to NULL"""
- _migrate_unlimited(migrate_engine, -1, None)
-
-
-def _migrate_unlimited(migrate_engine, old_limit, new_limit):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
-
- def _migrate(table_name):
- table = sqlalchemy.Table(table_name, meta, autoload=True)
- table.update().\
- where(table.c.hard_limit == old_limit).\
- values(hard_limit=new_limit).execute()
-
- _migrate('quotas')
- _migrate('quota_classes')
+++ /dev/null
-# Copyright 2012 IBM
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-    fixed_ips = Table('fixed_ips', meta, autoload=True)
-    index = Index('address', fixed_ips.c.address)
- index.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-    fixed_ips = Table('fixed_ips', meta, autoload=True)
-    index = Index('address', fixed_ips.c.address)
- index.drop(migrate_engine)
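
A hedged variation on the upgrade above: if the migration might be re-run against a database where the index already exists, SQLAlchemy reflection can make the creation conditional. This is a sketch under that assumption, not how the original migration behaves.

from sqlalchemy import Index, MetaData, Table, inspect

def ensure_address_index(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    existing = [ix['name']
                for ix in inspect(migrate_engine).get_indexes('fixed_ips')]
    if 'address' not in existing:
        Index('address', fixed_ips.c.address).create(migrate_engine)
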
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
-
- tables = ["agent_builds", "aggregate_hosts", "aggregate_metadata",
- "aggregates", "block_device_mapping", "bw_usage_cache",
- "dns_domains", "instance_faults", "instance_type_extra_specs",
- "provider_fw_rules", "quota_classes", "s3_images",
- "sm_backend_config", "sm_flavors", "sm_volume",
- "virtual_storage_arrays", "volume_metadata",
- "volume_type_extra_specs", "volume_types"]
-
- meta = MetaData()
- meta.bind = migrate_engine
- if migrate_engine.name == "mysql":
- d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';")
- for row in d.fetchall():
- table_name = row[0]
- if table_name in tables:
- migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" %
- table_name)
-
-
-def downgrade(migrate_engine):
- pass
+++ /dev/null
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, BigInteger
-from sqlalchemy import MetaData, Integer, String, Table
-
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # add column:
- bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
- uuid = Column('uuid', String(36))
-
- # clear the cache to get rid of entries with no uuid
- migrate_engine.execute(bw_usage_cache.delete())
-
- bw_usage_cache.create_column(uuid)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # drop column:
- bw_usage_cache = Table('bw_usage_cache', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('mac', String(255)),
- Column('uuid', String(36)),
- Column('start_period', DateTime(timezone=False), nullable=False),
- Column('last_refreshed', DateTime(timezone=False)),
- Column('bw_in', BigInteger()),
- Column('bw_out', BigInteger()),
- useexisting=True)
-
- bw_usage_cache.drop_column('uuid')
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import select, Column, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-from migrate import ForeignKeyConstraint
-
-from cinder import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(block_device_mapping)
-
- try:
- block_device_mapping.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == block_device_mapping.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(block_device_mapping.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[block_device_mapping.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- block_device_mapping.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(block_device_mapping)
-
- try:
- block_device_mapping.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == block_device_mapping.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- block_device_mapping.c.instance_uuid.drop()
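The migration above follows a pattern repeated throughout this change set: add the new uuid column, backfill it with a correlated sub-select against the instances table, and only then drop the old integer foreign key. The standalone sketch below illustrates just the backfill step with Python's built-in sqlite3 module; the toy column values and the simplified schema are invented for illustration and are not Cinder code.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()

# Toy schema: instances carry both an integer id and a uuid;
# block_device_mapping starts out referencing the integer id.
cur.execute("CREATE TABLE instances (id INTEGER PRIMARY KEY, uuid TEXT)")
cur.execute("CREATE TABLE block_device_mapping "
            "(id INTEGER PRIMARY KEY, instance_id INTEGER, device_name TEXT)")
cur.executemany("INSERT INTO instances VALUES (?, ?)",
                [(1, "aaaa-1111"), (2, "bbbb-2222")])
cur.executemany("INSERT INTO block_device_mapping VALUES (?, ?, ?)",
                [(10, 1, "/dev/vda"), (11, 2, "/dev/vdb")])

# Step 1: add the new column.
cur.execute("ALTER TABLE block_device_mapping ADD COLUMN instance_uuid TEXT")

# Step 2: correlated update -- copy the uuid matching each row's instance_id,
# which is what the migration expresses with update().values(select(...)).
cur.execute("""
    UPDATE block_device_mapping
       SET instance_uuid = (SELECT uuid FROM instances
                            WHERE instances.id = block_device_mapping.instance_id)
""")

print(cur.execute(
    "SELECT id, instance_uuid FROM block_device_mapping").fetchall())
# -> [(10, 'aaaa-1111'), (11, 'bbbb-2222')]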
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- NULL,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- instance_uuid
- FROM block_device_mapping;
-
- UPDATE block_device_mapping_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE block_device_mapping_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE block_device_mapping;
-
- CREATE TABLE block_device_mapping (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping_backup;
-
- DROP TABLE block_device_mapping_backup;
-
-COMMIT;
\ No newline at end of file
+++ /dev/null
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- NULL
- FROM block_device_mapping;
-
- UPDATE block_device_mapping_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE block_device_mapping_backup.instance_id = instances.id
- );
-
- DROP TABLE block_device_mapping;
-
- CREATE TABLE block_device_mapping (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- INSERT INTO block_device_mapping
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- instance_uuid
- FROM block_device_mapping_backup;
-
- DROP TABLE block_device_mapping_backup;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-from cinder import log as logging
-from cinder import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Build mapping tables for our volume uuid migration.
-
- These mapping tables serve two purposes:
- 1. Provide a method for downgrade after UUID conversion
- 2. Provide a uuid to associate with existing volumes and snapshots
- when we do the actual datatype migration from int to uuid
-
- """
- meta = MetaData()
- meta.bind = migrate_engine
-
- volume_id_mappings = Table('volume_id_mappings', meta,
- Column('created_at',
- DateTime(timezone=False)),
- Column('updated_at',
- DateTime(timezone=False)),
- Column('deleted_at',
- DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36),
- nullable=False))
- try:
- volume_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'volume_id_mappings'")
- meta.drop_all(tables=[volume_id_mappings])
- raise
-
- snapshot_id_mappings = Table('snapshot_id_mappings', meta,
- Column('created_at',
- DateTime(timezone=False)),
- Column('updated_at',
- DateTime(timezone=False)),
- Column('deleted_at',
- DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36),
- nullable=False))
- try:
- snapshot_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'snapshot_id_mappings'")
- meta.drop_all(tables=[snapshot_id_mappings])
- raise
-
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE snapshot_id_mappings "
- "Engine=InnoDB")
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- volume_id_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- old_id = v['id']
- new_id = utils.gen_uuid()
- row = volume_id_mappings.insert()
- row.execute({'id': old_id,
- 'uuid': str(new_id)})
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- old_id = s['id']
- new_id = utils.gen_uuid()
- row = snapshot_id_mappings.insert()
- row.execute({'id': old_id,
- 'uuid': str(new_id)})
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- volume_id_mappings = Table('volume_id_mappings', meta, autoload=True)
- volume_id_mappings.drop()
-
- snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True)
- snapshot_id_mappings.drop()
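The docstring above gives the two jobs of the mapping tables; the sketch below shows, outside of any migration framework, what one of those tables ends up holding: one row per existing integer id with a freshly generated uuid, so the later conversion (and a possible downgrade) can look the pairing up in either direction. The table names mirror the migration, but the rest is illustrative only.

import sqlite3
import uuid

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE volumes (id INTEGER PRIMARY KEY)")
cur.execute("CREATE TABLE volume_id_mappings "
            "(id INTEGER PRIMARY KEY, uuid VARCHAR(36) NOT NULL)")
cur.executemany("INSERT INTO volumes VALUES (?)", [(1,), (2,), (3,)])

# One mapping row per existing volume: old integer id -> newly generated uuid.
for (old_id,) in cur.execute("SELECT id FROM volumes").fetchall():
    cur.execute("INSERT INTO volume_id_mappings VALUES (?, ?)",
                (old_id, str(uuid.uuid4())))

# The upgrade later reads id -> uuid; the downgrade reads uuid -> id.
print(cur.execute("SELECT id, uuid FROM volume_id_mappings").fetchall())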
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Integer
-from sqlalchemy import MetaData, String, Table
-from migrate import ForeignKeyConstraint
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Convert volume and snapshot id columns from int to varchar."""
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
-
- if dialect.startswith('sqlite'):
- return
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- sm_volume = Table('sm_volume', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id],
- refcolumns=[snapshots.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
- volumes.c.id.alter(String(36), primary_key=True)
- volumes.c.snapshot_id.alter(String(36))
- volume_metadata.c.volume_id.alter(String(36), nullable=False)
- snapshots.c.id.alter(String(36), primary_key=True)
- snapshots.c.volume_id.alter(String(36))
- sm_volume.c.id.alter(String(36))
- block_device_mapping.c.volume_id.alter(String(36))
- block_device_mapping.c.snapshot_id.alter(String(36))
- iscsi_targets.c.volume_id.alter(String(36), nullable=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
- # NOTE(jdg) We're intentionally leaving off FK's on BDM
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- """Convert volume and snapshot id columns back to int."""
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
-
- if dialect.startswith('sqlite'):
- return
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- sm_volume = Table('sm_volume', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
- volumes.c.id.alter(Integer, primary_key=True, autoincrement=True)
- volumes.c.snapshot_id.alter(Integer)
- volume_metadata.c.volume_id.alter(Integer, nullable=False)
- snapshots.c.id.alter(Integer, primary_key=True, autoincrement=True)
- snapshots.c.volume_id.alter(Integer)
- sm_volume.c.id.alter(Integer)
- block_device_mapping.c.volume_id.alter(Integer)
- block_device_mapping.c.snapshot_id.alter(Integer)
- iscsi_targets.c.volume_id.alter(Integer, nullable=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- # NOTE(jdg) Put the BDM foreign keys back in place
- fkeys = list(block_device_mapping.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id],
- refcolumns=[snapshots.c.id],
- name=fkey_name).create()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
+++ /dev/null
-BEGIN TRANSACTION;
-
- -- change id and snapshot_id datatypes in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
- DROP TABLE volumes;
- ALTER TABLE volumes_backup RENAME TO volumes;
-
- -- change id and volume_id datatypes in snapshots table
- CREATE TABLE snapshots_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- volume_id INTEGER,
- status VARCHAR(255),
- progress VARCHAR(255),
- volume_size INTEGER,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO snapshots_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- user_id,
- project_id,
- volume_id,
- status,
- progress,
- volume_size,
- display_name,
- display_description
- FROM snapshots;
- DROP TABLE snapshots;
- ALTER TABLE snapshots_backup RENAME TO snapshots;
-
- -- change id and volume_id datatypes in iscsi_targets table
- CREATE TABLE iscsi_targets_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- target_num INTEGER,
- host VARCHAR(255),
- volume_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO iscsi_targets_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- target_num,
- host,
- volume_id
- FROM iscsi_targets;
- DROP TABLE iscsi_targets;
- ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets;
-
- CREATE TABLE volume_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- key VARCHAR(255),
- value VARCHAR(255),
- volume_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO volume_metadata_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- key,
- value,
- volume_id
- FROM volume_metadata;
- DROP TABLE volume_metadata;
- ALTER TABLE volume_metadata_backup RENAME TO volume_metadata;
-
- -- change volume_id and snapshot_id datatypes in bdm table
- CREATE TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- device_name VARCHAR(255),
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info VARCHAR(255),
- FOREIGN KEY(instance_uuid) REFERENCES instances(uuid),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots(id),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO block_device_mapping_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping;
- DROP TABLE block_device_mapping;
- ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping;
-
- -- change id datatype in sm_volume table
- CREATE TABLE sm_volume_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- backend_id INTEGER NOT NULL,
- vdi_uuid VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0,1))
- );
- INSERT INTO sm_volume_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- backend_id,
- vdi_uuid
- FROM sm_volume;
- DROP TABLE sm_volume;
- ALTER TABLE sm_volume_backup RENAME TO sm_volume;
-
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
-
- -- change id and snapshot_id datatypes in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
- DROP TABLE volumes;
- ALTER TABLE volumes_backup RENAME TO volumes;
-
- -- change id and volume_id datatypes in snapshots table
- CREATE TABLE snapshots_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- volume_id VARCHAR(36),
- status VARCHAR(255),
- progress VARCHAR(255),
- volume_size INTEGER,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO snapshots_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- user_id,
- project_id,
- volume_id,
- status,
- progress,
- volume_size,
- display_name,
- display_description
- FROM snapshots;
- DROP TABLE snapshots;
- ALTER TABLE snapshots_backup RENAME TO snapshots;
-
- -- change id and volume_id datatypes in iscsi_targets table
- CREATE TABLE iscsi_targets_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- target_num INTEGER,
- host VARCHAR(255),
- volume_id VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO iscsi_targets_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- target_num,
- host,
- volume_id
- FROM iscsi_targets;
- DROP TABLE iscsi_targets;
- ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets;
-
- CREATE TABLE volume_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- key VARCHAR(255),
- value VARCHAR(255),
- volume_id VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO volume_metadata_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- key,
- value,
- volume_id
- FROM volume_metadata;
- DROP TABLE volume_metadata;
- ALTER TABLE volume_metadata_backup RENAME TO volume_metadata;
-
- -- change volume_id and snapshot_id datatypes in bdm table
- CREATE TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- device_name VARCHAR(255),
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id VARCHAR(36),
- volume_id VARCHAR(36),
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info VARCHAR(255),
- FOREIGN KEY(instance_uuid) REFERENCES instances(uuid),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots(id),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO block_device_mapping_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping;
- DROP TABLE block_device_mapping;
- ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping;
-
- -- change id datatype in sm_volume table
- CREATE TABLE sm_volume_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- backend_id INTEGER NOT NULL,
- vdi_uuid VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0,1))
- );
- INSERT INTO sm_volume_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- backend_id,
- vdi_uuid
- FROM sm_volume;
- DROP TABLE sm_volume;
- ALTER TABLE sm_volume_backup RENAME TO sm_volume;
-
-COMMIT;
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, select, Table
-from cinder import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Convert volume and snapshot id columns from int to varchar."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- sm_volumes = Table('sm_volume', meta, autoload=True)
-
- volume_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- new_id = select([volume_mappings.c.uuid],
- volume_mappings.c.id == v['id'])
-
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- sm_volumes.update().\
- where(sm_volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- iscsi_targets.update().\
- where(iscsi_targets.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- volume_metadata.update().\
- where(volume_metadata.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- new_id = select([snapshot_mappings.c.uuid],
- snapshot_mappings.c.id == s['id'])
-
- volumes.update().\
- where(volumes.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.id == s['id']).\
- values(id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
-
-def downgrade(migrate_engine):
- """Convert volume and snapshot id columns back to int."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- sm_volumes = Table('sm_volume', meta, autoload=True)
-
- volume_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- new_id = select([volume_mappings.c.id],
- volume_mappings.c.uuid == v['id'])
-
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- sm_volumes.update().\
- where(sm_volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- iscsi_targets.update().\
- where(iscsi_targets.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- volume_metadata.update().\
- where(volume_metadata.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- new_id = select([snapshot_mappings.c.id],
- snapshot_mappings.c.uuid == s['id'])
-
- volumes.update().\
- where(volumes.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.id == s['id']).\
- values(id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# Copyright 2012 SolidFire Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import select, Column
-from sqlalchemy import MetaData, Integer, String, Table
-from migrate import ForeignKeyConstraint
-
-from cinder import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- instance_uuid_column = Column('instance_uuid', String(36))
-
- instance_uuid_column.create(volumes)
- try:
- volumes.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == volumes.c.instance_id)
- ).execute()
- except Exception:
- instance_uuid_column.drop()
- raise
-
- fkeys = list(volumes.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fk_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[volumes.c.instance_id],
- refcolumns=[instances.c.id],
- name=fk_name).drop()
-
- except Exception:
- LOG.error(_("foreign key could not be dropped"))
- raise
-
- volumes.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- instance_id_column = Column('instance_id', Integer)
-
- instance_id_column.create(volumes)
- try:
- volumes.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == volumes.c.instance_uuid)
- ).execute()
- except Exception:
- instance_id_column.drop()
- raise
-
- fkeys = list(volumes.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fk_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[volumes.c.instance_id],
- refcolumns=[instances.c.id],
- name=fk_name).create()
-
- except Exception:
- LOG.error(_("foreign key could not be created"))
- raise
-
- volumes.c.instance_uuid.drop()
+++ /dev/null
-BEGIN TRANSACTION;
- -- change instance_id in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- NULL,
- instance_uuid,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
-
- UPDATE volumes_backup
- SET instance_id =
- (SELECT id
- FROM instances
- WHERE volumes_backup.instance_uuid = instances.uuid
- );
- DROP TABLE volumes;
-
- CREATE TABLE volumes(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY (instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes_backup;
- DROP TABLE volumes_backup;
-COMMIT;
+++ /dev/null
-BEGIN TRANSACTION;
- -- change instance_id in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- NULL,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
-
- UPDATE volumes_backup
- SET instance_uuid =
- (SELECT uuid
- FROM instances
- WHERE volumes_backup.instance_id = instances.id
- );
- DROP TABLE volumes;
-
- CREATE TABLE volumes(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_uuid,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes_backup;
- DROP TABLE volumes_backup;
-COMMIT;
import os
import sys
+from cinder.db import migration
from cinder.db.sqlalchemy.session import get_engine
from cinder import exception
from cinder import flags
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
- try:
- for table in ('auth_tokens', 'zones', 'export_devices',
- 'fixed_ips', 'floating_ips', 'instances',
- 'key_pairs', 'networks', 'projects', 'quotas',
- 'security_group_instance_association',
- 'security_group_rules', 'security_groups',
- 'services', 'migrations',
- 'users', 'user_project_association',
- 'user_project_role_association',
- 'user_role_association',
- 'virtual_storage_arrays',
- 'volumes', 'volume_metadata',
- 'volume_types', 'volume_type_extra_specs'):
- assert table in meta.tables
- return db_version_control(1)
- except AssertionError:
- return db_version_control(0)
+ tables = meta.tables
+ if len(tables) == 0:
+ db_version_control(migration.INIT_VERSION)
+ return versioning_api.db_version(get_engine(), repository)
+ else:
+ raise exception.Error(_("Upgrade DB using Essex release first."))
def db_version_control(version=None):
"""
from sqlalchemy.orm import relationship, backref, object_mapper
-from sqlalchemy import Column, Integer, BigInteger, String, schema
-from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
+from sqlalchemy import Column, Integer, String, schema
+from sqlalchemy import ForeignKey, DateTime, Boolean
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.schema import ForeignKeyConstraint
from cinder.db.sqlalchemy.session import get_session
availability_zone = Column(String(255), default='cinder')
-class ComputeNode(BASE, CinderBase):
- """Represents a running compute service on a host."""
+class CinderNode(BASE, CinderBase):
+ """Represents a running cinder service on a host."""
- __tablename__ = 'compute_nodes'
+ __tablename__ = 'cinder_nodes'
id = Column(Integer, primary_key=True)
service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
- service = relationship(Service,
- backref=backref('compute_node'),
- foreign_keys=service_id,
- primaryjoin='and_('
- 'ComputeNode.service_id == Service.id,'
- 'ComputeNode.deleted == False)')
-
- vcpus = Column(Integer)
- memory_mb = Column(Integer)
- local_gb = Column(Integer)
- vcpus_used = Column(Integer)
- memory_mb_used = Column(Integer)
- local_gb_used = Column(Integer)
- hypervisor_type = Column(Text)
- hypervisor_version = Column(Integer)
- hypervisor_hostname = Column(String(255))
-
- # Free RAM, the amount of activity (resize, migration, boot, etc.) and
- # the number of running VMs are a good starting point for what's
- # important when making scheduling decisions.
- #
- # NOTE(sandy): We'll need to make this extensible for other schedulers.
- free_ram_mb = Column(Integer)
- free_disk_gb = Column(Integer)
- current_workload = Column(Integer)
- running_vms = Column(Integer)
-
- # Note(masumotok): Expected Strings example:
- #
- # '{"arch":"x86_64",
- # "model":"Nehalem",
- # "topology":{"sockets":1, "threads":2, "cores":3},
- # "features":["tdtscp", "xtpr"]}'
- #
- # Points are "json translatable" and it must have all dictionary keys
- # above, since it is copied from <cpu> tag of getCapabilities()
- # (See libvirt.virtConnection).
- cpu_info = Column(Text, nullable=True)
- disk_available_least = Column(Integer)
-
-
-class Certificate(BASE, CinderBase):
- """Represents a an x509 certificate"""
- __tablename__ = 'certificates'
- id = Column(Integer, primary_key=True)
-
- user_id = Column(String(255))
- project_id = Column(String(255))
- file_name = Column(String(255))
-
-
-class Instance(BASE, CinderBase):
- """Represents a guest vm."""
- __tablename__ = 'instances'
- injected_files = []
-
- id = Column(Integer, primary_key=True, autoincrement=True)
-
- @property
- def name(self):
- try:
- base_name = FLAGS.instance_name_template % self.id
- except TypeError:
- # Support templates like "uuid-%(uuid)s", etc.
- info = {}
- for key, value in self.iteritems():
- # prevent recursion if someone specifies %(name)s
- # %(name)s will not be valid.
- if key == 'name':
- continue
- info[key] = value
- try:
- base_name = FLAGS.instance_name_template % info
- except KeyError:
- base_name = self.uuid
- if getattr(self, '_rescue', False):
- base_name += "-rescue"
- return base_name
-
- user_id = Column(String(255))
- project_id = Column(String(255))
-
- image_ref = Column(String(255))
- kernel_id = Column(String(255))
- ramdisk_id = Column(String(255))
- server_name = Column(String(255))
-
-# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True)
-# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
-# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
-# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id))
-# kernel = relationship(Kernel, backref=backref('instances', order_by=id))
-# project = relationship(Project, backref=backref('instances', order_by=id))
-
- launch_index = Column(Integer)
- key_name = Column(String(255))
- key_data = Column(Text)
-
- power_state = Column(Integer)
- vm_state = Column(String(255))
- task_state = Column(String(255))
-
- memory_mb = Column(Integer)
- vcpus = Column(Integer)
- root_gb = Column(Integer)
- ephemeral_gb = Column(Integer)
-
- hostname = Column(String(255))
- host = Column(String(255)) # , ForeignKey('hosts.id'))
-
- # *not* flavor_id
- instance_type_id = Column(Integer)
-
- user_data = Column(Text)
-
- reservation_id = Column(String(255))
-
- scheduled_at = Column(DateTime)
- launched_at = Column(DateTime)
- terminated_at = Column(DateTime)
-
- availability_zone = Column(String(255))
-
- # User editable field for display in user-facing UIs
- display_name = Column(String(255))
- display_description = Column(String(255))
-
- # To remember on which host an instance booted.
- # An instance may have moved to another host by live migration.
- launched_on = Column(Text)
- locked = Column(Boolean)
-
- os_type = Column(String(255))
- architecture = Column(String(255))
- vm_mode = Column(String(255))
- uuid = Column(String(36))
-
- root_device_name = Column(String(255))
- default_ephemeral_device = Column(String(255), nullable=True)
- default_swap_device = Column(String(255), nullable=True)
- config_drive = Column(String(255))
-
- # User editable field meant to represent what ip should be used
- # to connect to the instance
- access_ip_v4 = Column(String(255))
- access_ip_v6 = Column(String(255))
-
- auto_disk_config = Column(Boolean())
- progress = Column(Integer)
-
- # EC2 instance_initiated_shutdown_terminate
- # True: -> 'terminate'
- # False: -> 'stop'
- shutdown_terminate = Column(Boolean(), default=True, nullable=False)
-
- # EC2 disable_api_termination
- disable_terminate = Column(Boolean(), default=False, nullable=False)
-
- # OpenStack compute cell name
- cell_name = Column(String(255))
-
-
-class InstanceInfoCache(BASE, CinderBase):
- """
- Represents a cache of information about an instance
- """
- __tablename__ = 'instance_info_caches'
- id = Column(Integer, primary_key=True, autoincrement=True)
-
- # text column used for storing a json object of network data for api
- network_info = Column(Text)
-
- instance_id = Column(String(36), ForeignKey('instances.uuid'),
- nullable=False, unique=True)
- instance = relationship(Instance,
- backref=backref('info_cache', uselist=False),
- foreign_keys=instance_id,
- primaryjoin=instance_id == Instance.uuid)
-
-
-class InstanceTypes(BASE, CinderBase):
- """Represent possible instance_types or flavor of VM offered"""
- __tablename__ = "instance_types"
- id = Column(Integer, primary_key=True)
- name = Column(String(255))
- memory_mb = Column(Integer)
- vcpus = Column(Integer)
- root_gb = Column(Integer)
- ephemeral_gb = Column(Integer)
- flavorid = Column(String(255))
- swap = Column(Integer, nullable=False, default=0)
- rxtx_factor = Column(Float, nullable=False, default=1)
- vcpu_weight = Column(Integer, nullable=True)
-
- instances = relationship(Instance,
- backref=backref('instance_type', uselist=False),
- foreign_keys=id,
- primaryjoin='and_('
- 'Instance.instance_type_id == '
- 'InstanceTypes.id, '
- 'InstanceTypes.deleted == False)')
class Volume(BASE, CinderBase):
value = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
volume = relationship(Volume, backref="volume_metadata",
- foreign_keys=volume_id,
- primaryjoin='and_('
- 'VolumeMetadata.volume_id == Volume.id,'
- 'VolumeMetadata.deleted == False)')
+ foreign_keys=volume_id,
+ primaryjoin='and_('
+ 'VolumeMetadata.volume_id == Volume.id,'
+ 'VolumeMetadata.deleted == False)')
class VolumeTypes(BASE, CinderBase):
backref=backref('volume_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
- 'Volume.volume_type_id == VolumeTypes.id, '
- 'VolumeTypes.deleted == False)')
+ 'Volume.volume_type_id == VolumeTypes.id, '
+ 'VolumeTypes.deleted == False)')
class VolumeTypeExtraSpecs(BASE, CinderBase):
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
- volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
- nullable=False)
- volume_type = relationship(VolumeTypes, backref="extra_specs",
- foreign_keys=volume_type_id,
- primaryjoin='and_('
- 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
- 'VolumeTypeExtraSpecs.deleted == False)')
+ volume_type_id = Column(Integer,
+ ForeignKey('volume_types.id'),
+ nullable=False)
+ volume_type = relationship(
+ VolumeTypes,
+ backref="extra_specs",
+ foreign_keys=volume_type_id,
+ primaryjoin='and_('
+ 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
+ 'VolumeTypeExtraSpecs.deleted == False)'
+ )
class Quota(BASE, CinderBase):
display_description = Column(String(255))
-class BlockDeviceMapping(BASE, CinderBase):
- """Represents block device mapping that is defined by EC2"""
- __tablename__ = "block_device_mapping"
- id = Column(Integer, primary_key=True, autoincrement=True)
-
- instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
- nullable=False)
- instance = relationship(Instance,
- backref=backref('block_device_mapping'),
- foreign_keys=instance_uuid,
- primaryjoin='and_(BlockDeviceMapping.'
- 'instance_uuid=='
- 'Instance.uuid,'
- 'BlockDeviceMapping.deleted=='
- 'False)')
- device_name = Column(String(255), nullable=False)
-
- # default=False for compatibility with the existing code.
- # With the EC2 API, the default is True for a device specified in the AMI
- # and False for devices created at any other time.
- delete_on_termination = Column(Boolean, default=False)
-
- # for ephemeral device
- virtual_name = Column(String(255), nullable=True)
-
- # for snapshot or volume
- snapshot_id = Column(String(36), ForeignKey('snapshots.id'))
- # outer join
- snapshot = relationship(Snapshot,
- foreign_keys=snapshot_id)
-
- volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True)
- volume = relationship(Volume,
- foreign_keys=volume_id)
- volume_size = Column(Integer, nullable=True)
-
- # for no device to suppress devices.
- no_device = Column(Boolean, nullable=True)
-
- connection_info = Column(Text, nullable=True)
-
-
class IscsiTarget(BASE, CinderBase):
- """Represates an iscsi target for a given host"""
+ """Represents an iscsi target for a given host"""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"),
{'mysql_engine': 'InnoDB'})
backref=backref('iscsi_target', uselist=False),
foreign_keys=volume_id,
primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
- 'IscsiTarget.deleted==False)')
-
-
-class SecurityGroupInstanceAssociation(BASE, CinderBase):
- __tablename__ = 'security_group_instance_association'
- id = Column(Integer, primary_key=True)
- security_group_id = Column(Integer, ForeignKey('security_groups.id'))
- instance_id = Column(Integer, ForeignKey('instances.id'))
-
-
-class SecurityGroup(BASE, CinderBase):
- """Represents a security group."""
- __tablename__ = 'security_groups'
- id = Column(Integer, primary_key=True)
-
- name = Column(String(255))
- description = Column(String(255))
- user_id = Column(String(255))
- project_id = Column(String(255))
-
- instances = relationship(Instance,
- secondary="security_group_instance_association",
- primaryjoin='and_('
- 'SecurityGroup.id == '
- 'SecurityGroupInstanceAssociation.security_group_id,'
- 'SecurityGroupInstanceAssociation.deleted == False,'
- 'SecurityGroup.deleted == False)',
- secondaryjoin='and_('
- 'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
- # (anthony) the condition below shouldn't be necessary now that the
- # association is being marked as deleted. However, removing this
- # may cause existing deployments to choke, so I'm leaving it
- 'Instance.deleted == False)',
- backref='security_groups')
-
-
-class SecurityGroupIngressRule(BASE, CinderBase):
- """Represents a rule in a security group."""
- __tablename__ = 'security_group_rules'
- id = Column(Integer, primary_key=True)
-
- parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
- parent_group = relationship("SecurityGroup", backref="rules",
- foreign_keys=parent_group_id,
- primaryjoin='and_('
- 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
- 'SecurityGroupIngressRule.deleted == False)')
-
- protocol = Column(String(5)) # "tcp", "udp", or "icmp"
- from_port = Column(Integer)
- to_port = Column(Integer)
- cidr = Column(String(255))
-
- # Note: This is not the parent SecurityGroup. It's the SecurityGroup we're
- # granting access to.
- group_id = Column(Integer, ForeignKey('security_groups.id'))
- grantee_group = relationship("SecurityGroup",
- foreign_keys=group_id,
- primaryjoin='and_('
- 'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
- 'SecurityGroupIngressRule.deleted == False)')
-
-
-class ProviderFirewallRule(BASE, CinderBase):
- """Represents a rule in a security group."""
- __tablename__ = 'provider_fw_rules'
- id = Column(Integer, primary_key=True)
-
- protocol = Column(String(5)) # "tcp", "udp", or "icmp"
- from_port = Column(Integer)
- to_port = Column(Integer)
- cidr = Column(String(255))
-
-
-class KeyPair(BASE, CinderBase):
- """Represents a public key pair for ssh."""
- __tablename__ = 'key_pairs'
- id = Column(Integer, primary_key=True)
-
- name = Column(String(255))
-
- user_id = Column(String(255))
-
- fingerprint = Column(String(255))
- public_key = Column(Text)
+ 'IscsiTarget.deleted==False)')
class Migration(BASE, CinderBase):
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
- instance_uuid = Column(String(255), ForeignKey('instances.uuid'),
- nullable=True)
+ instance_uuid = Column(String(255),
+ ForeignKey('instances.uuid'),
+ nullable=True)
#TODO(_cerberus_): enum
status = Column(String(255))
-class Network(BASE, CinderBase):
- """Represents a network."""
- __tablename__ = 'networks'
- __table_args__ = (schema.UniqueConstraint("vpn_public_address",
- "vpn_public_port"),
- {'mysql_engine': 'InnoDB'})
- id = Column(Integer, primary_key=True)
- label = Column(String(255))
-
- injected = Column(Boolean, default=False)
- cidr = Column(String(255), unique=True)
- cidr_v6 = Column(String(255), unique=True)
- multi_host = Column(Boolean, default=False)
-
- gateway_v6 = Column(String(255))
- netmask_v6 = Column(String(255))
- netmask = Column(String(255))
- bridge = Column(String(255))
- bridge_interface = Column(String(255))
- gateway = Column(String(255))
- broadcast = Column(String(255))
- dns1 = Column(String(255))
- dns2 = Column(String(255))
-
- vlan = Column(Integer)
- vpn_public_address = Column(String(255))
- vpn_public_port = Column(Integer)
- vpn_private_address = Column(String(255))
- dhcp_start = Column(String(255))
-
- rxtx_base = Column(Integer)
-
- project_id = Column(String(255))
- priority = Column(Integer)
- host = Column(String(255)) # , ForeignKey('hosts.id'))
- uuid = Column(String(36))
-
-
-class VirtualInterface(BASE, CinderBase):
- """Represents a virtual interface on an instance."""
- __tablename__ = 'virtual_interfaces'
- id = Column(Integer, primary_key=True)
- address = Column(String(255), unique=True)
- network_id = Column(Integer, nullable=False)
- instance_id = Column(Integer, nullable=False)
- uuid = Column(String(36))
-
-
-# TODO(vish): can these both come from the same baseclass?
-class FixedIp(BASE, CinderBase):
- """Represents a fixed ip for an instance."""
- __tablename__ = 'fixed_ips'
- id = Column(Integer, primary_key=True)
- address = Column(String(255))
- network_id = Column(Integer, nullable=True)
- virtual_interface_id = Column(Integer, nullable=True)
- instance_id = Column(Integer, nullable=True)
- # associated means that a fixed_ip has its instance_id column set
- # allocated means that a fixed_ip has a its virtual_interface_id column set
- allocated = Column(Boolean, default=False)
- # leased means dhcp bridge has leased the ip
- leased = Column(Boolean, default=False)
- reserved = Column(Boolean, default=False)
- host = Column(String(255))
-
-
-class FloatingIp(BASE, CinderBase):
- """Represents a floating ip that dynamically forwards to a fixed ip."""
- __tablename__ = 'floating_ips'
- id = Column(Integer, primary_key=True)
- address = Column(String(255))
- fixed_ip_id = Column(Integer, nullable=True)
- project_id = Column(String(255))
- host = Column(String(255)) # , ForeignKey('hosts.id'))
- auto_assigned = Column(Boolean, default=False, nullable=False)
- pool = Column(String(255))
- interface = Column(String(255))
-
-
-class AuthToken(BASE, CinderBase):
- """Represents an authorization token for all API transactions.
-
- Fields are a string representing the actual token and a user id for
- mapping to the actual user
-
- """
- __tablename__ = 'auth_tokens'
- token_hash = Column(String(255), primary_key=True)
- user_id = Column(String(255))
- server_management_url = Column(String(255))
- storage_url = Column(String(255))
- cdn_management_url = Column(String(255))
-
-
-class User(BASE, CinderBase):
- """Represents a user."""
- __tablename__ = 'users'
- id = Column(String(255), primary_key=True)
-
- name = Column(String(255))
- access_key = Column(String(255))
- secret_key = Column(String(255))
-
- is_admin = Column(Boolean)
-
-
-class Project(BASE, CinderBase):
- """Represents a project."""
- __tablename__ = 'projects'
- id = Column(String(255), primary_key=True)
- name = Column(String(255))
- description = Column(String(255))
-
- project_manager = Column(String(255), ForeignKey(User.id))
-
- members = relationship(User,
- secondary='user_project_association',
- backref='projects')
-
-
-class DNSDomain(BASE, CinderBase):
- """Represents a DNS domain with availability zone or project info."""
- __tablename__ = 'dns_domains'
- domain = Column(String(512), primary_key=True)
- scope = Column(String(255))
- availability_zone = Column(String(255))
- project_id = Column(String(255))
- project = relationship(Project,
- primaryjoin=project_id == Project.id,
- foreign_keys=[Project.id],
- uselist=False)
-
-
-class UserProjectRoleAssociation(BASE, CinderBase):
- __tablename__ = 'user_project_role_association'
- user_id = Column(String(255), primary_key=True)
- user = relationship(User,
- primaryjoin=user_id == User.id,
- foreign_keys=[User.id],
- uselist=False)
-
- project_id = Column(String(255), primary_key=True)
- project = relationship(Project,
- primaryjoin=project_id == Project.id,
- foreign_keys=[Project.id],
- uselist=False)
-
- role = Column(String(255), primary_key=True)
- ForeignKeyConstraint(['user_id',
- 'project_id'],
- ['user_project_association.user_id',
- 'user_project_association.project_id'])
-
-
-class UserRoleAssociation(BASE, CinderBase):
- __tablename__ = 'user_role_association'
- user_id = Column(String(255), ForeignKey('users.id'), primary_key=True)
- user = relationship(User, backref='roles')
- role = Column(String(255), primary_key=True)
-
-
-class UserProjectAssociation(BASE, CinderBase):
- __tablename__ = 'user_project_association'
- user_id = Column(String(255), ForeignKey(User.id), primary_key=True)
- project_id = Column(String(255), ForeignKey(Project.id), primary_key=True)
-
-
-class ConsolePool(BASE, CinderBase):
- """Represents pool of consoles on the same physical node."""
- __tablename__ = 'console_pools'
- id = Column(Integer, primary_key=True)
- address = Column(String(255))
- username = Column(String(255))
- password = Column(String(255))
- console_type = Column(String(255))
- public_hostname = Column(String(255))
- host = Column(String(255))
- compute_host = Column(String(255))
-
-
-class Console(BASE, CinderBase):
- """Represents a console session for an instance."""
- __tablename__ = 'consoles'
- id = Column(Integer, primary_key=True)
- instance_name = Column(String(255))
- instance_id = Column(Integer)
- password = Column(String(255))
- port = Column(Integer, nullable=True)
- pool_id = Column(Integer, ForeignKey('console_pools.id'))
- pool = relationship(ConsolePool, backref=backref('consoles'))
-
-
-class InstanceMetadata(BASE, CinderBase):
- """Represents a metadata key/value pair for an instance"""
- __tablename__ = 'instance_metadata'
- id = Column(Integer, primary_key=True)
- key = Column(String(255))
- value = Column(String(255))
- instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
- instance = relationship(Instance, backref="metadata",
- foreign_keys=instance_id,
- primaryjoin='and_('
- 'InstanceMetadata.instance_id == Instance.id,'
- 'InstanceMetadata.deleted == False)')
-
-
-class InstanceTypeExtraSpecs(BASE, CinderBase):
- """Represents additional specs as key/value pairs for an instance_type"""
- __tablename__ = 'instance_type_extra_specs'
- id = Column(Integer, primary_key=True)
- key = Column(String(255))
- value = Column(String(255))
- instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
- nullable=False)
- instance_type = relationship(InstanceTypes, backref="extra_specs",
- foreign_keys=instance_type_id,
- primaryjoin='and_('
- 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
- 'InstanceTypeExtraSpecs.deleted == False)')
-
-
-class Cell(BASE, CinderBase):
- """Represents parent and child cells of this cell."""
- __tablename__ = 'cells'
- id = Column(Integer, primary_key=True)
- name = Column(String(255))
- api_url = Column(String(255))
- username = Column(String(255))
- password = Column(String(255))
- weight_offset = Column(Float(), default=0.0)
- weight_scale = Column(Float(), default=1.0)
- is_parent = Column(Boolean())
- rpc_host = Column(String(255))
- rpc_port = Column(Integer())
- rpc_virtual_host = Column(String(255))
-
-
-class AggregateHost(BASE, CinderBase):
- """Represents a host that is member of an aggregate."""
- __tablename__ = 'aggregate_hosts'
- id = Column(Integer, primary_key=True, autoincrement=True)
- host = Column(String(255), unique=True)
- aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
-
-
-class AggregateMetadata(BASE, CinderBase):
- """Represents a metadata key/value pair for an aggregate."""
- __tablename__ = 'aggregate_metadata'
- id = Column(Integer, primary_key=True)
- key = Column(String(255), nullable=False)
- value = Column(String(255), nullable=False)
- aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
-
-
-class Aggregate(BASE, CinderBase):
- """Represents a cluster of hosts that exists in this zone."""
- __tablename__ = 'aggregates'
- id = Column(Integer, primary_key=True, autoincrement=True)
- name = Column(String(255), unique=True)
- operational_state = Column(String(255), nullable=False)
- availability_zone = Column(String(255), nullable=False)
- _hosts = relationship(AggregateHost,
- secondary="aggregate_hosts",
- primaryjoin='and_('
- 'Aggregate.id == AggregateHost.aggregate_id,'
- 'AggregateHost.deleted == False,'
- 'Aggregate.deleted == False)',
- secondaryjoin='and_('
- 'AggregateHost.aggregate_id == Aggregate.id, '
- 'AggregateHost.deleted == False,'
- 'Aggregate.deleted == False)',
- backref='aggregates')
-
- _metadata = relationship(AggregateMetadata,
- secondary="aggregate_metadata",
- primaryjoin='and_('
- 'Aggregate.id == AggregateMetadata.aggregate_id,'
- 'AggregateMetadata.deleted == False,'
- 'Aggregate.deleted == False)',
- secondaryjoin='and_('
- 'AggregateMetadata.aggregate_id == Aggregate.id, '
- 'AggregateMetadata.deleted == False,'
- 'Aggregate.deleted == False)',
- backref='aggregates')
-
- @property
- def hosts(self):
- return [h.host for h in self._hosts]
-
- @property
- def metadetails(self):
- return dict([(m.key, m.value) for m in self._metadata])
-
-
-class AgentBuild(BASE, CinderBase):
- """Represents an agent build."""
- __tablename__ = 'agent_builds'
- id = Column(Integer, primary_key=True)
- hypervisor = Column(String(255))
- os = Column(String(255))
- architecture = Column(String(255))
- version = Column(String(255))
- url = Column(String(255))
- md5hash = Column(String(255))
-
-
-class BandwidthUsage(BASE, CinderBase):
- """Cache for instance bandwidth usage data pulled from the hypervisor"""
- __tablename__ = 'bw_usage_cache'
- id = Column(Integer, primary_key=True, nullable=False)
- uuid = Column(String(36), nullable=False)
- mac = Column(String(255), nullable=False)
- start_period = Column(DateTime, nullable=False)
- last_refreshed = Column(DateTime)
- bw_in = Column(BigInteger)
- bw_out = Column(BigInteger)
-
-
-class S3Image(BASE, CinderBase):
- """Compatibility layer for the S3 image service talking to Glance"""
- __tablename__ = 's3_images'
- id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
- uuid = Column(String(36), nullable=False)
-
-
-class VolumeIdMapping(BASE, CinderBase):
- """Compatability layer for the EC2 volume service"""
- __tablename__ = 'volume_id_mappings'
- id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
- uuid = Column(String(36), nullable=False)
-
-
-class SnapshotIdMapping(BASE, CinderBase):
- """Compatability layer for the EC2 snapshot service"""
- __tablename__ = 'snapshot_id_mappings'
- id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
- uuid = Column(String(36), nullable=False)
-
-
class SMFlavors(BASE, CinderBase):
"""Represents a flavor for SM volumes."""
__tablename__ = 'sm_flavors'
vdi_uuid = Column(String(255))
-class InstanceFault(BASE, CinderBase):
- __tablename__ = 'instance_faults'
- id = Column(Integer(), primary_key=True, autoincrement=True)
- instance_uuid = Column(String(36),
- ForeignKey('instances.uuid'),
- nullable=False)
- code = Column(Integer(), nullable=False)
- message = Column(String(255))
- details = Column(Text)
-
-
def register_models():
"""Register Models and create metadata.
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
- models = (AgentBuild,
- Aggregate,
- AggregateHost,
- AggregateMetadata,
- AuthToken,
- Certificate,
- Cell,
- Console,
- ConsolePool,
- FixedIp,
- FloatingIp,
- Instance,
- InstanceActions,
- InstanceFault,
- InstanceMetadata,
- InstanceTypeExtraSpecs,
- InstanceTypes,
- IscsiTarget,
- Migration,
- Network,
- Project,
- SecurityGroup,
- SecurityGroupIngressRule,
- SecurityGroupInstanceAssociation,
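+ # Only the models still defined in this module (service, migration and
+ # volume related) are registered here.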
+ models = (Migration,
Service,
SMBackendConf,
SMFlavors,
SMVolume,
- User,
Volume,
VolumeMetadata,
VolumeTypeExtraSpecs,
VolumeTypes,
- VolumeIdMapping,
- SnapshotIdMapping,
)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
+++ /dev/null
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for the DB API"""
-
-from cinder import test
-from cinder import context
-from cinder import db
-from cinder import exception
-from cinder import flags
-
-FLAGS = flags.FLAGS
-
-
-def _get_fake_aggr_values():
- return {'name': 'fake_aggregate',
- 'availability_zone': 'fake_avail_zone', }
-
-
-def _get_fake_aggr_metadata():
- return {'fake_key1': 'fake_value1',
- 'fake_key2': 'fake_value2'}
-
-
-def _get_fake_aggr_hosts():
- return ['foo.openstack.org']
-
-
-def _create_aggregate(context=context.get_admin_context(),
- values=_get_fake_aggr_values(),
- metadata=_get_fake_aggr_metadata()):
- return db.aggregate_create(context, values, metadata)
-
-
-def _create_aggregate_with_hosts(context=context.get_admin_context(),
- values=_get_fake_aggr_values(),
- metadata=_get_fake_aggr_metadata(),
- hosts=_get_fake_aggr_hosts()):
- result = _create_aggregate(context=context,
- values=values, metadata=metadata)
- for host in hosts:
- db.aggregate_host_add(context, result.id, host)
- return result
-
-
-class AggregateDBApiTestCase(test.TestCase):
- def setUp(self):
- super(AggregateDBApiTestCase, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def test_aggregate_create(self):
- """Ensure aggregate can be created with no metadata."""
- result = _create_aggregate(metadata=None)
- self.assertEqual(result['operational_state'], 'created')
-
- def test_aggregate_create_avoid_name_conflict(self):
- """Test we can avoid conflict on deleted aggregates."""
- r1 = _create_aggregate(metadata=None)
- db.aggregate_delete(context.get_admin_context(), r1.id)
- values = {'name': r1.name, 'availability_zone': 'new_zone'}
- r2 = _create_aggregate(values=values)
- self.assertEqual(r2.name, values['name'])
- self.assertEqual(r2.availability_zone, values['availability_zone'])
- self.assertEqual(r2.operational_state, "created")
-
- def test_aggregate_create_raise_exist_exc(self):
- """Ensure aggregate names are distinct."""
- _create_aggregate(metadata=None)
- self.assertRaises(exception.AggregateNameExists,
- _create_aggregate, metadata=None)
-
- def test_aggregate_get_raise_not_found(self):
- """Ensure AggregateNotFound is raised when getting an aggregate."""
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_get,
- ctxt, aggregate_id)
-
- def test_aggregate_metadata_get_raise_not_found(self):
- """Ensure AggregateNotFound is raised when getting metadata."""
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_metadata_get,
- ctxt, aggregate_id)
-
- def test_aggregate_create_with_metadata(self):
- """Ensure aggregate can be created with metadata."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())
-
- def test_aggregate_create_low_privi_context(self):
- """Ensure right context is applied when creating aggregate."""
- self.assertRaises(exception.AdminRequired,
- db.aggregate_create,
- self.context, _get_fake_aggr_values())
-
- def test_aggregate_get(self):
- """Ensure we can get aggregate with all its relations."""
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt)
- expected = db.aggregate_get(ctxt, result.id)
- self.assertEqual(_get_fake_aggr_hosts(), expected.hosts)
- self.assertEqual(_get_fake_aggr_metadata(), expected.metadetails)
-
- def test_aggregate_get_by_host(self):
- """Ensure we can get an aggregate by host."""
- ctxt = context.get_admin_context()
- r1 = _create_aggregate_with_hosts(context=ctxt)
- r2 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
- self.assertEqual(r1.id, r2.id)
-
- def test_aggregate_get_by_host_not_found(self):
- """Ensure AggregateHostNotFound is raised with unknown host."""
- ctxt = context.get_admin_context()
- _create_aggregate_with_hosts(context=ctxt)
- self.assertRaises(exception.AggregateHostNotFound,
- db.aggregate_get_by_host, ctxt, 'unknown_host')
-
- def test_aggregate_delete_raise_not_found(self):
- """Ensure AggregateNotFound is raised when deleting an aggregate."""
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_delete,
- ctxt, aggregate_id)
-
- def test_aggregate_delete(self):
- """Ensure we can delete an aggregate."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- db.aggregate_delete(ctxt, result['id'])
- expected = db.aggregate_get_all(ctxt)
- self.assertEqual(0, len(expected))
- aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
- result['id'])
- self.assertEqual(aggregate["operational_state"], "dismissed")
-
- def test_aggregate_update(self):
- """Ensure an aggregate can be updated."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- new_values = _get_fake_aggr_values()
- new_values['availability_zone'] = 'different_avail_zone'
- updated = db.aggregate_update(ctxt, 1, new_values)
- self.assertNotEqual(result.availability_zone,
- updated.availability_zone)
-
- def test_aggregate_update_with_metadata(self):
- """Ensure an aggregate can be updated with metadata."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- values = _get_fake_aggr_values()
- values['metadata'] = _get_fake_aggr_metadata()
- db.aggregate_update(ctxt, 1, values)
- expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(_get_fake_aggr_metadata(), expected)
-
- def test_aggregate_update_with_existing_metadata(self):
- """Ensure an aggregate can be updated with existing metadata."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- values = _get_fake_aggr_values()
- values['metadata'] = _get_fake_aggr_metadata()
- values['metadata']['fake_key1'] = 'foo'
- db.aggregate_update(ctxt, 1, values)
- expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(values['metadata'], expected)
-
- def test_aggregate_update_raise_not_found(self):
- """Ensure AggregateNotFound is raised when updating an aggregate."""
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- new_values = _get_fake_aggr_values()
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_update, ctxt, aggregate_id, new_values)
-
- def test_aggregate_get_all(self):
- """Ensure we can get all aggregates."""
- ctxt = context.get_admin_context()
- counter = 3
- for c in xrange(counter):
- _create_aggregate(context=ctxt,
- values={'name': 'fake_aggregate_%d' % c,
- 'availability_zone': 'fake_avail_zone'},
- metadata=None)
- results = db.aggregate_get_all(ctxt)
- self.assertEqual(len(results), counter)
-
- def test_aggregate_get_all_non_deleted(self):
- """Ensure we get only non-deleted aggregates."""
- ctxt = context.get_admin_context()
- add_counter = 5
- remove_counter = 2
- aggregates = []
- for c in xrange(1, add_counter):
- values = {'name': 'fake_aggregate_%d' % c,
- 'availability_zone': 'fake_avail_zone'}
- aggregates.append(_create_aggregate(context=ctxt,
- values=values, metadata=None))
- for c in xrange(1, remove_counter):
- db.aggregate_delete(ctxt, aggregates[c - 1].id)
- results = db.aggregate_get_all(ctxt)
- self.assertEqual(len(results), add_counter - remove_counter)
-
- def test_aggregate_metadata_add(self):
- """Ensure we can add metadata for the aggregate."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- metadata = _get_fake_aggr_metadata()
- db.aggregate_metadata_add(ctxt, result.id, metadata)
- expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(metadata, expected)
-
- def test_aggregate_metadata_update(self):
- """Ensure we can update metadata for the aggregate."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- metadata = _get_fake_aggr_metadata()
- key = metadata.keys()[0]
- db.aggregate_metadata_delete(ctxt, result.id, key)
- new_metadata = {key: 'foo'}
- db.aggregate_metadata_add(ctxt, result.id, new_metadata)
- expected = db.aggregate_metadata_get(ctxt, result.id)
- metadata[key] = 'foo'
- self.assertDictMatch(metadata, expected)
-
- def test_aggregate_metadata_delete(self):
- """Ensure we can delete metadata for the aggregate."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- metadata = _get_fake_aggr_metadata()
- db.aggregate_metadata_add(ctxt, result.id, metadata)
- db.aggregate_metadata_delete(ctxt, result.id, metadata.keys()[0])
- expected = db.aggregate_metadata_get(ctxt, result.id)
- del metadata[metadata.keys()[0]]
- self.assertDictMatch(metadata, expected)
-
- def test_aggregate_metadata_delete_raise_not_found(self):
- """Ensure AggregateMetadataNotFound is raised when deleting."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- self.assertRaises(exception.AggregateMetadataNotFound,
- db.aggregate_metadata_delete,
- ctxt, result.id, 'foo_key')
-
- def test_aggregate_host_add(self):
- """Ensure we can add host to the aggregate."""
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- expected = db.aggregate_host_get_all(ctxt, result.id)
- self.assertEqual(_get_fake_aggr_hosts(), expected)
-
- def test_aggregate_host_add_deleted(self):
- """Ensure we can add a host that was previously deleted."""
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- host = _get_fake_aggr_hosts()[0]
- db.aggregate_host_delete(ctxt, result.id, host)
- db.aggregate_host_add(ctxt, result.id, host)
- expected = db.aggregate_host_get_all(ctxt, result.id)
- self.assertEqual(len(expected), 1)
-
- def test_aggregate_host_add_duplicate_raise_conflict(self):
- """Ensure we cannot add host to distinct aggregates."""
- ctxt = context.get_admin_context()
- _create_aggregate_with_hosts(context=ctxt, metadata=None)
- self.assertRaises(exception.AggregateHostConflict,
- _create_aggregate_with_hosts, ctxt,
- values={'name': 'fake_aggregate2',
- 'availability_zone': 'fake_avail_zone2', },
- metadata=None)
-
- def test_aggregate_host_add_duplicate_raise_exist_exc(self):
- """Ensure we cannot add host to the same aggregate."""
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- self.assertRaises(exception.AggregateHostExists,
- db.aggregate_host_add,
- ctxt, result.id, _get_fake_aggr_hosts()[0])
-
- def test_aggregate_host_add_raise_not_found(self):
- """Ensure AggregateFound when adding a host."""
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- host = _get_fake_aggr_hosts()[0]
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_host_add,
- ctxt, aggregate_id, host)
-
- def test_aggregate_host_delete(self):
- """Ensure we can add host to the aggregate."""
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- db.aggregate_host_delete(ctxt, result.id,
- _get_fake_aggr_hosts()[0])
- expected = db.aggregate_host_get_all(ctxt, result.id)
- self.assertEqual(0, len(expected))
-
- def test_aggregate_host_delete_raise_not_found(self):
- """Ensure AggregateHostNotFound is raised when deleting a host."""
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- self.assertRaises(exception.AggregateHostNotFound,
- db.aggregate_host_delete,
- ctxt, result.id, _get_fake_aggr_hosts()[0])
noninnodb = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='openstack_citest' "
- "and ENGINE!='InnoDB'")
+ "and ENGINE!='InnoDB' "
+ "and TABLE_NAME!='migrate_version'")
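+ # The migrate_version bookkeeping table is created by sqlalchemy-migrate
+ # itself rather than by our migrations, so it is excluded from the
+ # InnoDB check above.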
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
# upgrades successfully.
# Place the database under version control
- migration_api.version_control(engine, TestMigrations.REPOSITORY)
- self.assertEqual(0,
+ migration_api.version_control(engine, TestMigrations.REPOSITORY,
+ migration.INIT_VERSION)
+ self.assertEqual(migration.INIT_VERSION,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
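+ # The repository is placed under version control at INIT_VERSION; the
+ # first real migration is INIT_VERSION + 1, so apply it once here before
+ # walking the remaining versions below.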
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY,
+ migration.INIT_VERSION + 1)
+
LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
- for version in xrange(1, TestMigrations.REPOSITORY.latest + 1):
+ for version in xrange(migration.INIT_VERSION + 2,
+ TestMigrations.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version)
if snake_walk:
- # Now walk it back down to 0 from the latest, testing
- # the downgrade paths.
+ # Now walk it back down to INIT_VERSION + 1 from the latest,
+ # testing the downgrade paths.
for version in reversed(
- xrange(0, TestMigrations.REPOSITORY.latest)):
+ xrange(migration.INIT_VERSION + 1,
+ TestMigrations.REPOSITORY.latest)):
# downgrade -> upgrade -> downgrade
self._migrate_down(engine, version)
if snake_walk: