"""Maps keys for attachment summary view."""
d = {}
- # TODO(bcwaldon): remove str cast once we use uuids
- volume_id = str(vol['id'])
+ volume_id = vol['id']
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volume_id'] = volume_id
- if vol.get('instance'):
- d['server_id'] = vol['instance']['uuid']
+ d['server_id'] = vol['instance_uuid']
if vol.get('mountpoint'):
d['device'] = vol['mountpoint']
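For illustration, a minimal sketch (all values made up) of what the reworked attachment view produces now that the joined `instance` record is no longer consulted:

```python
# Hypothetical values, for illustration only -- not part of the patch.
vol = {
    'id': 'c3b2d8e4-0000-4000-8000-000000000001',
    'instance_uuid': 'a1a1a1a1-0000-4000-8000-000000000002',
    'mountpoint': '/dev/vdb',
}

d = {}
volume_id = vol['id']                  # uuid string, no str() cast needed
d['id'] = volume_id                    # attachment id reuses the volume id
d['volume_id'] = volume_id
d['server_id'] = vol['instance_uuid']  # read directly off the volume row
if vol.get('mountpoint'):
    d['device'] = vol['mountpoint']
```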
"""Maps keys for volumes summary view."""
d = {}
- # TODO(bcwaldon): remove str cast once we use uuids
- d['id'] = str(vol['id'])
+ d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availability_zone'] = vol['availability_zone']
d['volume_type'] = str(vol['volume_type_id'])
d['snapshot_id'] = vol['snapshot_id']
- # TODO(bcwaldon): remove str cast once we use uuids
- if d['snapshot_id'] is not None:
- d['snapshot_id'] = str(d['snapshot_id'])
LOG.audit(_("vol=%s"), vol, context=context)
return IMPL.volume_get_all_by_host(context, host)
-def volume_get_all_by_instance(context, instance_id):
+def volume_get_all_by_instance_uuid(context, instance_uuid):
"""Get all volumes belonging to a instance."""
- return IMPL.volume_get_all_by_instance(context, instance_id)
+ return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
def volume_get_all_by_project(context, project_id):
return IMPL.volume_get_all_by_project(context, project_id)
-def volume_get_instance(context, volume_id):
- """Get the instance that a volume is attached to."""
- return IMPL.volume_get_instance(context, volume_id)
-
-
def volume_get_iscsi_target_num(context, volume_id):
"""Get the target num (tid) allocated to the volume."""
return IMPL.volume_get_iscsi_target_num(context, volume_id)
from cinder.db.sqlalchemy.session import get_session
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import func
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
@require_admin_context
-def volume_attached(context, volume_id, instance_id, mountpoint):
+def volume_attached(context, volume_id, instance_uuid, mountpoint):
+ if not utils.is_uuid_like(instance_uuid):
+        raise exception.InvalidUUID(uuid=instance_uuid)
+
session = get_session()
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref['status'] = 'in-use'
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
- volume_ref['instance_id'] = instance_id
+ volume_ref['instance_uuid'] = instance_uuid
volume_ref.save(session=session)
volume_ref['status'] = 'available'
volume_ref['mountpoint'] = None
volume_ref['attach_status'] = 'detached'
- volume_ref.instance = None
+ volume_ref['instance_uuid'] = None
volume_ref.save(session=session)
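The new guard relies on `utils.is_uuid_like`; a standalone approximation of that check (an assumption about its behaviour, not a copy of the real utility) might look like:

```python
import uuid


def is_uuid_like(val):
    """Rough check: does val parse as a UUID? (illustrative approximation)"""
    try:
        uuid.UUID(val)
        return True
    except (TypeError, ValueError, AttributeError):
        return False


is_uuid_like('12345678-1234-5678-1234-567812345678')  # True
is_uuid_like('fake-inst')                              # False
```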
def _volume_get_query(context, session=None, project_only=False):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
- options(joinedload('instance')).\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type'))
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type'))
+
+
+@require_context
+def _ec2_volume_get_query(context, session=None, project_only=False):
+ return model_query(context, models.VolumeIdMapping, session=session,
+ project_only=project_only)
+
+
+@require_context
+def _ec2_snapshot_get_query(context, session=None, project_only=False):
+ return model_query(context, models.SnapshotIdMapping, session=session,
+ project_only=project_only)
@require_context
@require_admin_context
-def volume_get_all_by_instance(context, instance_id):
+def volume_get_all_by_instance_uuid(context, instance_uuid):
result = model_query(context, models.Volume, read_deleted="no").\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
- filter_by(instance_id=instance_id).\
+ filter_by(instance_uuid=instance_uuid).\
all()
+ if not result:
+ return []
+
return result
return _volume_get_query(context).filter_by(project_id=project_id).all()
-@require_admin_context
-def volume_get_instance(context, volume_id):
- result = _volume_get_query(context).filter_by(id=volume_id).first()
-
- if not result:
- raise exception.VolumeNotFound(volume_id=volume_id)
-
- return result.instance
-
-
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+# Copyright 2012 SolidFire Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import select, Column
+from sqlalchemy import MetaData, Integer, String, Table
+from migrate import ForeignKeyConstraint
+
+from cinder import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ volumes = Table('volumes', meta, autoload=True)
+ instance_uuid_column = Column('instance_uuid', String(36))
+
+ instance_uuid_column.create(volumes)
+ try:
+ volumes.update().values(
+ instance_uuid=select(
+ [instances.c.uuid],
+ instances.c.id == volumes.c.instance_id)
+ ).execute()
+    except Exception:
+        instance_uuid_column.drop()
+        raise
+
+ fkeys = list(volumes.c.instance_id.foreign_keys)
+ if fkeys:
+ try:
+ fk_name = fkeys[0].constraint.name
+ ForeignKeyConstraint(
+ columns=[volumes.c.instance_id],
+ refcolumns=[instances.c.id],
+ name=fk_name).drop()
+
+ except Exception:
+ LOG.error(_("foreign key could not be dropped"))
+ raise
+
+ volumes.c.instance_id.drop()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ volumes = Table('volumes', meta, autoload=True)
+ instance_id_column = Column('instance_id', Integer)
+
+ instance_id_column.create(volumes)
+ try:
+ volumes.update().values(
+ instance_id=select(
+ [instances.c.id],
+ instances.c.uuid == volumes.c.instance_uuid)
+ ).execute()
+    except Exception:
+        instance_id_column.drop()
+        raise
+
+ fkeys = list(volumes.c.instance_id.foreign_keys)
+ if fkeys:
+ try:
+ fk_name = fkeys[0].constraint.name
+ ForeignKeyConstraint(
+ columns=[volumes.c.instance_id],
+ refcolumns=[instances.c.id],
+ name=fk_name).create()
+
+ except Exception:
+ LOG.error(_("foreign key could not be created"))
+ raise
+
+ volumes.c.instance_uuid.drop()
--- /dev/null
+BEGIN TRANSACTION;
+    -- convert volumes.instance_uuid back to instance_id
+ CREATE TABLE volumes_backup(
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ snapshot_id VARCHAR(36),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_id INTEGER,
+ instance_uuid VARCHAR(36),
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(255),
+ provider_auth VARCHAR(255),
+ volume_type_id INTEGER,
+ PRIMARY KEY (id),
+ FOREIGN KEY(instance_id) REFERENCES instances (id),
+ UNIQUE (id),
+ CHECK (deleted IN (0, 1))
+ );
+
+ INSERT INTO volumes_backup SELECT
+ created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ snapshot_id,
+ host,
+ size,
+ availability_zone,
+ NULL,
+ instance_uuid,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ volume_type_id
+ FROM volumes;
+
+ UPDATE volumes_backup
+ SET instance_id =
+ (SELECT id
+ FROM instances
+ WHERE volumes_backup.instance_uuid = instances.uuid
+ );
+ DROP TABLE volumes;
+
+ CREATE TABLE volumes(
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ snapshot_id VARCHAR(36),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_id INTEGER,
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(255),
+ provider_auth VARCHAR(255),
+ volume_type_id INTEGER,
+ PRIMARY KEY (id),
+ FOREIGN KEY (instance_id) REFERENCES instances (id),
+ UNIQUE (id),
+ CHECK (deleted IN (0, 1))
+ );
+
+ INSERT INTO volumes
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ snapshot_id,
+ host,
+ size,
+ availability_zone,
+ instance_id,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ volume_type_id
+ FROM volumes_backup;
+ DROP TABLE volumes_backup;
+COMMIT;
--- /dev/null
+BEGIN TRANSACTION;
+    -- convert volumes.instance_id to instance_uuid
+ CREATE TABLE volumes_backup(
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ snapshot_id VARCHAR(36),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_id INTEGER,
+ instance_uuid VARCHAR(36),
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(255),
+ provider_auth VARCHAR(255),
+ volume_type_id INTEGER,
+ PRIMARY KEY (id),
+ FOREIGN KEY(instance_id) REFERENCES instances (id),
+ UNIQUE (id),
+ CHECK (deleted IN (0, 1))
+ );
+
+ INSERT INTO volumes_backup SELECT
+ created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ snapshot_id,
+ host,
+ size,
+ availability_zone,
+ instance_id,
+ NULL,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ volume_type_id
+ FROM volumes;
+
+ UPDATE volumes_backup
+ SET instance_uuid =
+ (SELECT uuid
+ FROM instances
+ WHERE volumes_backup.instance_id = instances.id
+ );
+ DROP TABLE volumes;
+
+ CREATE TABLE volumes(
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ snapshot_id VARCHAR(36),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_uuid VARCHAR(36),
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(255),
+ provider_auth VARCHAR(255),
+ volume_type_id INTEGER,
+ PRIMARY KEY (id),
+ UNIQUE (id),
+ CHECK (deleted IN (0, 1))
+ );
+
+ INSERT INTO volumes
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ snapshot_id,
+ host,
+ size,
+ availability_zone,
+ instance_uuid,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ volume_type_id
+ FROM volumes_backup;
+ DROP TABLE volumes_backup;
+COMMIT;
primaryjoin=instance_id == Instance.uuid)
-class InstanceActions(BASE, CinderBase):
- """Represents a guest VM's actions and results"""
- __tablename__ = "instance_actions"
- id = Column(Integer, primary_key=True)
- instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
- action = Column(String(255))
- error = Column(Text)
-
-
class InstanceTypes(BASE, CinderBase):
"""Represent possible instance_types or flavor of VM offered"""
__tablename__ = "instance_types"
host = Column(String(255)) # , ForeignKey('hosts.id'))
size = Column(Integer)
availability_zone = Column(String(255)) # TODO(vish): foreign key?
- instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
- instance = relationship(Instance,
- backref=backref('volumes'),
- foreign_keys=instance_id,
- primaryjoin='and_(Volume.instance_id==Instance.id,'
- 'Volume.deleted==False)')
+ instance_uuid = Column(String(36))
mountpoint = Column(String(255))
attach_time = Column(String(255)) # TODO(vish): datetime
status = Column(String(255)) # TODO(vish): enum?
message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
+class InvalidUUID(Invalid):
+    message = _("Expected a uuid but received %(uuid)s.")
+
+
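For reference, a sketch of how the new exception is intended to be raised, assuming the usual CinderException behaviour of interpolating keyword arguments into `message`:

```python
from cinder import exception

# Sketch only: the keyword argument fills the %(uuid)s placeholder.
try:
    raise exception.InvalidUUID(uuid='fake-inst')
except exception.InvalidUUID as e:
    print(e)  # "Expected a uuid but received fake-inst."
```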
class NotFound(CinderException):
message = _("Resource could not be found.")
code = 404
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
- 'instance': {'uuid': 'fakeuuid'},
+ 'instance_uuid': 'fakeuuid',
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
- vol = stub_volume(1)
+ vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
def stub_volume_get_all(self, context, search_opts=None):
- return [stub_volume_get(self, context, 1)]
+ return [stub_volume_get(self, context, '1')]
def test_volume_show(self):
req = fakes.HTTPRequest.blank('/v1/volumes/1')
- res_dict = self.controller.show(req, 1)
+ res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
- res_dict = self.controller.show(req, 1)
+ res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
import sqlalchemy
import cinder.db.sqlalchemy.migrate_repo
+import cinder.db.migration as migration
from cinder.db.sqlalchemy.migration import versioning_api as migration_api
from cinder import log as logging
from cinder import test
return True
-def _missing_mysql():
- if "NOVA_TEST_MYSQL_PRESENT" in os.environ:
- return True
- return not _is_mysql_avail()
+def _have_mysql():
+ present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
+ if present is None:
+ return _is_mysql_avail()
+ return present.lower() in ('', 'true')
class TestMigrations(test.TestCase):
if _is_mysql_avail(user="openstack_cifail"):
self.fail("Shouldn't have connected")
- @test.skip_if(_missing_mysql(), "mysql not available")
+ @test.skip_unless(_have_mysql(), "mysql not available")
def test_mysql_innodb(self):
"""
Test that table creation on mysql only builds InnoDB tables
self._reset_databases()
self._walk_versions(engine, False, False)
- uri = self._mysql_get_connect_string(database="information_schema")
+ uri = _mysql_get_connect_string(database="information_schema")
connection = sqlalchemy.create_engine(uri).connect()
# sanity check
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
- instance_id = 'fake-inst'
+ instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
- db.volume_attached(self.context, volume_id, instance_id,
+ db.volume_attached(self.context, volume_id, instance_uuid,
mountpoint)
else:
self.compute.attach_volume(self.context,
- instance_id,
+ instance_uuid,
volume_id,
mountpoint)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
- self.assertEqual(vol['instance_id'], instance_id)
+ self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertRaises(exception.Error,
self.volume.delete_volume,
else:
pass
self.compute.detach_volume(self.context,
- instance_id,
+ instance_uuid,
volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
def fake_cast(ctxt, topic, msg):
pass
self.stubs.Set(rpc, 'cast', fake_cast)
- instance_id = 'fake-inst'
+ instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
- db.volume_attached(self.context, volume['id'], instance_id,
+ db.volume_attached(self.context, volume['id'], instance_uuid,
'/dev/sda1')
volume_api = cinder.volume.api.API()
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
- instance_id = 'fake-inst'
- db.volume_attached(self.context, vol_ref['id'], instance_id,
+ instance_uuid = '12345678-1234-5678-1234-567812345678'
+ db.volume_attached(self.context, vol_ref['id'], instance_uuid,
mountpoint)
volume_id_list.append(vol_ref['id'])
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
- instance_id = 'fake-inst'
- self.volume.check_for_export(self.context, instance_id)
+ instance_uuid = '12345678-1234-5678-1234-567812345678'
+ self.volume.check_for_export(self.context, instance_uuid)
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self):
self.stream.truncate(0)
self.mox.ReplayAll()
- instance_id = 'fake-inst'
- self.volume.check_for_export(self.context, instance_id)
+ instance_uuid = '12345678-1234-5678-1234-567812345678'
+ self.volume.check_for_export(self.context, instance_uuid)
self.assertEqual(self.stream.getvalue(), '')
self.mox.UnsetStubs()
"""Output a warning message when some volumes are not recognied
by ietd."""
volume_id_list = self._attach_volume()
- instance_id = 'fake-inst'
+ instance_uuid = '12345678-1234-5678-1234-567812345678'
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
self.assertRaises(exception.ProcessExecutionError,
self.volume.check_for_export,
self.context,
- instance_id)
+ instance_uuid)
msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
self.assertTrue(0 <= self.stream.getvalue().find(msg))
self.mox.UnsetStubs()
self.update(context, volume, {"status": "available"})
@wrap_check_policy
- def attach(self, context, volume, instance_id, mountpoint):
+ def attach(self, context, volume, instance_uuid, mountpoint):
host = volume['host']
queue = self.db.queue_get_for(context, FLAGS.volume_topic, host)
return rpc.call(context, queue,
{"method": "attach_volume",
"args": {"volume_id": volume['id'],
- "instance_id": instance_id,
+ "instance_uuid": instance_uuid,
"mountpoint": mountpoint}})
@wrap_check_policy
LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
return True
- def attach_volume(self, context, volume_id, instance_id, mountpoint):
+ def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
"""Updates db to show volume is attached"""
# TODO(vish): refactor this into a more general "reserve"
+ if not utils.is_uuid_like(instance_uuid):
+            raise exception.InvalidUUID(uuid=instance_uuid)
+
self.db.volume_attached(context,
volume_id,
- instance_id,
+ instance_uuid,
mountpoint)
def detach_volume(self, context, volume_id):
volume_ref = self.db.volume_get(context, volume_id)
self.driver.terminate_connection(volume_ref, connector)
- def check_for_export(self, context, instance_id):
+ def check_for_export(self, context, instance_uuid):
"""Make sure whether volume is exported."""
- volumes = self.db.volume_get_all_by_instance(context, instance_id)
+ volumes = self.db.volume_get_all_by_instance_uuid(context,
+ instance_uuid)
for volume in volumes:
self.driver.check_for_export(context, volume['id'])
###### (StrOpt) Template string to be used to generate snapshot names
# snapshot_name_template="snapshot-%08x"
###### (StrOpt) Template string to be used to generate volume names
-# volume_name_template="volume-%08x"
+# volume_name_template="volume-%s"
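Worth noting: the template drops the `%08x` integer formatting because volume ids are uuid strings in this series; for example (illustrative only):

```python
# Illustrative only: '%08x' needs an integer, so it breaks for uuid ids.
'volume-%s' % 'c3b2d8e4-0000-4000-8000-000000000001'
# -> 'volume-c3b2d8e4-0000-4000-8000-000000000001'
# 'volume-%08x' % 'c3b2d8e4-...' raises TypeError
```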
######### defined in cinder.crypto #########