]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Remove instance Foreign Key in volumes table, replace with instance_uuid
authorJohn Griffith <john.griffith@solidfire.com>
Wed, 9 May 2012 17:02:34 +0000 (11:02 -0600)
committerJohn Griffith <john.griffith@solidfire.com>
Fri, 11 May 2012 17:59:58 +0000 (11:59 -0600)
* Remove the instance relationship and instance_id FK
* Add instance_uuid column to volumes table

Change-Id: I54d723dcb9819731a58ec64095c54c99aa9e5dc4

15 files changed:
cinder/api/openstack/volume/volumes.py
cinder/db/api.py
cinder/db/sqlalchemy/api.py
cinder/db/sqlalchemy/migrate_repo/versions/092_change_fk_instance_id_to_uuid.py [new file with mode: 0644]
cinder/db/sqlalchemy/migrate_repo/versions/092_sqlite_downgrade.sql [new file with mode: 0644]
cinder/db/sqlalchemy/migrate_repo/versions/092_sqlite_upgrade.sql [new file with mode: 0644]
cinder/db/sqlalchemy/models.py
cinder/exception.py
cinder/tests/api/openstack/fakes.py
cinder/tests/api/openstack/volume/test_volumes.py
cinder/tests/test_migrations.py
cinder/tests/test_volume.py
cinder/volume/api.py
cinder/volume/manager.py
etc/cinder/cinder.conf.sample

index 9d4b4b5d588f7cc6a583a3d781e63acdba37c928..289adbf40621f7a5493c1b0db75ca662ae71f64b 100644 (file)
@@ -48,15 +48,13 @@ def _translate_attachment_summary_view(_context, vol):
     """Maps keys for attachment summary view."""
     d = {}
 
-    # TODO(bcwaldon): remove str cast once we use uuids
-    volume_id = str(vol['id'])
+    volume_id = vol['id']
 
     # NOTE(justinsb): We use the volume id as the id of the attachment object
     d['id'] = volume_id
 
     d['volume_id'] = volume_id
-    if vol.get('instance'):
-        d['server_id'] = vol['instance']['uuid']
+    d['server_id'] = vol['instance_uuid']
     if vol.get('mountpoint'):
         d['device'] = vol['mountpoint']
 
@@ -77,8 +75,7 @@ def _translate_volume_summary_view(context, vol):
     """Maps keys for volumes summary view."""
     d = {}
 
-    # TODO(bcwaldon): remove str cast once we use uuids
-    d['id'] = str(vol['id'])
+    d['id'] = vol['id']
     d['status'] = vol['status']
     d['size'] = vol['size']
     d['availability_zone'] = vol['availability_zone']
@@ -99,9 +96,6 @@ def _translate_volume_summary_view(context, vol):
         d['volume_type'] = str(vol['volume_type_id'])
 
     d['snapshot_id'] = vol['snapshot_id']
-    # TODO(bcwaldon): remove str cast once we use uuids
-    if d['snapshot_id'] is not None:
-        d['snapshot_id'] = str(d['snapshot_id'])
 
     LOG.audit(_("vol=%s"), vol, context=context)
 
index 1e39531fda6e2df3cedcb26abf63fa5a5205589a..8c0f58e4738df502cef4467d755a7cd037311762 100644 (file)
@@ -542,9 +542,9 @@ def volume_get_all_by_host(context, host):
     return IMPL.volume_get_all_by_host(context, host)
 
 
-def volume_get_all_by_instance(context, instance_id):
+def volume_get_all_by_instance_uuid(context, instance_uuid):
     """Get all volumes belonging to an instance."""
-    return IMPL.volume_get_all_by_instance(context, instance_id)
+    return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
 
 
 def volume_get_all_by_project(context, project_id):
@@ -552,11 +552,6 @@ def volume_get_all_by_project(context, project_id):
     return IMPL.volume_get_all_by_project(context, project_id)
 
 
-def volume_get_instance(context, volume_id):
-    """Get the instance that a volume is attached to."""
-    return IMPL.volume_get_instance(context, volume_id)
-
-
 def volume_get_iscsi_target_num(context, volume_id):
     """Get the target num (tid) allocated to the volume."""
     return IMPL.volume_get_iscsi_target_num(context, volume_id)
index 2d40b30467d43f8c6804e5ae8565a1886372e70f..435ace8546db861e78a8b169a4bcba807d5b1c9e 100644 (file)
@@ -33,7 +33,10 @@ from cinder.db.sqlalchemy import models
 from cinder.db.sqlalchemy.session import get_session
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
 from sqlalchemy.sql import func
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql.expression import desc
 from sqlalchemy.sql.expression import literal_column
 
 FLAGS = flags.FLAGS
@@ -576,14 +579,17 @@ def volume_allocate_iscsi_target(context, volume_id, host):
 
 
 @require_admin_context
-def volume_attached(context, volume_id, instance_id, mountpoint):
+def volume_attached(context, volume_id, instance_uuid, mountpoint):
+    if not utils.is_uuid_like(instance_uuid):
+        raise exception.InvalidUUID(instance_uuid)
+
     session = get_session()
     with session.begin():
         volume_ref = volume_get(context, volume_id, session=session)
         volume_ref['status'] = 'in-use'
         volume_ref['mountpoint'] = mountpoint
         volume_ref['attach_status'] = 'attached'
-        volume_ref['instance_id'] = instance_id
+        volume_ref['instance_uuid'] = instance_uuid
         volume_ref.save(session=session)
 
 
@@ -643,7 +649,7 @@ def volume_detached(context, volume_id):
         volume_ref['status'] = 'available'
         volume_ref['mountpoint'] = None
         volume_ref['attach_status'] = 'detached'
-        volume_ref.instance = None
+        volume_ref['instance_uuid'] = None
         volume_ref.save(session=session)
 
 
@@ -651,9 +657,20 @@ def volume_detached(context, volume_id):
 def _volume_get_query(context, session=None, project_only=False):
     return model_query(context, models.Volume, session=session,
                        project_only=project_only).\
-                     options(joinedload('instance')).\
-                     options(joinedload('volume_metadata')).\
-                     options(joinedload('volume_type'))
+                       options(joinedload('volume_metadata')).\
+                       options(joinedload('volume_type'))
+
+
+@require_context
+def _ec2_volume_get_query(context, session=None, project_only=False):
+    return model_query(context, models.VolumeIdMapping, session=session,
+                       project_only=project_only)
+
+
+@require_context
+def _ec2_snapshot_get_query(context, session=None, project_only=False):
+    return model_query(context, models.SnapshotIdMapping, session=session,
+                       project_only=project_only)
 
 
 @require_context
@@ -679,13 +696,16 @@ def volume_get_all_by_host(context, host):
 
 
 @require_admin_context
-def volume_get_all_by_instance(context, instance_id):
+def volume_get_all_by_instance_uuid(context, instance_uuid):
     result = model_query(context, models.Volume, read_deleted="no").\
                      options(joinedload('volume_metadata')).\
                      options(joinedload('volume_type')).\
-                     filter_by(instance_id=instance_id).\
+                     filter_by(instance_uuid=instance_uuid).\
                      all()
 
+    if not result:
+        return []
+
     return result
 
 
@@ -695,16 +715,6 @@ def volume_get_all_by_project(context, project_id):
     return _volume_get_query(context).filter_by(project_id=project_id).all()
 
 
-@require_admin_context
-def volume_get_instance(context, volume_id):
-    result = _volume_get_query(context).filter_by(id=volume_id).first()
-
-    if not result:
-        raise exception.VolumeNotFound(volume_id=volume_id)
-
-    return result.instance
-
-
 @require_admin_context
 def volume_get_iscsi_target_num(context, volume_id):
     result = model_query(context, models.IscsiTarget, read_deleted="yes").\
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/092_change_fk_instance_id_to_uuid.py b/cinder/db/sqlalchemy/migrate_repo/versions/092_change_fk_instance_id_to_uuid.py
new file mode 100644 (file)
index 0000000..3d6a9db
--- /dev/null
@@ -0,0 +1,94 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+# Copyright 2012 SolidFire Inc
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import select, Column
+from sqlalchemy import MetaData, Integer, String, Table
+from migrate import ForeignKeyConstraint
+
+from cinder import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+    volumes = Table('volumes', meta, autoload=True)
+    instance_uuid_column = Column('instance_uuid', String(36))
+
+    instance_uuid_column.create(volumes)
+    try:
+        volumes.update().values(
+            instance_uuid=select(
+                [instances.c.uuid],
+                instances.c.id == volumes.c.instance_id)
+                ).execute()
+    except Exception:
+        instance_uuid_column.drop()
+
+    fkeys = list(volumes.c.instance_id.foreign_keys)
+    if fkeys:
+        try:
+            fk_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(
+                columns=[volumes.c.instance_id],
+                refcolumns=[instances.c.id],
+                name=fk_name).drop()
+
+        except Exception:
+            LOG.error(_("foreign key could not be dropped"))
+            raise
+
+    volumes.c.instance_id.drop()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+    volumes = Table('volumes', meta, autoload=True)
+    instance_id_column = Column('instance_id', Integer)
+
+    instance_id_column.create(volumes)
+    try:
+        volumes.update().values(
+            instance_id=select(
+                [instances.c.id],
+                instances.c.uuid == volumes.c.instance_uuid)
+                ).execute()
+    except Exception:
+        instance_id_column.drop()
+
+    fkeys = list(volumes.c.instance_id.foreign_keys)
+    if fkeys:
+        try:
+            fk_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(
+                columns=[volumes.c.instance_id],
+                refcolumns=[instances.c.id],
+                name=fk_name).create()
+
+        except Exception:
+            LOG.error(_("foreign key could not be created"))
+            raise
+
+    volumes.c.instance_uuid.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/092_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/092_sqlite_downgrade.sql
new file mode 100644 (file)
index 0000000..7c13455
--- /dev/null
@@ -0,0 +1,133 @@
+BEGIN TRANSACTION;
+    -- change instance_id volumes table
+    CREATE TABLE volumes_backup(
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id VARCHAR(36) NOT NULL,
+        ec2_id INTEGER,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        snapshot_id VARCHAR(36),
+        host VARCHAR(255),
+        size INTEGER,
+        availability_zone VARCHAR(255),
+        instance_id INTEGER,
+        instance_uuid VARCHAR(36),
+        mountpoint VARCHAR(255),
+        attach_time VARCHAR(255),
+        status VARCHAR(255),
+        attach_status VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        provider_location VARCHAR(255),
+        provider_auth VARCHAR(255),
+        volume_type_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id),
+        UNIQUE (id),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO volumes_backup SELECT
+        created_at,
+        updated_at,
+        deleted_at,
+        deleted,
+        id,
+        ec2_id,
+        user_id,
+        project_id,
+        snapshot_id,
+        host,
+        size,
+        availability_zone,
+        NULL,
+        instance_uuid,
+        mountpoint,
+        attach_time,
+        status,
+        attach_status,
+        scheduled_at,
+        launched_at,
+        terminated_at,
+        display_name,
+        display_description,
+        provider_location,
+        provider_auth,
+        volume_type_id
+    FROM volumes;
+
+    UPDATE volumes_backup
+        SET instance_id =
+            (SELECT id
+                FROM instances
+                WHERE volumes_backup.instance_uuid = instances.uuid
+        );
+    DROP TABLE volumes;
+
+    CREATE TABLE volumes(
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id VARCHAR(36) NOT NULL,
+        ec2_id INTEGER,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        snapshot_id VARCHAR(36),
+        host VARCHAR(255),
+        size INTEGER,
+        availability_zone VARCHAR(255),
+        instance_id INTEGER,
+        mountpoint VARCHAR(255),
+        attach_time VARCHAR(255),
+        status VARCHAR(255),
+        attach_status VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        provider_location VARCHAR(255),
+        provider_auth VARCHAR(255),
+        volume_type_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN KEY (instance_id) REFERENCES instances (id),
+        UNIQUE (id),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO volumes
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               ec2_id,
+               user_id,
+               project_id,
+               snapshot_id,
+               host,
+               size,
+               availability_zone,
+               instance_id,
+               mountpoint,
+               attach_time,
+               status,
+               attach_status,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               provider_location,
+               provider_auth,
+               volume_type_id
+        FROM volumes_backup;
+    DROP TABLE volumes_backup;
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/092_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/092_sqlite_upgrade.sql
new file mode 100644 (file)
index 0000000..130e110
--- /dev/null
@@ -0,0 +1,132 @@
+BEGIN TRANSACTION;
+    -- change instance_id volumes table
+    CREATE TABLE volumes_backup(
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id VARCHAR(36) NOT NULL,
+        ec2_id INTEGER,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        snapshot_id VARCHAR(36),
+        host VARCHAR(255),
+        size INTEGER,
+        availability_zone VARCHAR(255),
+        instance_id INTEGER,
+        instance_uuid VARCHAR(36),
+        mountpoint VARCHAR(255),
+        attach_time VARCHAR(255),
+        status VARCHAR(255),
+        attach_status VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        provider_location VARCHAR(255),
+        provider_auth VARCHAR(255),
+        volume_type_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id),
+        UNIQUE (id),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO volumes_backup SELECT
+        created_at,
+        updated_at,
+        deleted_at,
+        deleted,
+        id,
+        ec2_id,
+        user_id,
+        project_id,
+        snapshot_id,
+        host,
+        size,
+        availability_zone,
+        instance_id,
+        NULL,
+        mountpoint,
+        attach_time,
+        status,
+        attach_status,
+        scheduled_at,
+        launched_at,
+        terminated_at,
+        display_name,
+        display_description,
+        provider_location,
+        provider_auth,
+        volume_type_id
+    FROM volumes;
+
+    UPDATE volumes_backup
+        SET instance_uuid =
+            (SELECT uuid
+                FROM instances
+                WHERE volumes_backup.instance_id = instances.id
+        );
+    DROP TABLE volumes;
+
+    CREATE TABLE volumes(
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id VARCHAR(36) NOT NULL,
+        ec2_id INTEGER,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        snapshot_id VARCHAR(36),
+        host VARCHAR(255),
+        size INTEGER,
+        availability_zone VARCHAR(255),
+        instance_uuid VARCHAR(36),
+        mountpoint VARCHAR(255),
+        attach_time VARCHAR(255),
+        status VARCHAR(255),
+        attach_status VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        provider_location VARCHAR(255),
+        provider_auth VARCHAR(255),
+        volume_type_id INTEGER,
+        PRIMARY KEY (id),
+        UNIQUE (id),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO volumes
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               ec2_id,
+               user_id,
+               project_id,
+               snapshot_id,
+               host,
+               size,
+               availability_zone,
+               instance_uuid,
+               mountpoint,
+               attach_time,
+               status,
+               attach_status,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               provider_location,
+               provider_auth,
+               volume_type_id
+        FROM volumes_backup;
+    DROP TABLE volumes_backup;
+COMMIT;
index 732e6832f0eb0deedef0932762e7acc4d3334d78..f62b8d0621ad2b3621ffcd4c25e94f514e2c2d38 100644 (file)
@@ -300,15 +300,6 @@ class InstanceInfoCache(BASE, CinderBase):
                             primaryjoin=instance_id == Instance.uuid)
 
 
-class InstanceActions(BASE, CinderBase):
-    """Represents a guest VM's actions and results"""
-    __tablename__ = "instance_actions"
-    id = Column(Integer, primary_key=True)
-    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
-    action = Column(String(255))
-    error = Column(Text)
-
-
 class InstanceTypes(BASE, CinderBase):
     """Represent possible instance_types or flavor of VM offered"""
     __tablename__ = "instance_types"
@@ -350,12 +341,7 @@ class Volume(BASE, CinderBase):
     host = Column(String(255))  # , ForeignKey('hosts.id'))
     size = Column(Integer)
     availability_zone = Column(String(255))  # TODO(vish): foreign key?
-    instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
-    instance = relationship(Instance,
-                            backref=backref('volumes'),
-                            foreign_keys=instance_id,
-                            primaryjoin='and_(Volume.instance_id==Instance.id,'
-                                             'Volume.deleted==False)')
+    instance_uuid = Column(String(36))
     mountpoint = Column(String(255))
     attach_time = Column(String(255))  # TODO(vish): datetime
     status = Column(String(255))  # TODO(vish): enum?
index b57c1925ea5587cf07029fabde396f45b4813d11..e92da99639a05eb453573d9d5849c94b93785895 100644 (file)
@@ -403,6 +403,10 @@ class InstanceUnacceptable(Invalid):
     message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
 
 
+class InvalidUUID(Invalid):
+    message = _("Expected a uuid but received %(uuid)s.")
+
+
 class NotFound(CinderException):
     message = _("Resource could not be found.")
     code = 404
index b3896d52ca73211665fc31a2162e5789aa9c022e..019acf62ea69028bb1cfd4b0511aab52f5cf714a 100644 (file)
@@ -183,7 +183,7 @@ def stub_volume(id, **kwargs):
         'host': 'fakehost',
         'size': 1,
         'availability_zone': 'fakeaz',
-        'instance': {'uuid': 'fakeuuid'},
+        'instance_uuid': 'fakeuuid',
         'mountpoint': '/',
         'status': 'fakestatus',
         'attach_status': 'attached',
@@ -202,7 +202,7 @@ def stub_volume(id, **kwargs):
 
 def stub_volume_create(self, context, size, name, description, snapshot,
                        **param):
-    vol = stub_volume(1)
+    vol = stub_volume('1')
     vol['size'] = size
     vol['display_name'] = name
     vol['display_description'] = description
@@ -231,4 +231,4 @@ def stub_volume_get_notfound(self, context, volume_id):
 
 
 def stub_volume_get_all(self, context, search_opts=None):
-    return [stub_volume_get(self, context, 1)]
+    return [stub_volume_get(self, context, '1')]
index 9563989a91bfa3d21b06cacf38d01659d5bf1120..28ce067e98254f74a926d6e761494f8016728d72 100644 (file)
@@ -115,7 +115,7 @@ class VolumeApiTest(test.TestCase):
 
     def test_volume_show(self):
         req = fakes.HTTPRequest.blank('/v1/volumes/1')
-        res_dict = self.controller.show(req, 1)
+        res_dict = self.controller.show(req, '1')
         expected = {'volume': {'status': 'fakestatus',
                                'display_description': 'displaydesc',
                                'availability_zone': 'fakeaz',
@@ -140,7 +140,7 @@ class VolumeApiTest(test.TestCase):
         self.stubs.Set(volume_api.API, 'get', stub_volume_get)
 
         req = fakes.HTTPRequest.blank('/v1/volumes/1')
-        res_dict = self.controller.show(req, 1)
+        res_dict = self.controller.show(req, '1')
         expected = {'volume': {'status': 'fakestatus',
                                'display_description': 'displaydesc',
                                'availability_zone': 'fakeaz',
index e16e9d04f91f8236379e9f912d829a20c4d76093..1b2915ff3905cf2ca25ab110cf94356b928a77dc 100644 (file)
@@ -34,6 +34,7 @@ from migrate.versioning import repository
 import sqlalchemy
 
 import cinder.db.sqlalchemy.migrate_repo
+import cinder.db.migration as migration
 from cinder.db.sqlalchemy.migration import versioning_api as migration_api
 from cinder import log as logging
 from cinder import test
@@ -68,10 +69,11 @@ def _is_mysql_avail(user="openstack_citest",
         return True
 
 
-def _missing_mysql():
-    if "NOVA_TEST_MYSQL_PRESENT" in os.environ:
-        return True
-    return not _is_mysql_avail()
+def _have_mysql():
+    present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
+    if present is None:
+        return _is_mysql_avail()
+    return present.lower() in ('', 'true')
 
 
 class TestMigrations(test.TestCase):
@@ -215,7 +217,7 @@ class TestMigrations(test.TestCase):
         if _is_mysql_avail(user="openstack_cifail"):
             self.fail("Shouldn't have connected")
 
-    @test.skip_if(_missing_mysql(), "mysql not available")
+    @test.skip_unless(_have_mysql(), "mysql not available")
     def test_mysql_innodb(self):
         """
         Test that table creation on mysql only builds InnoDB tables
@@ -231,7 +233,7 @@ class TestMigrations(test.TestCase):
         self._reset_databases()
         self._walk_versions(engine, False, False)
 
-        uri = self._mysql_get_connect_string(database="information_schema")
+        uri = _mysql_get_connect_string(database="information_schema")
         connection = sqlalchemy.create_engine(uri).connect()
 
         # sanity check
index aebc259425b4c2ad52146b718d5afc568cfa0af4..7f131a7eb4977a590523540c1e2a54aee237783b 100644 (file)
@@ -149,24 +149,24 @@ class VolumeTestCase(test.TestCase):
 
     def test_run_attach_detach_volume(self):
         """Make sure volume can be attached and detached from instance."""
-        instance_id = 'fake-inst'
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
         mountpoint = "/dev/sdf"
         volume = self._create_volume()
         volume_id = volume['id']
         self.volume.create_volume(self.context, volume_id)
         if FLAGS.fake_tests:
-            db.volume_attached(self.context, volume_id, instance_id,
+            db.volume_attached(self.context, volume_id, instance_uuid,
                                 mountpoint)
         else:
             self.compute.attach_volume(self.context,
-                                       instance_id,
+                                       instance_uuid,
                                        volume_id,
                                        mountpoint)
         vol = db.volume_get(context.get_admin_context(), volume_id)
         self.assertEqual(vol['status'], "in-use")
         self.assertEqual(vol['attach_status'], "attached")
         self.assertEqual(vol['mountpoint'], mountpoint)
-        self.assertEqual(vol['instance_id'], instance_id)
+        self.assertEqual(vol['instance_uuid'], instance_uuid)
 
         self.assertRaises(exception.Error,
                           self.volume.delete_volume,
@@ -177,7 +177,7 @@ class VolumeTestCase(test.TestCase):
         else:
             pass
             self.compute.detach_volume(self.context,
-                                       instance_id,
+                                       instance_uuid,
                                        volume_id)
         vol = db.volume_get(self.context, volume_id)
         self.assertEqual(vol['status'], "available")
@@ -293,11 +293,11 @@ class VolumeTestCase(test.TestCase):
         def fake_cast(ctxt, topic, msg):
             pass
         self.stubs.Set(rpc, 'cast', fake_cast)
-        instance_id = 'fake-inst'
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
 
         volume = self._create_volume()
         self.volume.create_volume(self.context, volume['id'])
-        db.volume_attached(self.context, volume['id'], instance_id,
+        db.volume_attached(self.context, volume['id'], instance_uuid,
                            '/dev/sda1')
 
         volume_api = cinder.volume.api.API()
@@ -407,8 +407,8 @@ class ISCSITestCase(DriverTestCase):
 
             # each volume has a different mountpoint
             mountpoint = "/dev/sd" + chr((ord('b') + index))
-            instance_id = 'fake-inst'
-            db.volume_attached(self.context, vol_ref['id'], instance_id,
+            instance_uuid = '12345678-1234-5678-1234-567812345678'
+            db.volume_attached(self.context, vol_ref['id'], instance_uuid,
                                mountpoint)
             volume_id_list.append(vol_ref['id'])
 
@@ -417,8 +417,8 @@ class ISCSITestCase(DriverTestCase):
     def test_check_for_export_with_no_volume(self):
         """No log message when no volume is attached to an instance."""
         self.stream.truncate(0)
-        instance_id = 'fake-inst'
-        self.volume.check_for_export(self.context, instance_id)
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        self.volume.check_for_export(self.context, instance_uuid)
         self.assertEqual(self.stream.getvalue(), '')
 
     def test_check_for_export_with_all_volume_exported(self):
@@ -432,8 +432,8 @@ class ISCSITestCase(DriverTestCase):
 
         self.stream.truncate(0)
         self.mox.ReplayAll()
-        instance_id = 'fake-inst'
-        self.volume.check_for_export(self.context, instance_id)
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        self.volume.check_for_export(self.context, instance_uuid)
         self.assertEqual(self.stream.getvalue(), '')
         self.mox.UnsetStubs()
 
@@ -443,7 +443,7 @@ class ISCSITestCase(DriverTestCase):
         """Output a warning message when some volumes are not recognized
            by ietd."""
         volume_id_list = self._attach_volume()
-        instance_id = 'fake-inst'
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
 
         tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
         self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
@@ -454,7 +454,7 @@ class ISCSITestCase(DriverTestCase):
         self.assertRaises(exception.ProcessExecutionError,
                           self.volume.check_for_export,
                           self.context,
-                          instance_id)
+                          instance_uuid)
         msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
         self.assertTrue(0 <= self.stream.getvalue().find(msg))
         self.mox.UnsetStubs()
index 9c816e95bd8a659644f4ee2f281b19dae5e123b4..857da172b9a1e9ec288ea4fa91486ae6bf5c9dc2 100644 (file)
@@ -248,13 +248,13 @@ class API(base.Base):
             self.update(context, volume, {"status": "available"})
 
     @wrap_check_policy
-    def attach(self, context, volume, instance_id, mountpoint):
+    def attach(self, context, volume, instance_uuid, mountpoint):
         host = volume['host']
         queue = self.db.queue_get_for(context, FLAGS.volume_topic, host)
         return rpc.call(context, queue,
                         {"method": "attach_volume",
                          "args": {"volume_id": volume['id'],
-                                  "instance_id": instance_id,
+                                  "instance_uuid": instance_uuid,
                                   "mountpoint": mountpoint}})
 
     @wrap_check_policy
index efb2eabf4641c09d2cb2d63bc6176e3d8095b527..8266a64701e1c2cf334ed88dda253bda6870610a 100644 (file)
@@ -227,12 +227,15 @@ class VolumeManager(manager.SchedulerDependentManager):
         LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
         return True
 
-    def attach_volume(self, context, volume_id, instance_id, mountpoint):
+    def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
         """Updates db to show volume is attached"""
         # TODO(vish): refactor this into a more general "reserve"
+        if not utils.is_uuid_like(instance_uuid):
+            raise exception.InvalidUUID(instance_uuid)
+
         self.db.volume_attached(context,
                                 volume_id,
-                                instance_id,
+                                instance_uuid,
                                 mountpoint)
 
     def detach_volume(self, context, volume_id):
@@ -288,9 +291,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         volume_ref = self.db.volume_get(context, volume_id)
         self.driver.terminate_connection(volume_ref, connector)
 
-    def check_for_export(self, context, instance_id):
+    def check_for_export(self, context, instance_uuid):
         """Make sure whether volume is exported."""
-        volumes = self.db.volume_get_all_by_instance(context, instance_id)
+        volumes = self.db.volume_get_all_by_instance_uuid(context,
+                                                          instance_uuid)
         for volume in volumes:
             self.driver.check_for_export(context, volume['id'])
 
index 6270254e366acd81d3cce9f37f1ad1ce6ea7f579..a29160ce279f664cb515afdc7fec181d690576ca 100644 (file)
 ###### (StrOpt) Template string to be used to generate snapshot names
 # snapshot_name_template="snapshot-%08x"
 ###### (StrOpt) Template string to be used to generate instance names
-# volume_name_template="volume-%08x"
+# volume_name_template="volume-%s"
 
 ######### defined in cinder.crypto #########