"""Add attachment metadata."""
context = req.environ['cinder.context']
volume = self.volume_api.get(context, id)
-
- instance_uuid = body['os-attach']['instance_uuid']
+ # instance_uuid is now optional
+ instance_uuid = None
+ if 'instance_uuid' in body['os-attach']:
+ instance_uuid = body['os-attach']['instance_uuid']
+ host_name = None
+ # Keep backward compatibility with the existing API
+ if 'host_name' in body['os-attach']:
+ host_name = body['os-attach']['host_name']
mountpoint = body['os-attach']['mountpoint']
+ if instance_uuid and host_name:
+ msg = _("Invalid request to attach volume to an "
+ "instance %(instance_uuid)s and a "
+ "host %(host_name)s simultaneously") % (locals())
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+ elif instance_uuid is None and host_name is None:
+ msg = _("Invalid request to attach volume to an invalid target")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
self.volume_api.attach(context, volume,
- instance_uuid, mountpoint)
+ instance_uuid, host_name, mountpoint)
return webob.Response(status_int=202)
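
For reference, the reworked os-attach action now takes either an instance or a host as the attach target, never both. A minimal sketch of the two accepted request bodies, mirroring the extension tests later in this change (the values are placeholders):

    # attach to a Nova instance (existing behaviour)
    body = {'os-attach': {'instance_uuid': '<instance uuid>',
                          'mountpoint': '/dev/vdc'}}

    # attach directly to a host (new in this change)
    body = {'os-attach': {'host_name': 'fake_host',
                          'mountpoint': '/dev/vdc'}}

    # supplying both targets, or neither, returns HTTP 400
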
@wsgi.action('os-detach')
d['volume_id'] = volume_id
d['server_id'] = vol['instance_uuid']
+ d['host_name'] = vol['attached_host']
if vol.get('mountpoint'):
d['device'] = vol['mountpoint']
def make_attachment(elem):
elem.set('id')
elem.set('server_id')
+ elem.set('host_name')
elem.set('volume_id')
elem.set('device')
d['volume_id'] = volume_id
d['server_id'] = volume['instance_uuid']
+ d['host_name'] = volume['attached_host']
if volume.get('mountpoint'):
d['device'] = volume['mountpoint']
attachments.append(d)
def make_attachment(elem):
elem.set('id')
elem.set('server_id')
+ elem.set('host_name')
elem.set('volume_id')
elem.set('device')
return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
-def volume_attached(context, volume_id, instance_id, mountpoint):
+def volume_attached(context, volume_id, instance_id, host_name, mountpoint):
"""Ensure that a volume is set as attached."""
- return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
+ return IMPL.volume_attached(context, volume_id, instance_id, host_name,
+ mountpoint)
def volume_create(context, values):
@require_admin_context
-def volume_attached(context, volume_id, instance_uuid, mountpoint):
- if not uuidutils.is_uuid_like(instance_uuid):
+def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint):
+ if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
volume_ref['instance_uuid'] = instance_uuid
+ volume_ref['attached_host'] = host_name
volume_ref.save(session=session)
volume_ref['mountpoint'] = None
volume_ref['attach_status'] = 'detached'
volume_ref['instance_uuid'] = None
+ volume_ref['attached_host'] = None
volume_ref.save(session=session)
BEGIN TRANSACTION;
- CREATE TABLE volumes_v10 (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id VARCHAR(36),
- source_volid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO volumes_v10
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_uuid,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id,
- source_volid
- FROM volumes;
-
- DROP TABLE volumes;
- ALTER TABLE volumes_v10 RENAME TO volumes;
- COMMIT;
+CREATE TABLE volumes_v10 (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ snapshot_id VARCHAR(36),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_uuid VARCHAR(36),
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(255),
+ provider_auth VARCHAR(255),
+ volume_type_id VARCHAR(36),
+ source_volid VARCHAR(36),
+ PRIMARY KEY (id)
+);
+INSERT INTO volumes_v10
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ snapshot_id,
+ host,
+ size,
+ availability_zone,
+ instance_uuid,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ volume_type_id,
+ source_volid
+ FROM volumes;
+DROP TABLE volumes;
+ALTER TABLE volumes_v10 RENAME TO volumes;
+COMMIT;
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column
+from sqlalchemy import MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+ """Add attach host column to volumes."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ volumes = Table('volumes', meta, autoload=True)
+ attached_host = Column('attached_host', String(255))
+ volumes.create_column(attached_host)
+ volumes.update().values(attached_host=None).execute()
+
+
+def downgrade(migrate_engine):
+ """Remove attach host column from volumes."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ volumes = Table('volumes', meta, autoload=True)
+ attached_host = Column('attached_host', String(255))
+ volumes.drop_column(attached_host)
--- /dev/null
+BEGIN TRANSACTION;
+
+CREATE TABLE volumes_v11 (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id VARCHAR(36) NOT NULL,
+ ec2_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ snapshot_id VARCHAR(36),
+ host VARCHAR(255),
+ size INTEGER,
+ availability_zone VARCHAR(255),
+ instance_uuid VARCHAR(36),
+ mountpoint VARCHAR(255),
+ attach_time VARCHAR(255),
+ status VARCHAR(255),
+ attach_status VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ provider_location VARCHAR(255),
+ provider_auth VARCHAR(255),
+ volume_type_id VARCHAR(36),
+ source_volid VARCHAR(36),
+ bootable BOOLEAN,
+ PRIMARY KEY (id)
+);
+
+INSERT INTO volumes_v11
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ ec2_id,
+ user_id,
+ project_id,
+ snapshot_id,
+ host,
+ size,
+ availability_zone,
+ instance_uuid,
+ mountpoint,
+ attach_time,
+ status,
+ attach_status,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ provider_location,
+ provider_auth,
+ volume_type_id,
+ source_volid,
+ bootable
+ FROM volumes;
+
+DROP TABLE volumes;
+ALTER TABLE volumes_v11 RENAME TO volumes;
+COMMIT;
size = Column(Integer)
availability_zone = Column(String(255)) # TODO(vish): foreign key?
instance_uuid = Column(String(36))
+ attached_host = Column(String(255))
mountpoint = Column(String(255))
attach_time = Column(String(255)) # TODO(vish): datetime
status = Column(String(255)) # TODO(vish): enum?
# cleanup
svc.stop()
- def test_force_detach_volume(self):
+ def test_force_detach_instance_attached_volume(self):
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
- self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint)
+ self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, mountpoint)
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEquals(volume['status'], 'in-use')
self.assertEquals(volume['instance_uuid'], stubs.FAKE_UUID)
+ self.assertEquals(volume['attached_host'], None)
self.assertEquals(volume['mountpoint'], mountpoint)
self.assertEquals(volume['attach_status'], 'attached')
# build request to force detach
# cleanup
svc.stop()
- def test_attach_in_use_volume(self):
+ def test_force_detach_host_attached_volume(self):
+ # admin context
+ ctx = context.RequestContext('admin', 'fake', True)
+ # current status is available
+ volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+ 'provider_location': ''})
+ connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+ # start service to handle rpc messages for attach requests
+ svc = self.start_service('volume', host='test')
+ self.volume_api.reserve_volume(ctx, volume)
+ self.volume_api.initialize_connection(ctx, volume, connector)
+ mountpoint = '/dev/vbd'
+ host_name = 'fake-host'
+ self.volume_api.attach(ctx, volume, None, host_name, mountpoint)
+ # volume is attached
+ volume = db.volume_get(ctx, volume['id'])
+ self.assertEquals(volume['status'], 'in-use')
+ self.assertEquals(volume['instance_uuid'], None)
+ self.assertEquals(volume['attached_host'], host_name)
+ self.assertEquals(volume['mountpoint'], mountpoint)
+ self.assertEquals(volume['attach_status'], 'attached')
+ # build request to force detach
+ req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+ # request status of 'error'
+ req.body = jsonutils.dumps({'os-force_detach': None})
+ # attach admin context to request
+ req.environ['cinder.context'] = ctx
+ # make request
+ resp = req.get_response(app())
+ # request is accepted
+ self.assertEquals(resp.status_int, 202)
+ volume = db.volume_get(ctx, volume['id'])
+ # status changed to 'available'
+ self.assertEquals(volume['status'], 'available')
+ self.assertEquals(volume['instance_uuid'], None)
+ self.assertEquals(volume['attached_host'], None)
+ self.assertEquals(volume['mountpoint'], None)
+ self.assertEquals(volume['attach_status'], 'detached')
+ # cleanup
+ svc.stop()
+
+ def test_attach_in_use_volume_by_instance(self):
"""Test that attaching to an in-use volume fails."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
- self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint)
+ self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, mountpoint)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
fakes.get_fake_uuid(),
+ None,
+ mountpoint)
+ # cleanup
+ svc.stop()
+
+ def test_attach_in_use_volume_by_host(self):
+ """Test that attaching to an in-use volume fails."""
+ # admin context
+ ctx = context.RequestContext('admin', 'fake', True)
+ # current status is available
+ volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+ 'provider_location': ''})
+ connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+ # start service to handle rpc messages for attach requests
+ svc = self.start_service('volume', host='test')
+ self.volume_api.reserve_volume(ctx, volume)
+ self.volume_api.initialize_connection(ctx, volume, connector)
+ mountpoint = '/dev/vbd'
+ host_name = 'fake_host'
+ self.volume_api.attach(ctx, volume, None, host_name, mountpoint)
+ self.assertRaises(exception.InvalidVolume,
+ self.volume_api.attach,
+ ctx,
+ volume,
+ None,
+ host_name,
mountpoint)
# cleanup
svc.stop()
ctx,
volume,
stubs.FAKE_UUID,
+ None,
mountpoint)
# cleanup
svc.stop()
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
- def test_attach(self):
+ def test_attach_to_instance(self):
body = {'os-attach': {'instance_uuid': 'fake',
'mountpoint': '/dev/vdc'}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ def test_attach_to_host(self):
+ body = {'os-attach': {'host_name': 'fake_host',
+ 'mountpoint': '/dev/vdc'}}
+ req = webob.Request.blank('/v2/fake/volumes/1/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
+ def test_attach_with_invalid_arguments(self):
+ # Invalid request to attach volume to an invalid target
+ body = {'os-attach': {'mountpoint': '/dev/vdc'}}
+ req = webob.Request.blank('/v2/fake/volumes/1/action')
+ req.method = "POST"
+ req.headers["content-type"] = "application/json"
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ # Invalid request to attach volume to an instance and a host
+ body = {'os-attach': {'instance_uuid': 'fake',
+ 'host_name': 'fake_host',
+ 'mountpoint': '/dev/vdc'}}
+ req = webob.Request.blank('/v2/fake/volumes/1/action')
+ req.method = "POST"
+ req.headers["content-type"] = "application/json"
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_extend_volume(self):
def fake_extend_volume(*args, **kwargs):
return {}
'display_name': 'Volume Test Name',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': False,
'display_name': 'Volume Test Name',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': False,
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
+ 'host_name': None,
'device': '/',
}],
'bootable': False,
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
+ 'host_name': None,
'device': '/',
}],
'bootable': False,
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': False,
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': False,
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': False,
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': True,
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
+ 'attached_host': None,
'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
+ 'host_name': None,
'device': '/',
}
],
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
+ 'host_name': None,
'device': '/',
}],
'volume_type': 'vol_type_name',
{
'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'
}
{
'device': '/',
'server_id': 'fakeuuid',
+ 'host_name': None,
'id': '1',
'volume_id': '1'
}
def test_volume_attached_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID, db.volume_attached, self.ctxt,
- 42, 'invalid-uuid', '/tmp')
+ 42, 'invalid-uuid', None, '/tmp')
- def test_volume_attached(self):
+ def test_volume_attached_to_instance(self):
volume = db.volume_create(self.ctxt, {'host': 'host1'})
+ instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
db.volume_attached(self.ctxt, volume['id'],
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
+ instance_uuid, None, '/tmp')
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual(volume['status'], 'in-use')
self.assertEqual(volume['mountpoint'], '/tmp')
self.assertEqual(volume['attach_status'], 'attached')
- self.assertEqual(volume['instance_uuid'],
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
+ self.assertEqual(volume['instance_uuid'], instance_uuid)
+ self.assertEqual(volume['attached_host'], None)
+
+ def test_volume_attached_to_host(self):
+ volume = db.volume_create(self.ctxt, {'host': 'host1'})
+ host_name = 'fake_host'
+ db.volume_attached(self.ctxt, volume['id'],
+ None, host_name, '/tmp')
+ volume = db.volume_get(self.ctxt, volume['id'])
+ self.assertEqual(volume['status'], 'in-use')
+ self.assertEqual(volume['mountpoint'], '/tmp')
+ self.assertEqual(volume['attach_status'], 'attached')
+ self.assertEqual(volume['instance_uuid'], None)
+ self.assertEqual(volume['attached_host'], host_name)
def test_volume_data_get_for_host(self):
for i in xrange(3):
db.volume_data_get_for_project(
self.ctxt, 'p%d' % i))
- def test_volume_detached(self):
+ def test_volume_detached_from_instance(self):
volume = db.volume_create(self.ctxt, {})
- db.volume_attached(self.ctxt,
- volume['id'],
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '/tmp')
+ db.volume_attached(self.ctxt, volume['id'],
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ None, '/tmp')
+ db.volume_detached(self.ctxt, volume['id'])
+ volume = db.volume_get(self.ctxt, volume['id'])
+ self.assertEqual('available', volume['status'])
+ self.assertEqual('detached', volume['attach_status'])
+ self.assertIsNone(volume['mountpoint'])
+ self.assertIsNone(volume['instance_uuid'])
+ self.assertIsNone(volume['attached_host'])
+
+ def test_volume_detached_from_host(self):
+ volume = db.volume_create(self.ctxt, {})
+ db.volume_attached(self.ctxt, volume['id'],
+ None, 'fake_host', '/tmp')
db.volume_detached(self.ctxt, volume['id'])
volume = db.volume_get(self.ctxt, volume['id'])
self.assertEqual('available', volume['status'])
self.assertEqual('detached', volume['attach_status'])
self.assertIsNone(volume['mountpoint'])
self.assertIsNone(volume['instance_uuid'])
+ self.assertIsNone(volume['attached_host'])
def test_volume_get(self):
volume = db.volume_create(self.ctxt, {})
# Make sure we put all the columns back
for column in volumes_v10.c:
self.assertTrue(volumes.c.__contains__(column.name))
+
+ def test_migration_012(self):
+ """Test that adding attached_host column works correctly."""
+ for (key, engine) in self.engines.items():
+ migration_api.version_control(engine,
+ TestMigrations.REPOSITORY,
+ migration.INIT_VERSION)
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 11)
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 12)
+ volumes = sqlalchemy.Table('volumes',
+ metadata,
+ autoload=True)
+ self.assertTrue(isinstance(volumes.c.attached_host.type,
+ sqlalchemy.types.VARCHAR))
+
+ migration_api.downgrade(engine, TestMigrations.REPOSITORY, 11)
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+
+ volumes = sqlalchemy.Table('volumes',
+ metadata,
+ autoload=True)
+ self.assertTrue('attached_host' not in volumes.c)
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
- instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
+ # attach the volume to an instance, then detach it
+ instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, instance_uuid,
- mountpoint)
+ None, mountpoint)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
+ self.assertEqual(vol['attached_host'], None)
+
+ self.assertRaises(exception.VolumeAttached,
+ self.volume.delete_volume,
+ self.context,
+ volume_id)
+ self.volume.detach_volume(self.context, volume_id)
+ vol = db.volume_get(self.context, volume_id)
+ self.assertEqual(vol['status'], "available")
+
+ self.volume.delete_volume(self.context, volume_id)
+ self.assertRaises(exception.VolumeNotFound,
+ db.volume_get,
+ self.context,
+ volume_id)
+
+ # attach the volume to a host, then detach it
+ volume = self._create_volume()
+ volume_id = volume['id']
+ self.volume.create_volume(self.context, volume_id)
+ self.volume.attach_volume(self.context, volume_id, None,
+ 'fake_host', mountpoint)
+ vol = db.volume_get(context.get_admin_context(), volume_id)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+ self.assertEqual(vol['mountpoint'], mountpoint)
+ self.assertEqual(vol['instance_uuid'], None)
+ # the host name is sanitized to conform to RFC-952 and RFC-1123
+ self.assertEqual(vol['attached_host'], 'fake-host')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
pass
self.stubs.Set(rpc, 'cast', fake_cast)
instance_uuid = '12345678-1234-5678-1234-567812345678'
-
+ # create volume and attach to the instance
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
db.volume_attached(self.context, volume['id'], instance_uuid,
- '/dev/sda1')
+ None, '/dev/sda1')
+
+ volume_api = cinder.volume.api.API()
+ volume = volume_api.get(self.context, volume['id'])
+ self.assertRaises(exception.InvalidVolume,
+ volume_api.create_snapshot,
+ self.context, volume,
+ 'fake_name', 'fake_description')
+ snapshot_ref = volume_api.create_snapshot_force(self.context,
+ volume,
+ 'fake_name',
+ 'fake_description')
+ db.snapshot_destroy(self.context, snapshot_ref['id'])
+ db.volume_destroy(self.context, volume['id'])
+
+ # create volume and attach to the host
+ volume = self._create_volume()
+ self.volume.create_volume(self.context, volume['id'])
+ db.volume_attached(self.context, volume['id'], None,
+ 'fake_host', '/dev/sda1')
volume_api = cinder.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
snapshot=self.fake_snapshot,
host='fake_host')
- def test_attach_volume(self):
+ def test_attach_volume_to_instance(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid='fake_uuid',
- mountpoint='fake_mountpoint')
+ host_name=None,
+ mountpoint='fake_mountpoint',
+ version='1.7')
+
+ def test_attach_volume_to_host(self):
+ self._test_volume_api('attach_volume',
+ rpc_method='call',
+ volume=self.fake_volume,
+ instance_uuid=None,
+ host_name='fake_host',
+ mountpoint='fake_mountpoint',
+ version='1.7')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
- def attach(self, context, volume, instance_uuid, mountpoint):
+ def attach(self, context, volume, instance_uuid, host_name, mountpoint):
return self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
+ host_name,
mountpoint)
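
Callers of the volume API now supply both target arguments and leave the unused one as None. A short sketch of the two call shapes, following the tests in this change (ctx, volume and mountpoint are assumed to be set up as in those tests):

    # attach to an instance: host_name is None
    self.volume_api.attach(ctx, volume, instance_uuid, None, mountpoint)

    # attach to a host: instance_uuid is None
    self.volume_api.attach(ctx, volume, None, 'fake_host', mountpoint)
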
@wrap_check_policy
"""Disallow connection from connector"""
raise NotImplementedError()
- def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
- """Callback for volume attached to instance."""
+ def attach_volume(self, context, volume_id, instance_uuid, host_name,
+ mountpoint):
+ """Callback for volume attached to instance or host."""
pass
def detach_volume(self, context, volume_id):
def ensure_export(self, context, volume):
pass
- def attach_volume(self, context, volume, instance_uuid, mountpoint):
- pass
-
def detach_volume(self, context, volume):
pass
"""Disallow connection from connector."""
pass
- def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
- """Callback for volume attached to instance."""
- pass
-
def detach_volume(self, context, volume_id):
"""Callback for volume detached."""
pass
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
- RPC_API_VERSION = '1.6'
+ RPC_API_VERSION = '1.7'
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
QUOTAS.commit(context, reservations, project_id=project_id)
return True
- def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
+ def attach_volume(self, context, volume_id, instance_uuid, host_name,
+ mountpoint):
"""Updates db to show volume is attached"""
-
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
instance_uuid):
msg = _("being attached by another instance")
raise exception.InvalidVolume(reason=msg)
+ if (volume['attached_host'] and
+ volume['attached_host'] != host_name):
+ msg = _("being attached by another host")
+ raise exception.InvalidVolume(reason=msg)
elif volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
self.db.volume_update(context, volume_id,
{"instance_uuid": instance_uuid,
+ "attached_host": host_name,
"status": "attaching"})
- if not uuidutils.is_uuid_like(instance_uuid):
+ if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_update(context,
volume_id,
{'status': 'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
+ host_name_sanitized = (utils.sanitize_hostname(host_name)
+ if host_name else None)
+
try:
self.driver.attach_volume(context,
volume_id,
instance_uuid,
+ host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attached(context.elevated(),
volume_id,
instance_uuid,
+ host_name_sanitized,
mountpoint)
return do_attach()
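
Before the driver call and the DB update, the host name is passed through utils.sanitize_hostname(). A small illustration of the expected normalization, assuming the helper maps names into RFC-952/RFC-1123 form as the volume test above relies on:

    # underscores are not valid in host names and are replaced with hyphens
    utils.sanitize_hostname('fake_host')  # -> 'fake-host'
    # a name that is already valid should come back unchanged
    utils.sanitize_hostname('fake-host')  # -> 'fake-host'
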
with excutils.save_and_reraise_exception():
payload['message'] = unicode(error)
finally:
- if volume['instance_uuid'] is None:
+ if (volume['instance_uuid'] is None and
+ volume['attached_host'] is None):
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
- 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image
+ 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
- 1.5 - Add accept_transfer
- 1.6 - Add extend_volume
+ 1.5 - Add accept_transfer.
+ 1.6 - Add extend_volume.
+ 1.7 - Adds host_name parameter to attach_volume()
+ to allow attaching to a host rather than an instance.
'''
BASE_RPC_API_VERSION = '1.0'
snapshot_id=snapshot['id']),
topic=rpc.queue_get_for(ctxt, self.topic, host))
- def attach_volume(self, ctxt, volume, instance_uuid, mountpoint):
+ def attach_volume(self, ctxt, volume, instance_uuid, host_name,
+ mountpoint):
return self.call(ctxt, self.make_msg('attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
+ host_name=host_name,
mountpoint=mountpoint),
topic=rpc.queue_get_for(ctxt,
self.topic,
- volume['host']))
+ volume['host']),
+ version='1.7')
def detach_volume(self, ctxt, volume):
return self.call(ctxt, self.make_msg('detach_volume',