if 'host_name' in body['os-attach']:
host_name = body['os-attach']['host_name']
mountpoint = body['os-attach']['mountpoint']
+ if 'mode' in body['os-attach']:
+ mode = body['os-attach']['mode']
+ else:
+ mode = 'rw'
if instance_uuid and host_name:
msg = _("Invalid request to attach volume to an invalid target")
raise webob.exc.HTTPBadRequest(explanation=msg)
+ if mode not in ('rw', 'ro'):
+ msg = _("Invalid request to attach volume with an invalid mode. "
+ "Attaching mode should be 'rw' or 'ro'")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
self.volume_api.attach(context, volume,
- instance_uuid, host_name, mountpoint)
+ instance_uuid, host_name, mountpoint, mode)
return webob.Response(status_int=202)
@wsgi.action('os-detach')
context = req.environ['cinder.context']
volume = self.volume_api.get(context, id)
try:
- val = int(body['os-extend']['new_size'])
- except ValueError:
+ _val = int(body['os-extend']['new_size'])
+ except (KeyError, ValueError):
msg = _("New volume size must be specified as an integer.")
raise webob.exc.HTTPBadRequest(explanation=msg)
self.volume_api.extend(context, volume, size)
return webob.Response(status_int=202)
+ @wsgi.action('os-update_readonly_flag')
+ def _volume_readonly_update(self, req, id, body):
+ """Update a volume's 'readonly' flag.
+
+ Expects body {'os-update_readonly_flag': {'readonly': <bool>}}.
+ Returns HTTP 202 on success, HTTP 400 on a malformed body.
+ """
+ context = req.environ['cinder.context']
+ volume = self.volume_api.get(context, id)
+
+ # Reject requests missing the action's top-level key.
+ if not self.is_valid_body(body, 'os-update_readonly_flag'):
+ msg = _("No 'os-update_readonly_flag' was specified "
+ "in request.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ readonly_flag = body['os-update_readonly_flag'].get('readonly')
+
+ # Require a JSON boolean; strings such as "True" are rejected.
+ if not isinstance(readonly_flag, bool):
+ msg = _("Volume 'readonly' flag must be specified "
+ "in request as a boolean.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ self.volume_api.update_readonly_flag(context, volume, readonly_flag)
+ return webob.Response(status_int=202)
+
class Volume_actions(extensions.ExtensionDescriptor):
"""Enable volume actions
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
+ _visible_admin_metadata_keys = ['readonly', 'attached_mode']
+
def __init__(self, ext_mgr):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
+ def _add_visible_admin_metadata(self, context, volume):
+ """Merge whitelisted admin metadata into the volume's user metadata.
+
+ Only keys listed in _visible_admin_metadata_keys are exposed;
+ admin values overwrite user-metadata entries with the same key.
+ """
+ if context is None:
+ return
+
+ visible_admin_meta = {}
+
+ # A non-admin context cannot see admin metadata, so re-fetch the
+ # volume with an elevated context to read it.
+ volume_tmp = (volume if context.is_admin else
+ self.volume_api.get(context.elevated(), volume['id']))
+
+ if volume_tmp.get('volume_admin_metadata'):
+ for item in volume_tmp['volume_admin_metadata']:
+ if item['key'] in self._visible_admin_metadata_keys:
+ visible_admin_meta[item['key']] = item['value']
+ # avoid circular ref when volume is a Volume instance
+ elif (volume_tmp.get('admin_metadata') and
+ isinstance(volume_tmp.get('admin_metadata'), dict)):
+ for key in self._visible_admin_metadata_keys:
+ if key in volume_tmp['admin_metadata'].keys():
+ visible_admin_meta[key] = volume_tmp['admin_metadata'][key]
+
+ if not visible_admin_meta:
+ return
+
+ # NOTE(zhiyan): update visible administration metadata to
+ # volume metadata, administration metadata will rewrite existing key.
+ if volume.get('volume_metadata'):
+ orig_meta = volume.get('volume_metadata')
+ # Overwrite matching keys in place, then append the rest.
+ for item in orig_meta:
+ if item['key'] in visible_admin_meta.keys():
+ item['value'] = visible_admin_meta.pop(item['key'])
+ for key, value in visible_admin_meta.iteritems():
+ orig_meta.append({'key': key, 'value': value})
+ # avoid circular ref when vol is a Volume instance
+ elif (volume.get('metadata') and
+ isinstance(volume.get('metadata'), dict)):
+ volume['metadata'].update(visible_admin_meta)
+ else:
+ volume['metadata'] = visible_admin_meta
+
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
except exception.NotFound:
raise exc.HTTPNotFound()
+ self._add_visible_admin_metadata(context, vol)
+
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
volumes = self.volume_api.get_all(context, marker=None, limit=None,
sort_key='created_at',
sort_dir='desc', filters=search_opts)
+
+ for volume in volumes:
+ self._add_visible_admin_metadata(context, volume)
+
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
- retval = _translate_volume_detail_view(context,
- dict(new_volume.iteritems()),
- image_uuid)
+ new_volume = dict(new_volume.iteritems())
+
+ self._add_visible_admin_metadata(context, new_volume)
+
+ retval = _translate_volume_detail_view(context, new_volume, image_uuid)
return {'volume': retval}
volume.update(update_dict)
+ self._add_visible_admin_metadata(context, volume)
+
return {'volume': _translate_volume_detail_view(context, volume)}
_view_builder_class = volume_views.ViewBuilder
+ _visible_admin_metadata_keys = ['readonly', 'attached_mode']
+
def __init__(self, ext_mgr):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
+ def _add_visible_admin_metadata(self, context, volume):
+ """Merge whitelisted admin metadata into the volume's user metadata.
+
+ Only keys listed in _visible_admin_metadata_keys are exposed;
+ admin values overwrite user-metadata entries with the same key.
+ """
+ if context is None:
+ return
+
+ visible_admin_meta = {}
+
+ # A non-admin context cannot see admin metadata, so re-fetch the
+ # volume with an elevated context to read it.
+ volume_tmp = (volume if context.is_admin else
+ self.volume_api.get(context.elevated(), volume['id']))
+
+ if volume_tmp.get('volume_admin_metadata'):
+ for item in volume_tmp['volume_admin_metadata']:
+ if item['key'] in self._visible_admin_metadata_keys:
+ visible_admin_meta[item['key']] = item['value']
+ # avoid circular ref when volume is a Volume instance
+ elif (volume_tmp.get('admin_metadata') and
+ isinstance(volume_tmp.get('admin_metadata'), dict)):
+ for key in self._visible_admin_metadata_keys:
+ if key in volume_tmp['admin_metadata'].keys():
+ visible_admin_meta[key] = volume_tmp['admin_metadata'][key]
+
+ if not visible_admin_meta:
+ return
+
+ # NOTE(zhiyan): update visible administration metadata to
+ # volume metadata, administration metadata will rewrite existing key.
+ if volume.get('volume_metadata'):
+ orig_meta = volume.get('volume_metadata')
+ # Overwrite matching keys in place, then append the rest.
+ for item in orig_meta:
+ if item['key'] in visible_admin_meta.keys():
+ item['value'] = visible_admin_meta.pop(item['key'])
+ for key, value in visible_admin_meta.iteritems():
+ orig_meta.append({'key': key, 'value': value})
+ # avoid circular ref when vol is a Volume instance
+ elif (volume.get('metadata') and
+ isinstance(volume.get('metadata'), dict)):
+ volume['metadata'].update(visible_admin_meta)
+ else:
+ volume['metadata'] = visible_admin_meta
+
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
msg = _("Volume could not be found")
raise exc.HTTPNotFound(explanation=msg)
+ self._add_visible_admin_metadata(context, vol)
+
return self._view_builder.detail(req, vol)
def delete(self, req, id):
volumes = self.volume_api.get_all(context, marker, limit, sort_key,
sort_dir, filters)
+
+ for volume in volumes:
+ self._add_visible_admin_metadata(context, volume)
+
limited_list = common.limited(volumes, req)
if is_detail:
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
- retval = self._view_builder.summary(req, dict(new_volume.iteritems()))
+ new_volume = dict(new_volume.iteritems())
+
+ self._add_visible_admin_metadata(context, new_volume)
+
+ retval = self._view_builder.summary(req, new_volume)
return retval
volume.update(update_dict)
+ self._add_visible_admin_metadata(context, volume)
+
return self._view_builder.detail(req, volume)
##################
+def volume_admin_metadata_get(context, volume_id):
+ """Get all administration metadata for a volume."""
+ return IMPL.volume_admin_metadata_get(context, volume_id)
+
+
+def volume_admin_metadata_delete(context, volume_id, key):
+ """Delete the given administration metadata item."""
+ IMPL.volume_admin_metadata_delete(context, volume_id, key)
+
+
+def volume_admin_metadata_update(context, volume_id, metadata, delete):
+ """Update administration metadata if it exists, otherwise create it.
+
+ When ``delete`` is true, existing keys absent from ``metadata``
+ are removed.
+ """
+ IMPL.volume_admin_metadata_update(context, volume_id, metadata, delete)
+
+
+##################
+
+
def volume_type_create(context, values):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values)
"""Implementation of SQLAlchemy backend."""
-import datetime
import sys
import uuid
import warnings
def volume_create(context, values):
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
models.VolumeMetadata)
+ if is_admin_context(context):
+ values['volume_admin_metadata'] = \
+ _metadata_refs(values.get('admin_metadata'),
+ models.VolumeAdminMetadata)
+
volume_ref = models.Volume()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
@require_admin_context
def volume_destroy(context, volume_id):
session = get_session()
+ now = timeutils.utcnow()
with session.begin():
session.query(models.Volume).\
filter_by(id=volume_id).\
update({'status': 'deleted',
'deleted': True,
- 'deleted_at': timeutils.utcnow(),
+ 'deleted_at': now,
'updated_at': literal_column('updated_at')})
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
session.query(models.VolumeMetadata).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
+ 'deleted_at': now,
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.VolumeAdminMetadata).\
+ filter_by(volume_id=volume_id).\
+ update({'deleted': True,
+ 'deleted_at': now,
'updated_at': literal_column('updated_at')})
@require_context
def _volume_get_query(context, session=None, project_only=False):
- return model_query(context, models.Volume, session=session,
- project_only=project_only).\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type'))
+ if is_admin_context(context):
+ return model_query(context, models.Volume, session=session,
+ project_only=project_only).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_admin_metadata')).\
+ options(joinedload('volume_type'))
+ else:
+ return model_query(context, models.Volume, session=session,
+ project_only=project_only).\
+ options(joinedload('volume_metadata')).\
+ options(joinedload('volume_type'))
@require_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
result = model_query(context, models.Volume, read_deleted="no").\
options(joinedload('volume_metadata')).\
+ options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type')).\
filter_by(instance_uuid=instance_uuid).\
all()
with session.begin():
metadata = values.get('metadata')
if metadata is not None:
- _volume_metadata_update(context,
- volume_id,
- values.pop('metadata'),
- delete=True,
- session=session)
+ _volume_user_metadata_update(context,
+ volume_id,
+ values.pop('metadata'),
+ delete=True,
+ session=session)
+
+ admin_metadata = values.get('admin_metadata')
+ if is_admin_context(context) and admin_metadata is not None:
+ _volume_admin_metadata_update(context,
+ volume_id,
+ values.pop('admin_metadata'),
+ delete=True,
+ session=session)
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref.update(values)
####################
-def _volume_metadata_get_query(context, volume_id, session=None):
- return model_query(context, models.VolumeMetadata,
- session=session, read_deleted="no").\
+def _volume_x_metadata_get_query(context, volume_id, model, session=None):
+ """Build a query for non-deleted ``model`` metadata rows of a volume."""
+ return model_query(context, model, session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
-@require_context
-@require_volume_exists
-def _volume_metadata_get(context, volume_id, session=None):
- rows = _volume_metadata_get_query(context, volume_id, session).all()
+def _volume_x_metadata_get(context, volume_id, model, session=None):
+ """Return a volume's ``model`` metadata rows as a {key: value} dict."""
+ rows = _volume_x_metadata_get_query(context, volume_id, model,
+ session=session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
-@require_context
-@require_volume_exists
-def volume_metadata_get(context, volume_id):
- return _volume_metadata_get(context, volume_id)
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_delete(context, volume_id, key):
- _volume_metadata_get_query(context, volume_id).\
- filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-def _volume_metadata_get_item(context, volume_id, key, session=None):
- result = _volume_metadata_get_query(context, volume_id, session=session).\
+def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec,
+ session=None):
+ """Fetch one ``model`` metadata row by key; raise ``notfound_exec`` if absent."""
+ result = _volume_x_metadata_get_query(context, volume_id,
+ model, session=session).\
filter_by(key=key).\
first()
if not result:
- raise exception.VolumeMetadataNotFound(metadata_key=key,
- volume_id=volume_id)
+ raise notfound_exec(metadata_key=key, volume_id=volume_id)
return result
-@require_context
-@require_volume_exists
-def volume_metadata_get_item(context, volume_id, key):
- return _volume_metadata_get_item(context, volume_id, key)
-
-
-@require_context
-@require_volume_exists
-def _volume_metadata_update(context, volume_id, metadata, delete,
- session=None):
+def _volume_x_metadata_update(context, volume_id, metadata, delete,
+ model, notfound_exec, session=None):
+ """Create or update ``model`` metadata rows for a volume.
+
+ When ``delete`` is true, existing keys absent from ``metadata`` are
+ soft-deleted first. Returns ``metadata`` unchanged.
+ """
if not session:
session = get_session()
with session.begin(subtransactions=True):
# Set existing metadata to deleted if delete argument is True
if delete:
- original_metadata = _volume_metadata_get(context, volume_id,
- session)
+ original_metadata = _volume_x_metadata_get(context, volume_id,
+ model, session=session)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
- meta_ref = _volume_metadata_get_item(context, volume_id,
- meta_key, session)
+ meta_ref = _volume_x_metadata_get_item(context, volume_id,
+ meta_key, model,
+ notfound_exec,
+ session=session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
item = {"value": meta_value}
try:
- meta_ref = _volume_metadata_get_item(context, volume_id,
- meta_key, session)
- except exception.VolumeMetadataNotFound as e:
- meta_ref = models.VolumeMetadata()
+ meta_ref = _volume_x_metadata_get_item(context, volume_id,
+ meta_key, model,
+ notfound_exec,
+ session=session)
+ except notfound_exec:
+ # No existing row for this key: create a fresh model instance.
+ meta_ref = model()
item.update({"key": meta_key, "volume_id": volume_id})
meta_ref.update(item)
return metadata
+def _volume_user_metadata_get_query(context, volume_id, session=None):
+ """Query helper for user (non-admin) volume metadata rows."""
+ return _volume_x_metadata_get_query(context, volume_id,
+ models.VolumeMetadata, session=session)
+
+
+@require_context
+@require_volume_exists
+def _volume_user_metadata_get(context, volume_id, session=None):
+ """Return user metadata for a volume as a {key: value} dict."""
+ return _volume_x_metadata_get(context, volume_id,
+ models.VolumeMetadata, session=session)
+
+
+@require_context
+def _volume_user_metadata_get_item(context, volume_id, key, session=None):
+ """Fetch one user-metadata row; raises VolumeMetadataNotFound."""
+ return _volume_x_metadata_get_item(context, volume_id, key,
+ models.VolumeMetadata,
+ exception.VolumeMetadataNotFound,
+ session=session)
+
+
+@require_context
+@require_volume_exists
+def _volume_user_metadata_update(context, volume_id, metadata, delete,
+ session=None):
+ """Create/update user metadata; see _volume_x_metadata_update."""
+ return _volume_x_metadata_update(context, volume_id, metadata, delete,
+ models.VolumeMetadata,
+ exception.VolumeMetadataNotFound,
+ session=session)
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_get_item(context, volume_id, key):
+ """Return a single user-metadata row for ``key``."""
+ return _volume_user_metadata_get_item(context, volume_id, key)
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_get(context, volume_id):
+ """Return all user metadata for a volume as a dict."""
+ return _volume_user_metadata_get(context, volume_id)
+
+
+@require_context
+@require_volume_exists
+def volume_metadata_delete(context, volume_id, key):
+ """Soft-delete the user-metadata item ``key``."""
+ _volume_user_metadata_get_query(context, volume_id).\
+ filter_by(key=key).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
- return _volume_metadata_update(context, volume_id, metadata, delete)
+ return _volume_user_metadata_update(context, volume_id, metadata, delete)
+
+
+###################
+
+
+def _volume_admin_metadata_get_query(context, volume_id, session=None):
+ """Query helper for administration volume metadata rows."""
+ return _volume_x_metadata_get_query(context, volume_id,
+ models.VolumeAdminMetadata,
+ session=session)
+
+
+@require_admin_context
+@require_volume_exists
+def _volume_admin_metadata_get(context, volume_id, session=None):
+ """Return admin metadata for a volume as a {key: value} dict."""
+ return _volume_x_metadata_get(context, volume_id,
+ models.VolumeAdminMetadata, session=session)
+
+
+@require_admin_context
+@require_volume_exists
+def _volume_admin_metadata_update(context, volume_id, metadata, delete,
+ session=None):
+ """Create/update admin metadata; see _volume_x_metadata_update."""
+ return _volume_x_metadata_update(context, volume_id, metadata, delete,
+ models.VolumeAdminMetadata,
+ exception.VolumeAdminMetadataNotFound,
+ session=session)
+
+
+@require_admin_context
+@require_volume_exists
+def volume_admin_metadata_get(context, volume_id):
+ """Get all administration metadata for a volume (admin only)."""
+ return _volume_admin_metadata_get(context, volume_id)
+
+
+@require_admin_context
+@require_volume_exists
+def volume_admin_metadata_delete(context, volume_id, key):
+ """Soft-delete the admin-metadata item ``key`` (admin only)."""
+ _volume_admin_metadata_get_query(context, volume_id).\
+ filter_by(key=key).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+@require_volume_exists
+def volume_admin_metadata_update(context, volume_id, metadata, delete):
+ """Create/update admin metadata for a volume (admin only)."""
+ return _volume_admin_metadata_update(context, volume_id, metadata, delete)
###################
BEGIN TRANSACTION;
-CREATE TABLE volumes_v12 (
+CREATE TABLE volumes_v13 (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
volume_type_id VARCHAR(36),
source_volid VARCHAR(36),
bootable BOOLEAN,
+ provider_geometry VARCHAR(255),
PRIMARY KEY (id)
);
-INSERT INTO volumes_v12
+INSERT INTO volumes_v13
SELECT created_at,
updated_at,
deleted_at,
provider_auth,
volume_type_id,
source_volid,
- bootable
+ bootable,
+ provider_geometry
FROM volumes;
DROP TABLE volumes;
-ALTER TABLE volumes_v12 RENAME TO volumes;
+ALTER TABLE volumes_v13 RENAME TO volumes;
COMMIT;
--- /dev/null
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
+
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ """Add the volume_admin_metadata table."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # Autoload 'volumes' so the ForeignKey below can resolve against it.
+ _volumes = Table('volumes', meta, autoload=True)
+
+ # New table
+ volume_admin_metadata = Table(
+ 'volume_admin_metadata', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('volume_id', String(length=36), ForeignKey('volumes.id'),
+ nullable=False),
+ Column('key', String(length=255)),
+ Column('value', String(length=255)),
+ mysql_engine='InnoDB'
+ )
+
+ try:
+ volume_admin_metadata.create()
+ except Exception:
+ LOG.error(_("Table |%s| not created!"), repr(volume_admin_metadata))
+ raise
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ volume_admin_metadata = Table('volume_admin_metadata',
+ meta,
+ autoload=True)
+ try:
+ volume_admin_metadata.drop()
+ except Exception:
+ LOG.error(_("volume_admin_metadata table not dropped"))
'VolumeMetadata.deleted == False)')
+class VolumeAdminMetadata(BASE, CinderBase):
+ """Represents an administrator metadata key/value pair for a volume."""
+ __tablename__ = 'volume_admin_metadata'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
+ # The join filters on deleted == False so soft-deleted rows are
+ # hidden from the 'volume_admin_metadata' backref on Volume.
+ volume = relationship(Volume, backref="volume_admin_metadata",
+ foreign_keys=volume_id,
+ primaryjoin='and_('
+ 'VolumeAdminMetadata.volume_id == Volume.id,'
+ 'VolumeAdminMetadata.deleted == False)')
+
+
class VolumeTypes(BASE, CinderBase):
"""Represent possible volume_types of volumes offered."""
__tablename__ = "volume_types"
Service,
Volume,
VolumeMetadata,
+ VolumeAdminMetadata,
SnapshotMetadata,
Transfer,
VolumeTypeExtraSpecs,
message = _("Invalid source volume %(reason)s.")
+class InvalidVolumeAttachMode(Invalid):
+ """Raised when a volume attach requests a mode other than 'rw'/'ro'."""
+ message = _("Invalid attaching mode '%(mode)s' for "
+ "volume %(volume_id)s.")
+
+
class VolumeAttached(Invalid):
message = _("Volume %(volume_id)s is still attached, detach volume first.")
"key %(metadata_key)s.")
+class VolumeAdminMetadataNotFound(NotFound):
+ """Raised when a volume lacks the requested admin-metadata key."""
+ message = _("Volume %(volume_id)s has no administration metadata with "
+ "key %(metadata_key)s.")
+
+
class InvalidVolumeMetadata(Invalid):
message = _("Invalid metadata") + ": %(reason)s"
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
- self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
- self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, mountpoint)
+ self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
+ mountpoint, 'rw')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEquals(volume['status'], 'in-use')
self.assertEquals(volume['attached_host'], None)
self.assertEquals(volume['mountpoint'], mountpoint)
self.assertEquals(volume['attach_status'], 'attached')
+ admin_metadata = volume['volume_admin_metadata']
+ self.assertEquals(len(admin_metadata), 2)
+ self.assertEquals(admin_metadata[0]['key'], 'readonly')
+ self.assertEquals(admin_metadata[0]['value'], 'False')
+ self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+ self.assertEquals(admin_metadata[1]['value'], 'rw')
+ conn_info = self.volume_api.initialize_connection(ctx,
+ volume, connector)
+ self.assertEquals(conn_info['data']['access_mode'], 'rw')
# build request to force detach
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
# status changed to 'available'
self.assertEquals(volume['status'], 'available')
self.assertEquals(volume['instance_uuid'], None)
+ self.assertEquals(volume['attached_host'], None)
self.assertEquals(volume['mountpoint'], None)
self.assertEquals(volume['attach_status'], 'detached')
+ admin_metadata = volume['volume_admin_metadata']
+ self.assertEquals(len(admin_metadata), 1)
+ self.assertEquals(admin_metadata[0]['key'], 'readonly')
+ self.assertEquals(admin_metadata[0]['value'], 'False')
# cleanup
svc.stop()
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
- self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
host_name = 'fake-host'
- self.volume_api.attach(ctx, volume, None, host_name, mountpoint)
+ self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'ro')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEquals(volume['status'], 'in-use')
self.assertEquals(volume['attached_host'], host_name)
self.assertEquals(volume['mountpoint'], mountpoint)
self.assertEquals(volume['attach_status'], 'attached')
+ admin_metadata = volume['volume_admin_metadata']
+ self.assertEquals(len(admin_metadata), 2)
+ self.assertEquals(admin_metadata[0]['key'], 'readonly')
+ self.assertEquals(admin_metadata[0]['value'], 'False')
+ self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+ self.assertEquals(admin_metadata[1]['value'], 'ro')
+ conn_info = self.volume_api.initialize_connection(ctx,
+ volume, connector)
+ self.assertEquals(conn_info['data']['access_mode'], 'ro')
# build request to force detach
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
self.assertEquals(volume['attached_host'], None)
self.assertEquals(volume['mountpoint'], None)
self.assertEquals(volume['attach_status'], 'detached')
+ admin_metadata = volume['volume_admin_metadata']
+ self.assertEquals(len(admin_metadata), 1)
+ self.assertEquals(admin_metadata[0]['key'], 'readonly')
+ self.assertEquals(admin_metadata[0]['value'], 'False')
# cleanup
svc.stop()
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
- self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
- self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, mountpoint)
+ self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
+ mountpoint, 'rw')
+ conn_info = self.volume_api.initialize_connection(ctx,
+ volume, connector)
+ self.assertEquals(conn_info['data']['access_mode'], 'rw')
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
fakes.get_fake_uuid(),
None,
- mountpoint)
+ mountpoint,
+ 'rw')
+ self.assertRaises(exception.InvalidVolume,
+ self.volume_api.attach,
+ ctx,
+ volume,
+ fakes.get_fake_uuid(),
+ None,
+ mountpoint,
+ 'ro')
# cleanup
svc.stop()
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
- self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
host_name = 'fake_host'
- self.volume_api.attach(ctx, volume, None, host_name, mountpoint)
+ self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'rw')
+ conn_info = self.volume_api.initialize_connection(ctx,
+ volume, connector)
+ conn_info['data']['access_mode'] = 'rw'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
None,
host_name,
- mountpoint)
+ mountpoint,
+ 'rw')
+ self.assertRaises(exception.InvalidVolume,
+ self.volume_api.attach,
+ ctx,
+ volume,
+ None,
+ host_name,
+ mountpoint,
+ 'ro')
# cleanup
svc.stop()
# current status is available
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': ''})
- connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
- self.volume_api.initialize_connection(ctx, volume, connector)
values = {'status': 'attaching',
'instance_uuid': fakes.get_fake_uuid()}
db.volume_update(ctx, volume['id'], values)
volume,
stubs.FAKE_UUID,
None,
- mountpoint)
+ mountpoint,
+ 'rw')
+ # cleanup
+ svc.stop()
+
+ def test_attach_attaching_volume_with_different_mode(self):
+ """Test that attaching volume reserved for another mode fails."""
+ # admin context
+ ctx = context.RequestContext('admin', 'fake', True)
+ # current status is available
+ volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
+ 'provider_location': ''})
+ # start service to handle rpc messages for attach requests
+ svc = self.start_service('volume', host='test')
+ values = {'status': 'attaching',
+ 'instance_uuid': fakes.get_fake_uuid()}
+ db.volume_update(ctx, volume['id'], values)
+ # Simulate a pending attach already reserved in 'rw' mode.
+ db.volume_admin_metadata_update(ctx, volume['id'],
+ {"attached_mode": 'rw'}, False)
+ mountpoint = '/dev/vbd'
+ # Attaching the same volume in 'ro' mode must be rejected.
+ self.assertRaises(exception.InvalidVolume,
+ self.volume_api.attach,
+ ctx,
+ volume,
+ values['instance_uuid'],
+ None,
+ mountpoint,
+ 'ro')
+ # cleanup
+ svc.stop()
def test_attach_to_instance(self):
body = {'os-attach': {'instance_uuid': 'fake',
- 'mountpoint': '/dev/vdc'}}
+ 'mountpoint': '/dev/vdc',
+ 'mode': 'rw'}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
self.assertEqual(res.status_int, 202)
def test_attach_to_host(self):
+ # using 'read-write' mode attach volume by default
body = {'os-attach': {'host_name': 'fake_host',
'mountpoint': '/dev/vdc'}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ # Invalid request to attach volume with an invalid mode
+ body = {'os-attach': {'instance_uuid': 'fake',
+ 'mountpoint': '/dev/vdc',
+ 'mode': 'rr'}}
+ req = webob.Request.blank('/v2/fake/volumes/1/action')
+ req.method = "POST"
+ req.headers["content-type"] = "application/json"
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ body = {'os-attach': {'host_name': 'fake_host',
+ 'mountpoint': '/dev/vdc',
+ 'mode': 'ww'}}
+ req = webob.Request.blank('/v2/fake/volumes/1/action')
+ req.method = "POST"
+ req.headers["content-type"] = "application/json"
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_begin_detaching(self):
def fake_begin_detaching(*args, **kwargs):
return {}
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ def test_update_readonly_flag(self):
+ """os-update_readonly_flag with a boolean body returns HTTP 202."""
+ def fake_update_readonly_flag(*args, **kwargs):
+ return {}
+ self.stubs.Set(volume.API, 'update_readonly_flag',
+ fake_update_readonly_flag)
+
+ body = {'os-update_readonly_flag': {'readonly': True}}
+ req = webob.Request.blank('/v2/fake/volumes/1/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
def stub_volume_get(self, context, volume_id):
volume = stubs.stub_volume(volume_id)
import webob
from cinder import context
+from cinder import db
from cinder import test
from cinder.tests.api import fakes
from cinder import volume
super(VolumeHostAttributeTest, self).setUp()
self.stubs.Set(volume.API, 'get', fake_volume_get)
self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
+ self.stubs.Set(db, 'volume_get', fake_volume_get)
+
self.UUID = uuid.uuid4()
def test_get_volume_allowed(self):
from cinder.api import common
from cinder.api.openstack.wsgi import MetadataXMLDeserializer
from cinder.api.openstack.wsgi import XMLDeserializer
+from cinder import db
from cinder import test
from cinder.tests.api import fakes
from cinder import volume
self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
self.stubs.Set(volume.API, 'get_volume_image_metadata',
fake_get_volume_image_metadata)
+ self.stubs.Set(db, 'volume_get', fake_volume_get)
self.UUID = uuid.uuid4()
def _make_request(self, url):
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
+ 'attached_host': None,
'mountpoint': '/',
+ 'attached_mode': 'rw',
'status': 'fakestatus',
'migration_status': None,
'attach_status': 'attached',
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'volume_metadata': [],
- 'volume_type': {'name': 'vol_type_name'}}
+ 'volume_type': {'name': 'vol_type_name'},
+ 'readonly': 'False'}
volume.update(kwargs)
return volume
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
+ self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw',
+ 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'image_id': test_id,
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw',
+ 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
- 'device': '/',
+ 'device': '/'
}],
'bootable': False,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw',
+ 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'size': 1,
- }}
+ 'size': 1}}
self.assertEquals(res_dict, expected)
def test_volume_update_metadata(self):
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
- 'device': '/',
+ 'device': '/'
}],
'bootable': False,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {"qos_max_iops": 2000},
+ 'metadata': {"qos_max_iops": 2000,
+ "readonly": "False",
+ "attached_mode": "rw"},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
- 'size': 1,
+ 'size': 1
}}
self.assertEquals(res_dict, expected)
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw',
+ 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw',
+ 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw',
+ 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw',
+ 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'snapshot_id': None,
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
- 'volume_metadata': [],
+ 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'},
+ {'key': 'readonly', 'value': 'False'}],
'bootable': False,
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
if kwargs.get('volume_glance_metadata', None):
volume['bootable'] = True
+ if kwargs.get('attach_status') == 'detached':
+ del volume['volume_admin_metadata'][0]
return volume
raise exc.NotFound
+def stub_volume_get_db(context, volume_id):
+    """Stub for db.volume_get: return a fake volume dict for volume_id."""
+    return stub_volume(volume_id)
+
+
def stub_volume_get_all(context, search_opts=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc'):
return [stub_volume(100, project_id='fake'),
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {"qos_max_iops": 2000},
+ 'metadata': {"qos_max_iops": 2000,
+ "readonly": "False",
+ "attached_mode": "rw"},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
- 'metadata': {},
+ 'metadata': {'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
pass
    def initialize_connection(self, volume, connector):
+        # Flatten the admin-metadata rows into a plain key/value dict.
+        volume_metadata = {}
+        for metadata in volume['volume_admin_metadata']:
+            volume_metadata[metadata['key']] = metadata['value']
+        access_mode = volume_metadata.get('attached_mode')
+        if access_mode is None:
+            # No attach recorded yet: derive the mode from the readonly flag.
+            access_mode = ('ro'
+                           if volume_metadata.get('readonly') == 'True'
+                           else 'rw')
        return {
            'driver_volume_type': 'iscsi',
-            'data': {}
+            'data': {'access_mode': access_mode}
        }
def terminate_connection(self, volume, connector, **kwargs):
"volume:get": [],
"volume:get_all": [],
"volume:get_volume_metadata": [],
- "volume:delete": [],
- "volume:update": [],
"volume:delete_volume_metadata": [],
"volume:update_volume_metadata": [],
+ "volume:get_volume_admin_metadata": [["rule:admin_api"]],
+ "volume:delete_volume_admin_metadata": [["rule:admin_api"]],
+ "volume:update_volume_admin_metadata": [["rule:admin_api"]],
+ "volume:delete": [],
+ "volume:update": [],
"volume:attach": [],
"volume:detach": [],
"volume:reserve_volume": [],
"volume:extend": [],
"volume:migrate_volume": [["rule:admin_api"]],
"volume:migrate_volume_completion": [["rule:admin_api"]],
+ "volume:update_readonly_flag": [],
"volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]],
"volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]],
metadata,
autoload=True)
self.assertTrue('migration_status' not in volumes.c)
+
+ def test_migration_020(self):
+ """Test adding volume_admin_metadata table works correctly."""
+ for (key, engine) in self.engines.items():
+ migration_api.version_control(engine,
+ TestMigrations.REPOSITORY,
+ migration.INIT_VERSION)
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19)
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+
+ migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20)
+
+ self.assertTrue(engine.dialect.has_table(engine.connect(),
+ "volume_admin_metadata"))
+ volume_admin_metadata = sqlalchemy.Table('volume_admin_metadata',
+ metadata,
+ autoload=True)
+
+ self.assertTrue(isinstance(volume_admin_metadata.c.created_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(volume_admin_metadata.c.updated_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(volume_admin_metadata.c.deleted_at.type,
+ sqlalchemy.types.DATETIME))
+ self.assertTrue(isinstance(volume_admin_metadata.c.deleted.type,
+ sqlalchemy.types.BOOLEAN))
+ self.assertTrue(isinstance(volume_admin_metadata.c.deleted.type,
+ sqlalchemy.types.BOOLEAN))
+ self.assertTrue(isinstance(volume_admin_metadata.c.id.type,
+ sqlalchemy.types.INTEGER))
+ self.assertTrue(isinstance(volume_admin_metadata.c.volume_id.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(volume_admin_metadata.c.key.type,
+ sqlalchemy.types.VARCHAR))
+ self.assertTrue(isinstance(volume_admin_metadata.c.value.type,
+ sqlalchemy.types.VARCHAR))
+
+ migration_api.downgrade(engine, TestMigrations.REPOSITORY, 19)
+
+ self.assertFalse(engine.dialect.has_table(engine.connect(),
+ "volume_admin_metadata"))
@staticmethod
def _create_volume(size=0, snapshot_id=None, image_id=None,
- source_volid=None, metadata=None, status="creating",
- migration_status=None, availability_zone=None):
+ source_volid=None, metadata=None, admin_metadata=None,
+ status="creating", migration_status=None,
+ availability_zone=None):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['host'] = CONF.host
if metadata is not None:
vol['metadata'] = metadata
+ if admin_metadata is not None:
+ vol['admin_metadata'] = admin_metadata
return db.volume_create(context.get_admin_context(), vol)
except TypeError:
pass
- def test_run_attach_detach_volume(self):
+ def test_run_attach_detach_volume_for_instance(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
- volume = self._create_volume()
+ volume = self._create_volume(admin_metadata={'readonly': 'True'})
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, instance_uuid,
- None, mountpoint)
+ None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['attached_host'], None)
+ admin_metadata = vol['volume_admin_metadata']
+ self.assertEquals(len(admin_metadata), 2)
+ self.assertEquals(admin_metadata[0]['key'], 'readonly')
+ self.assertEquals(admin_metadata[0]['value'], 'True')
+ self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+ self.assertEquals(admin_metadata[1]['value'], 'ro')
+ connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+ conn_info = self.volume.initialize_connection(self.context,
+ volume_id, connector)
+ self.assertEquals(conn_info['data']['access_mode'], 'ro')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
- # attach volume to the host then to detach
- volume = self._create_volume()
+ def test_run_attach_detach_volume_for_host(self):
+ """Make sure volume can be attached and detached from host."""
+ mountpoint = "/dev/sdf"
+ volume = self._create_volume(admin_metadata={'readonly': 'False'})
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, None,
- 'fake_host', mountpoint)
+ 'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['instance_uuid'], None)
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual(vol['attached_host'], 'fake-host')
+ admin_metadata = vol['volume_admin_metadata']
+ self.assertEquals(len(admin_metadata), 2)
+ self.assertEquals(admin_metadata[0]['key'], 'readonly')
+ self.assertEquals(admin_metadata[0]['value'], 'False')
+ self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+ self.assertEquals(admin_metadata[1]['value'], 'rw')
+ connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+ conn_info = self.volume.initialize_connection(self.context,
+ volume_id, connector)
+ self.assertEquals(conn_info['data']['access_mode'], 'rw')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
+    def test_run_attach_detach_volume_with_attach_mode(self):
+        """Attach/detach a read-only volume in 'ro' mode.
+
+        Verifies the 'attached_mode' admin metadata is written on attach
+        and removed on detach — first for an instance attach, then for a
+        host attach — and that initialize_connection() reports
+        access_mode 'ro' while attached.
+        """
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        mountpoint = "/dev/sdf"
+        volume = self._create_volume(admin_metadata={'readonly': 'True'})
+        volume_id = volume['id']
+        db.volume_update(self.context, volume_id, {'status': 'available',
+                                                   'mountpoint': None,
+                                                   'instance_uuid': None,
+                                                   'attached_host': None,
+                                                   'attached_mode': None})
+        # Attach to an instance in read-only mode.
+        self.volume.attach_volume(self.context, volume_id, instance_uuid,
+                                  None, mountpoint, 'ro')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['status'], "in-use")
+        self.assertEqual(vol['attach_status'], "attached")
+        self.assertEqual(vol['mountpoint'], mountpoint)
+        self.assertEqual(vol['instance_uuid'], instance_uuid)
+        self.assertEqual(vol['attached_host'], None)
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 2)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+        self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEquals(admin_metadata[1]['value'], 'ro')
+        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        conn_info = self.volume.initialize_connection(self.context,
+                                                      volume_id, connector)
+        self.assertEquals(conn_info['data']['access_mode'], 'ro')
+
+        # Detach must remove 'attached_mode', leaving only 'readonly'.
+        self.volume.detach_volume(self.context, volume_id)
+        vol = db.volume_get(self.context, volume_id)
+        self.assertEqual(vol['status'], "available")
+        self.assertEqual(vol['attach_status'], "detached")
+        self.assertEqual(vol['mountpoint'], None)
+        self.assertEqual(vol['instance_uuid'], None)
+        self.assertEqual(vol['attached_host'], None)
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 1)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+
+        # Re-attach, this time to a host, still in read-only mode.
+        self.volume.attach_volume(self.context, volume_id, None,
+                                  'fake_host', mountpoint, 'ro')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['status'], "in-use")
+        self.assertEqual(vol['attach_status'], "attached")
+        self.assertEqual(vol['mountpoint'], mountpoint)
+        self.assertEqual(vol['instance_uuid'], None)
+        self.assertEqual(vol['attached_host'], 'fake-host')
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 2)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+        self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEquals(admin_metadata[1]['value'], 'ro')
+        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        conn_info = self.volume.initialize_connection(self.context,
+                                                      volume_id, connector)
+        self.assertEquals(conn_info['data']['access_mode'], 'ro')
+
+        self.volume.detach_volume(self.context, volume_id)
+        vol = db.volume_get(self.context, volume_id)
+        self.assertEqual(vol['status'], "available")
+        self.assertEqual(vol['attach_status'], "detached")
+        self.assertEqual(vol['mountpoint'], None)
+        self.assertEqual(vol['instance_uuid'], None)
+        self.assertEqual(vol['attached_host'], None)
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 1)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+
+        self.volume.delete_volume(self.context, volume_id)
+        self.assertRaises(exception.VolumeNotFound,
+                          db.volume_get,
+                          self.context,
+                          volume_id)
+
+    def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
+        """Manager must refuse a 'rw' attach of a read-only volume."""
+        # Using 'rw' mode to attach a read-only volume is not allowed.
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        mountpoint = "/dev/sdf"
+        volume = self._create_volume(admin_metadata={'readonly': 'True'})
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          self.volume.attach_volume,
+                          self.context,
+                          volume_id,
+                          instance_uuid,
+                          None,
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['status'], "error_attaching")
+        self.assertEqual(vol['attach_status'], "detached")
+        # The manager records 'attached_mode' before the readonly check,
+        # so the rejected mode is still present in the admin metadata.
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 2)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+        self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEquals(admin_metadata[1]['value'], 'rw')
+
+        # Same check for a host attach.
+        db.volume_update(self.context, volume_id, {'status': 'available'})
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          self.volume.attach_volume,
+                          self.context,
+                          volume_id,
+                          None,
+                          'fake_host',
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['status'], "error_attaching")
+        self.assertEqual(vol['attach_status'], "detached")
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 2)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+        self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
+        self.assertEquals(admin_metadata[1]['value'], 'rw')
+
+    def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
+        """API must refuse a 'rw' attach of a read-only volume."""
+        # Using 'rw' mode to attach a read-only volume is not allowed.
+        instance_uuid = '12345678-1234-5678-1234-567812345678'
+        mountpoint = "/dev/sdf"
+        volume = self._create_volume(admin_metadata={'readonly': 'True'})
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        volume_api = cinder.volume.api.API()
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          volume_api.attach,
+                          self.context,
+                          volume,
+                          instance_uuid,
+                          None,
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['attach_status'], "detached")
+        # The API rejects before making the RPC call, so no
+        # 'attached_mode' entry is ever recorded.
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 1)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+
+        # Same check for a host attach.
+        db.volume_update(self.context, volume_id, {'status': 'available'})
+        self.assertRaises(exception.InvalidVolumeAttachMode,
+                          volume_api.attach,
+                          self.context,
+                          volume,
+                          None,
+                          'fake_host',
+                          mountpoint,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual(vol['attach_status'], "detached")
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 1)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'True')
+
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
self.assertEquals(volume['host'], 'newhost')
self.assertEquals(volume['migration_status'], None)
+    def test_update_volume_readonly_flag(self):
+        """Test volume readonly flag can be updated at API level."""
+        # create a volume and assign to host
+        volume = self._create_volume(admin_metadata={'readonly': 'True'})
+        self.volume.create_volume(self.context, volume['id'])
+        # Mutates only the local dict; the API validates this dict's status.
+        volume['status'] = 'in-use'
+
+        volume_api = cinder.volume.api.API()
+
+        # Update fails when status != available
+        self.assertRaises(exception.InvalidVolume,
+                          volume_api.update_readonly_flag,
+                          self.context,
+                          volume,
+                          False)
+
+        volume['status'] = 'available'
+
+        # works when volume in 'available' status
+        volume_api.update_readonly_flag(self.context, volume, False)
+
+        volume = db.volume_get(context.get_admin_context(), volume['id'])
+        self.assertEquals(volume['status'], 'available')
+        admin_metadata = volume['volume_admin_metadata']
+        self.assertEquals(len(admin_metadata), 1)
+        self.assertEquals(admin_metadata[0]['key'], 'readonly')
+        self.assertEquals(admin_metadata[0]['value'], 'False')
+
+        # clean up
+        self.volume.delete_volume(self.context, volume['id'])
+
class CopyVolumeToImageTestCase(BaseVolumeTestCase):
def fake_local_path(self, volume):
def test_get_iscsi_properties(self):
volume = {"provider_location": '',
"id": "0",
- "provider_auth": "a b c"}
+ "provider_auth": "a b c",
+ "attached_mode": "rw"}
iscsi_driver = driver.ISCSIDriver()
iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0"
result = iscsi_driver._get_iscsi_properties(volume)
instance_uuid='fake_uuid',
host_name=None,
mountpoint='fake_mountpoint',
- version='1.7')
+ mode='ro',
+ version='1.11')
def test_attach_volume_to_host(self):
self._test_volume_api('attach_volume',
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
- version='1.7')
+ mode='rw',
+ version='1.11')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
-    def attach(self, context, volume, instance_uuid, host_name, mountpoint):
+    def attach(self, context, volume, instance_uuid, host_name,
+               mountpoint, mode):
+        """Attach the volume to an instance or host in the given mode.
+
+        :param mode: attach mode, 'rw' or 'ro'.
+        :raises: InvalidVolumeAttachMode when a read-only volume is
+                 attached with a mode other than 'ro'.
+        """
+        volume_metadata = self.get_volume_admin_metadata(context.elevated(),
+                                                         volume)
+        if 'readonly' not in volume_metadata:
+            # NOTE(zhiyan): set a default value for read-only flag to metadata.
+            self.update_volume_admin_metadata(context.elevated(), volume,
+                                              {'readonly': 'False'})
+            volume_metadata['readonly'] = 'False'
+
+        # A read-only volume may only be attached in 'ro' mode.
+        if volume_metadata['readonly'] == 'True' and mode != 'ro':
+            raise exception.InvalidVolumeAttachMode(mode=mode,
+                                                    volume_id=volume['id'])
+
        return self.volume_rpcapi.attach_volume(context,
                                                volume,
                                                instance_uuid,
                                                host_name,
-                                                mountpoint)
+                                                mountpoint,
+                                                mode)
@wrap_check_policy
def detach(self, context, volume):
self._check_metadata_properties(context, _metadata)
- self.db.volume_metadata_update(context, volume['id'], _metadata, True)
+ self.db.volume_metadata_update(context, volume['id'],
+ _metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return i['value']
return None
+    @wrap_check_policy
+    def get_volume_admin_metadata(self, context, volume):
+        """Get all administration metadata associated with a volume.
+
+        :returns: plain dict of admin metadata key/value pairs.
+        """
+        rv = self.db.volume_admin_metadata_get(context, volume['id'])
+        return dict(rv.iteritems())
+
+    @wrap_check_policy
+    def delete_volume_admin_metadata(self, context, volume, key):
+        """Delete the given administration metadata item from a volume."""
+        # Guarded by the admin_api policy rule
+        # ('volume:delete_volume_admin_metadata').
+        self.db.volume_admin_metadata_delete(context, volume['id'], key)
+
+    @wrap_check_policy
+    def update_volume_admin_metadata(self, context, volume, metadata,
+                                     delete=False):
+        """Updates or creates volume administration metadata.
+
+        If delete is True, metadata items that are not specified in the
+        `metadata` argument will be deleted.
+
+        :returns: the resulting admin metadata dict.
+        """
+        orig_meta = self.get_volume_admin_metadata(context, volume)
+        if delete:
+            _metadata = metadata
+        else:
+            # Merge the new items into the existing metadata.
+            _metadata = orig_meta.copy()
+            _metadata.update(metadata)
+
+        self._check_metadata_properties(context, _metadata)
+
+        self.db.volume_admin_metadata_update(context, volume['id'],
+                                             _metadata, delete)
+
+        # TODO(jdg): Implement an RPC call for drivers that may use this info
+
+        return _metadata
+
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
+    @wrap_check_policy
+    def update_readonly_flag(self, context, volume, flag):
+        """Set the 'readonly' admin-metadata flag on an available volume.
+
+        :raises: InvalidVolume if the volume is not in 'available' status.
+        """
+        if volume['status'] != 'available':
+            msg = _('Volume status must be available to update readonly flag.')
+            raise exception.InvalidVolume(reason=msg)
+        self.update_volume_admin_metadata(context.elevated(), volume,
+                                          {'readonly': str(flag)})
+
class HostAPI(base.Base):
def __init__(self):
the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
+
+        :access_mode: the volume access mode the client is allowed to use
+                      ('rw' or 'ro' currently supported)
"""
properties = {}
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.0.1:3260',
'volume_id': 1,
+ 'access_mode': 'rw'
}
}
def initialize_connection(self, volume, connector):
return {
'driver_volume_type': 'iscsi',
- 'data': {}
+ 'data': {'access_mode': 'rw'}
}
def terminate_connection(self, volume, connector, **kwargs):
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
+ 'access_mode': 'rw'
}
}
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
+ 'access_mode': 'rw'
}
}
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
- RPC_API_VERSION = '1.10'
+ RPC_API_VERSION = '1.11'
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
return True
def attach_volume(self, context, volume_id, instance_uuid, host_name,
- mountpoint):
+ mountpoint, mode):
"""Updates db to show volume is attached"""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
+ volume_metadata = self.db.volume_admin_metadata_get(
+ context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume['instance_uuid'] and volume['instance_uuid'] !=
instance_uuid):
host_name):
msg = _("being attached by another host")
raise exception.InvalidVolume(reason=msg)
+ if (volume_metadata.get('attached_mode') and
+ volume_metadata.get('attached_mode') != mode):
+ msg = _("being attached by different mode")
+ raise exception.InvalidVolume(reason=msg)
elif volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
# TODO(jdg): attach_time column is currently varchar
# we should update this to a date-time object
# also consider adding detach_time?
- now = timeutils.strtime()
- new_status = 'attaching'
self.db.volume_update(context, volume_id,
{"instance_uuid": instance_uuid,
"attached_host": host_name,
- "status": new_status,
- "attach_time": now})
+ "status": "attaching",
+ "attach_time": timeutils.strtime()})
+ self.db.volume_admin_metadata_update(context.elevated(),
+ volume_id,
+ {"attached_mode": mode},
+ False)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
- self.db.volume_update(context,
- volume_id,
+ self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
host_name) if host_name else None
volume = self.db.volume_get(context, volume_id)
+
+ if volume_metadata.get('readonly') == 'True' and mode != 'ro':
+ self.db.volume_update(context, volume_id,
+ {'status': 'error_attaching'})
+ raise exception.InvalidVolumeAttachMode(mode=mode,
+ volume_id=volume_id)
try:
self.driver.attach_volume(context,
volume,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
- self.db.volume_update(context,
- volume_id,
+ self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.db.volume_attached(context.elevated(),
{'status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id)
+ self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
+ 'attached_mode')
# Check for https://bugs.launchpad.net/cinder/+bug/1065702
volume = self.db.volume_get(context, volume_id)
json in various places, so it should not contain any non-json
data types.
"""
- volume_ref = self.db.volume_get(context, volume_id)
+ volume = self.db.volume_get(context, volume_id)
self.driver.validate_connector(connector)
- conn_info = self.driver.initialize_connection(volume_ref, connector)
+ conn_info = self.driver.initialize_connection(volume, connector)
# Add qos_specs to connection info
- typeid = volume_ref['volume_type_id']
+ typeid = volume['volume_type_id']
specs = {}
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
conn_info['data'].update(qos_spec)
+ # Add access_mode to connection info
+ volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
+ volume_id)
+ if conn_info['data'].get('access_mode') is None:
+ access_mode = volume_metadata.get('attached_mode')
+ if access_mode is None:
+ # NOTE(zhiyan): client didn't call 'os-attach' before
+ access_mode = ('ro'
+ if volume_metadata.get('readonly') == 'True'
+ else 'rw')
+ conn_info['data']['access_mode'] = access_mode
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
+ 1.11 - Adds mode parameter to attach_volume()
+ to support volume read-only attaching.
'''
BASE_RPC_API_VERSION = '1.0'
topic=rpc.queue_get_for(ctxt, self.topic, host))
    def attach_volume(self, ctxt, volume, instance_uuid, host_name,
-                      mountpoint):
+                      mountpoint, mode):
+        # 'mode' ('rw'/'ro') was added in RPC API 1.11 for read-only
+        # attach support, hence the version bump from 1.7.
        return self.call(ctxt, self.make_msg('attach_volume',
                                             volume_id=volume['id'],
                                             instance_uuid=instance_uuid,
                                             host_name=host_name,
-                                            mountpoint=mountpoint),
+                                            mountpoint=mountpoint,
+                                             mode=mode),
                         topic=rpc.queue_get_for(ctxt,
                                                 self.topic,
                                                 volume['host']),
-                         version='1.7')
+                         version='1.11')
def detach_volume(self, ctxt, volume):
return self.call(ctxt, self.make_msg('detach_volume',
"volume:create": [],
"volume:get_all": [],
"volume:get_volume_metadata": [],
+ "volume:get_volume_admin_metadata": [["rule:admin_api"]],
+ "volume:delete_volume_admin_metadata": [["rule:admin_api"]],
+ "volume:update_volume_admin_metadata": [["rule:admin_api"]],
"volume:get_snapshot": [],
"volume:get_all_snapshots": [],
"volume:extend": [],
+ "volume:update_readonly_flag": [],
"volume_extension:types_manage": [["rule:admin_api"]],
"volume_extension:types_extra_specs": [["rule:admin_api"]],
"backup:get": [],
"backup:get_all": [],
"backup:restore": []
-
}