From 5d8a457c4e91a7cfe1ab5a09cdb2ca48ec6aa977 Mon Sep 17 00:00:00 2001
From: John Griffith
Date: Fri, 19 Jul 2013 17:05:55 -0600
Subject: [PATCH] Add create & attach times to SolidFire attributes.

This change simply adds create_time and attach_time to the SolidFire
device attributes.  Times are taken from the volume-ref object;
attach_time is only set/present after an attach, and is also removed
from the attributes list on a detach.

This also required that we actually set attach_time on the volume in
the db.  We have the column, but we weren't setting it, so this change
fixes that as well.

In the future we should also look at changing attach_time from a
string to a proper date-time object.

Change-Id: Ib9577ac160596a6878d1729f6022885b6cfa90e2
---
 cinder/db/sqlalchemy/api.py        |  1 +
 cinder/tests/test_solidfire.py     | 41 ++++++++++++++------
 cinder/volume/driver.py            |  4 +-
 cinder/volume/drivers/scality.py   |  2 +-
 cinder/volume/drivers/solidfire.py | 61 +++++++++++++++++++++++++++++-
 cinder/volume/manager.py           | 25 ++++++++----
 6 files changed, 110 insertions(+), 24 deletions(-)

diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index a2e4bf814..25fb2ae97 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -1078,6 +1078,7 @@ def volume_detached(context, volume_id):
         volume_ref['attach_status'] = 'detached'
         volume_ref['instance_uuid'] = None
         volume_ref['attached_host'] = None
+        volume_ref['attach_time'] = None
         volume_ref.save(session=session)
 
 
diff --git a/cinder/tests/test_solidfire.py b/cinder/tests/test_solidfire.py
index f57c8515a..99094a19d 100644
--- a/cinder/tests/test_solidfire.py
+++ b/cinder/tests/test_solidfire.py
@@ -19,6 +19,7 @@ import mox
 
 from cinder import exception
 from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
 from cinder import test
 from cinder.volume import configuration as conf
 from cinder.volume.drivers.solidfire import SolidFireDriver
@@ -143,7 +144,8 @@ class SolidFireVolumeTestCase(test.TestCase):
                    'name': 'testvol',
                    'size': 1,
                    'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
-                   'volume_type_id': 'fast'}
+                   'volume_type_id': 'fast',
+                   'created_at': timeutils.utcnow()}
         sfv = SolidFireDriver(configuration=self.configuration)
         model_update = sfv.create_volume(testvol)
 
@@ -156,7 +158,9 @@ class SolidFireVolumeTestCase(test.TestCase):
                    'name': 'testvol',
                    'size': 1,
                    'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
-                   'volume_type_id': None}
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+
         sfv = SolidFireDriver(configuration=self.configuration)
         model_update = sfv.create_volume(testvol)
         self.assertNotEqual(model_update, None)
@@ -169,7 +173,9 @@ class SolidFireVolumeTestCase(test.TestCase):
                    'name': 'testvol',
                    'size': 1,
                    'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
-                   'volume_type_id': None}
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
+
         self.configuration.sf_emulate_512 = False
         sfv = SolidFireDriver(configuration=self.configuration)
         model_update = sfv.create_volume(testvol)
@@ -189,7 +195,8 @@ class SolidFireVolumeTestCase(test.TestCase):
                    '74-4cb7-bd55-14aed659a0cc.4060 0',
                    'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                     'c76370d66b 2FE0CQ8J196R',
-                   'provider_geometry': '4096 4096'
+                   'provider_geometry': '4096 4096',
+                   'created_at': timeutils.utcnow(),
                    }
         sfv = SolidFireDriver(configuration=self.configuration)
 
@@ -208,7 +215,8 @@ class SolidFireVolumeTestCase(test.TestCase):
                    'size': 1,
                    'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                    'metadata': [preset_qos],
-                   'volume_type_id': None}
+                   'volume_type_id': None,
+                   'created_at': timeutils.utcnow()}
         sfv = SolidFireDriver(configuration=self.configuration)
         model_update = sfv.create_volume(testvol)
 
@@ -224,7 +232,8 @@ class SolidFireVolumeTestCase(test.TestCase):
         testvol = {'project_id': 'testprjid',
                    'name': 'testvol',
                    'size': 1,
-                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'created_at': timeutils.utcnow()}
         sfv = SolidFireDriver(configuration=self.configuration)
         try:
             sfv.create_volume(testvol)
@@ -266,7 +275,9 @@ class SolidFireVolumeTestCase(test.TestCase):
         testvol = {'project_id': 'testprjid',
                    'name': 'test_volume',
                    'size': 1,
-                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'created_at': timeutils.utcnow()}
+
         sfv = SolidFireDriver(configuration=self.configuration)
         sfv.delete_volume(testvol)
 
@@ -276,7 +287,9 @@ class SolidFireVolumeTestCase(test.TestCase):
         testvol = {'project_id': 'testprjid',
                    'name': 'no-name',
                    'size': 1,
-                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'created_at': timeutils.utcnow()}
+
         sfv = SolidFireDriver(configuration=self.configuration)
         try:
             sfv.delete_volume(testvol)
@@ -294,7 +307,9 @@ class SolidFireVolumeTestCase(test.TestCase):
         testvol = {'project_id': 'testprjid',
                    'name': 'no-name',
                    'size': 1,
-                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'created_at': timeutils.utcnow()}
+
         sfv = SolidFireDriver(configuration=self.configuration)
         self.assertRaises(exception.SfAccountNotFound,
                           sfv.delete_volume,
@@ -323,7 +338,9 @@ class SolidFireVolumeTestCase(test.TestCase):
         testvol = {'project_id': 'testprjid',
                    'name': 'test_volume',
                    'size': 1,
-                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'created_at': timeutils.utcnow()}
+
         sfv = SolidFireDriver(configuration=self.configuration)
         sfv.extend_volume(testvol, 2)
 
@@ -349,7 +366,9 @@ class SolidFireVolumeTestCase(test.TestCase):
         testvol = {'project_id': 'testprjid',
                    'name': 'no-name',
                    'size': 1,
-                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+                   'created_at': timeutils.utcnow()}
+
         sfv = SolidFireDriver(configuration=self.configuration)
         self.assertRaises(exception.SfAccountNotFound,
                           sfv.extend_volume,
diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index bb088573d..d00c90004 100644
--- a/cinder/volume/driver.py
+++ b/cinder/volume/driver.py
@@ -167,12 +167,12 @@ class VolumeDriver(object):
         """Disallow connection from connector"""
         raise NotImplementedError()
 
-    def attach_volume(self, context, volume_id, instance_uuid, host_name,
+    def attach_volume(self, context, volume, instance_uuid, host_name,
                       mountpoint):
         """Callback for volume attached to instance or host."""
         pass
 
-    def detach_volume(self, context, volume_id):
+    def detach_volume(self, context, volume):
         """Callback for volume detached."""
         pass
 
diff --git a/cinder/volume/drivers/scality.py b/cinder/volume/drivers/scality.py
index 3a6a49813..abf2fcda5 100644
--- a/cinder/volume/drivers/scality.py
+++ b/cinder/volume/drivers/scality.py
@@ -210,7 +210,7 @@ class ScalityDriver(driver.VolumeDriver):
         """Disallow connection from connector."""
         pass
 
-    def detach_volume(self, context, volume_id):
+    def detach_volume(self, context, volume):
         """Callback for volume detached."""
         pass
 
diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py
index 2af030c35..25e40c828 100644
--- a/cinder/volume/drivers/solidfire.py
+++ b/cinder/volume/drivers/solidfire.py
@@ -30,6 +30,7 @@ from oslo.config import cfg
 from cinder import context
 from cinder import exception
 from cinder.openstack.common import log as logging
+from cinder.openstack.common import timeutils
 from cinder.volume.drivers.san.san import SanISCSIDriver
 from cinder.volume import volume_types
 
@@ -364,9 +365,11 @@ class SolidFireDriver(SanISCSIDriver):
         # to set any that were provided
         params = {'volumeID': sf_volume_id}
 
+        create_time = timeutils.strtime(v_ref['created_at'])
         attributes = {'uuid': v_ref['id'],
                       'is_clone': 'True',
-                      'src_uuid': src_uuid}
+                      'src_uuid': src_uuid,
+                      'created_at': create_time}
         if qos:
             params['qos'] = qos
             for k, v in qos.items():
@@ -484,8 +487,10 @@ class SolidFireDriver(SanISCSIDriver):
         if type_id is not None:
             qos = self._set_qos_by_volume_type(ctxt, type_id)
 
+        create_time = timeutils.strtime(volume['created_at'])
         attributes = {'uuid': volume['id'],
-                      'is_clone': 'False'}
+                      'is_clone': 'False',
+                      'created_at': create_time}
         if qos:
             for k, v in qos.items():
                 attributes[k] = str(v)
@@ -662,3 +667,55 @@ class SolidFireDriver(SanISCSIDriver):
         data['thin_provision_percent'] =\
             results['thinProvisioningPercent']
         self.cluster_stats = data
+
+    def attach_volume(self, context, volume,
+                      instance_uuid, host_name,
+                      mountpoint):
+
+        LOG.debug(_("Entering SolidFire attach_volume..."))
+        sfaccount = self._get_sfaccount(volume['project_id'])
+        params = {'accountID': sfaccount['accountID']}
+
+        sf_vol = self._get_sf_volume(volume['id'], params)
+        if sf_vol is None:
+            LOG.error(_("Volume ID %s was not found on "
+                        "the SolidFire Cluster!"), volume['id'])
+            raise exception.VolumeNotFound(volume_id=volume['id'])
+
+        attributes = sf_vol['attributes']
+        attributes['attach_time'] = volume.get('attach_time', None)
+        attributes['attached_to'] = instance_uuid
+        params = {
+            'volumeID': sf_vol['volumeID'],
+            'attributes': attributes
+        }
+
+        data = self._issue_api_request('ModifyVolume', params)
+
+        if 'result' not in data:
+            raise exception.SolidFireAPIDataException(data=data)
+
+    def detach_volume(self, context, volume):
+
+        LOG.debug(_("Entering SolidFire detach_volume..."))
+        sfaccount = self._get_sfaccount(volume['project_id'])
+        params = {'accountID': sfaccount['accountID']}
+
+        sf_vol = self._get_sf_volume(volume['id'], params)
+        if sf_vol is None:
+            LOG.error(_("Volume ID %s was not found on "
+                        "the SolidFire Cluster!"), volume['id'])
+            raise exception.VolumeNotFound(volume_id=volume['id'])
+
+        attributes = sf_vol['attributes']
+        attributes['attach_time'] = None
+        attributes['attached_to'] = None
+        params = {
+            'volumeID': sf_vol['volumeID'],
+            'attributes': attributes
+        }
+
+        data = self._issue_api_request('ModifyVolume', params)
+
+        if 'result' not in data:
+            raise exception.SolidFireAPIDataException(data=data)
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 6b0fb3e22..798f98546 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -371,7 +371,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             self.db.volume_update(context,
                                   volume_ref['id'],
                                   {'status': volume_ref['status'],
-                                  'launched_at': now})
+                                   'launched_at': now})
             LOG.info(_("volume %s: created successfully"), volume_ref['name'])
             self._reset_stats()
 
@@ -609,10 +609,16 @@ class VolumeManager(manager.SchedulerDependentManager):
             elif volume['status'] != "available":
                 msg = _("status must be available")
                 raise exception.InvalidVolume(reason=msg)
+
+            # TODO(jdg): attach_time column is currently varchar
+            # we should update this to a date-time object
+            # also consider adding detach_time?
+            now = timeutils.strtime()
             self.db.volume_update(context, volume_id,
                                   {"instance_uuid": instance_uuid,
                                    "attached_host": host_name,
-                                   "status": "attaching"})
+                                   "status": "attaching",
+                                   "attach_time": now})
 
             if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                 self.db.volume_update(context,
@@ -623,9 +629,10 @@ class VolumeManager(manager.SchedulerDependentManager):
             host_name_sanitized = utils.sanitize_hostname(
                 host_name) if host_name else None
 
+            volume = self.db.volume_get(context, volume_id)
             try:
                 self.driver.attach_volume(context,
-                                          volume_id,
+                                          volume,
                                           instance_uuid,
                                           host_name_sanitized,
                                           mountpoint)
@@ -646,8 +653,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         """Updates db to show volume is detached"""
         # TODO(vish): refactor this into a more general "unreserve"
        # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
+
+        volume = self.db.volume_get(context, volume_id)
         try:
-            self.driver.detach_volume(context, volume_id)
+            self.driver.detach_volume(context, volume)
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.db.volume_update(context,
@@ -657,10 +666,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.db.volume_detached(context.elevated(), volume_id)
 
         # Check for https://bugs.launchpad.net/cinder/+bug/1065702
-        volume_ref = self.db.volume_get(context, volume_id)
-        if (volume_ref['provider_location'] and
-                volume_ref['name'] not in volume_ref['provider_location']):
-            self.driver.ensure_export(context, volume_ref)
+        volume = self.db.volume_get(context, volume_id)
+        if (volume['provider_location'] and
+                volume['name'] not in volume['provider_location']):
+            self.driver.ensure_export(context, volume)
 
     def _copy_image_to_volume(self, context, volume, image_service, image_id):
         """Downloads Glance image to the specified volume."""
-- 
2.45.2
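
Reviewer note (not part of the patch): a minimal sketch of the attribute
payload this change ends up sending, for anyone eyeballing the ModifyVolume
call. The volume id, instance uuid, volumeID and timestamps below are made-up
illustration values; timeutils, ModifyVolume and _issue_api_request come from
the patch itself, and the final call is left as a comment because
_issue_api_request is a driver method rather than a standalone helper.

    # Illustration only -- hypothetical values; assumes a cinder checkout so
    # that cinder.openstack.common.timeutils is importable.
    from cinder.openstack.common import timeutils

    create_time = timeutils.strtime()  # e.g. '2013-07-19T23:05:55.000000'
    attach_time = timeutils.strtime()  # written to the db by the manager on attach

    # Shape of the SolidFire volume attributes after create + attach:
    attributes = {
        'uuid': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',  # cinder volume id
        'is_clone': 'False',
        'created_at': create_time,    # added by this patch at create time
        'attach_time': attach_time,   # set on attach, cleared to None on detach
        'attached_to': '11111111-2222-3333-4444-555555555555',  # instance uuid
    }
    params = {'volumeID': 1234, 'attributes': attributes}
    # Inside the driver this is issued as:
    #     data = self._issue_api_request('ModifyVolume', params)

The attributes blob is free-form metadata on the SolidFire side, so storing
the times as strtime() strings keeps the change small; the TODO in the manager
about moving attach_time to a proper date-time column still stands.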