self.standard_logout)
self.assertIsNone(return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_replicated_managed_periodic(self,
_mock_volume_types):
'provider_location': self.CLIENT_ID},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_replicated_managed_sync(self,
_mock_volume_types):
'provider_location': self.CLIENT_ID},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_replicated_unmanaged_periodic(self,
_mock_volume_types):
'provider_location': self.CLIENT_ID},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_replicated_unmanaged_sync(self,
_mock_volume_types):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
+ mock_client.getVolume.return_value = {'name': mock.ANY}
mock_client.copyVolume.return_value = {'taskid': 1}
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
'host': volume_utils.append_host(self.FAKE_HOST,
HPE3PAR_CPG2),
'source_volid': HPE3PARBaseDriver.VOLUME_ID}
- src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID}
+ src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID,
+ 'name': HPE3PARBaseDriver.VOLUME_NAME}
model_update = self.driver.create_cloned_volume(volume, src_vref)
self.assertIsNone(model_update)
+ common = hpecommon.HPE3PARCommon(None)
+ vol_name = common._get_3par_vol_name(src_vref['id'])
+ # snapshot name is random
+ snap_name = mock.ANY
+ optional = mock.ANY
+
expected = [
+ mock.call.createSnapshot(snap_name, vol_name, optional),
+ mock.call.getVolume(snap_name),
mock.call.copyVolume(
- self.VOLUME_3PAR_NAME,
+ snap_name,
'osv-0DM4qZEVSKON-AAAAAAAAA',
HPE3PAR_CPG2,
{'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
def test_create_cloned_qos_volume(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2
mock_client = self.setup_driver()
+ mock_client.getVolume.return_value = {'name': mock.ANY}
mock_client.copyVolume.return_value = {'taskid': 1}
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
- src_vref = {'id': HPE3PARBaseDriver.CLONE_ID}
+ src_vref = {'id': HPE3PARBaseDriver.CLONE_ID,
+ 'name': HPE3PARBaseDriver.VOLUME_NAME}
volume = self.volume_qos.copy()
host = "TEST_HOST"
pool = "TEST_POOL"
model_update = self.driver.create_cloned_volume(volume, src_vref)
self.assertIsNone(model_update)
+ # creation of the temp snapshot
+ common = hpecommon.HPE3PARCommon(None)
+ snap_name = mock.ANY
+ vol_name = common._get_3par_vol_name(src_vref['id'])
+ optional = mock.ANY
+
expected = [
+ mock.call.createSnapshot(snap_name, vol_name, optional),
+ mock.call.getVolume(snap_name),
mock.call.getCPG(expected_cpg),
mock.call.copyVolume(
- 'osv-0DM4qZEVSKON-AAAAAAAAA',
+ snap_name,
self.VOLUME_3PAR_NAME,
expected_cpg,
{'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
self.volume,
str(new_size))
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_extend_volume_replicated(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
expected +
self.standard_logout)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_enable_not_in_rcopy(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
'provider_location': self.CLIENT_ID},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_enable_in_rcopy(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
'provider_location': self.CLIENT_ID},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_enable_non_replicated_type(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
context.get_admin_context(),
self.volume_replicated)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_disable(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
self.assertEqual({'replication_status': 'disabled'},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_disable_fail(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
self.assertEqual({'replication_status': 'disable_failed'},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_disable_non_replicated_type(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
context.get_admin_context(),
self.volume_replicated)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_list_replication_targets(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
'targets': targets},
return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_list_replication_targets_non_replicated_type(self,
_mock_volume_types):
self.assertEqual([], return_model)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_failover_managed(self, _mock_volume_types):
# periodic vs. sync is not relevant when conducting a failover. We
self.volume_replicated,
valid_target_device_id)
- @mock.patch('hpe3parclient.version', "4.0.2")
@mock.patch.object(volume_types, 'get_volume_type')
def test_replication_failover_unmanaged(self, _mock_volume_types):
# periodic vs. sync is not relevant when conducting a failover. We
LOG = logging.getLogger(__name__)
-MIN_CLIENT_VERSION = '4.0.0'
+MIN_CLIENT_VERSION = '4.1.0'
MIN_REP_CLIENT_VERSION = '4.0.2'
DEDUP_API_VERSION = 30201120
FLASH_CACHE_API_VERSION = 30201200
3.0.8 - Optimize array ID retrieval
3.0.9 - Bump minimum API version for volume replication
3.0.10 - Added additional volumes checks to the manage snapshot API
+ 3.0.11 - Fix the image cache capability bug #1491088
"""
- VERSION = "3.0.10"
+ VERSION = "3.0.11"
stats = {}
{'srstatld_version': SRSTATLD_API_VERSION,
'version': self.API_VERSION})
- # TODO(walter-boring) BUG: 1491088. For the time being disable
- # making the drivers usable if they enable the image cache
- # The image cache feature fails on 3PAR drivers
- # because it tries to extend a volume as it's still being cloned.
- if self.config.image_volume_cache_enabled:
- msg = _("3PAR drivers do not support enabling the image "
- "cache capability at this time. You must disable "
- "the configuration setting in cinder.conf")
- LOG.error(msg)
- raise exception.InvalidInput(message=msg)
-
# Get the client ID for provider_location. We only need to retrieve
# the ID directly from the array if the driver stats are not provided.
if not stats:
volume_name = self._encode_name(volume_id)
return "osv-%s" % volume_name
- def _get_3par_snap_name(self, snapshot_id):
+ def _get_3par_snap_name(self, snapshot_id, temp_snap=False):
+ """Return the 3PAR snapshot name for the given snapshot ID.
+
+ :param snapshot_id: Cinder snapshot UUID to encode
+ :param temp_snap: if True, use the 'tss-' prefix reserved for
+     temporary snapshots created during volume cloning, instead
+     of the normal 'oss-' prefix
+ """
snapshot_name = self._encode_name(snapshot_id)
- return "oss-%s" % snapshot_name
+ if temp_snap:
+ # temporary snapshots (made while cloning a volume) get a
+ # distinct 'tss-' prefix so they can be recognized and
+ # cleaned up later (see the delete-volume error handling)
+ prefix = "tss-%s"
+ else:
+ prefix = "oss-%s"
+ return prefix % snapshot_name
def _get_3par_ums_name(self, snapshot_id):
ums_name = self._encode_name(snapshot_id)
model_update = None
return model_update
+ def _create_temp_snapshot(self, volume):
+ """This creates a temporary snapshot of a volume.
+
+ This is used by cloning a volume so that we can then
+ issue extend volume against the original volume.
+
+ :param volume: Cinder volume dict; only 'id' and 'name' are read
+ :returns: the 3PAR details of the new snapshot, as returned by
+     ``client.getVolume(snap_name)``
+ """
+ vol_name = self._get_3par_vol_name(volume['id'])
+ # create a brand new uuid for the temp snap
+ snap_uuid = uuid.uuid4().hex
+
+ # this will be named tss-%s
+ snap_name = self._get_3par_snap_name(snap_uuid, temp_snap=True)
+
+ extra = {'volume_name': volume['name'],
+ 'volume_id': volume['id']}
+
+ optional = {'comment': json.dumps(extra)}
+
+ # let the snapshot die in an hour
+ # NOTE(review): assumes the copyVolume started from this snapshot
+ # completes within the hour — confirm against the 3PAR task timeout
+ optional['expirationHours'] = 1
+
+ LOG.info(_LI("Creating temp snapshot %(snap)s from volume %(vol)s"),
+ {'snap': snap_name, 'vol': vol_name})
+
+ self.client.createSnapshot(snap_name, vol_name, optional)
+ return self.client.getVolume(snap_name)
+
def create_cloned_volume(self, volume, src_vref):
try:
- orig_name = self._get_3par_vol_name(src_vref['id'])
vol_name = self._get_3par_vol_name(volume['id'])
+ # create a temporary snapshot
+ snapshot = self._create_temp_snapshot(src_vref)
type_info = self.get_volume_settings_from_type(volume)
# make the 3PAR copy the contents.
# can't delete the original until the copy is done.
cpg = type_info['cpg']
- self._copy_volume(orig_name, vol_name, cpg=cpg,
+ self._copy_volume(snapshot['name'], vol_name, cpg=cpg,
snap_cpg=type_info['snap_cpg'],
tpvv=type_info['tpvv'],
tdvv=type_info['tdvv'])
self.client.removeVolumeFromVolumeSet(vvset_name,
volume_name)
self.client.deleteVolume(volume_name)
- elif (ex.get_code() == 151 or ex.get_code() == 32):
+ elif (ex.get_code() == 151):
# the volume is being operated on in a background
# task on the 3PAR.
# TODO(walter-boring) do a retry a few times.
"You can try again later.")
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
+ elif (ex.get_code() == 32):
+ # Error 32 means that the volume has children
+
+ # see if we have any temp snapshots
+ snaps = self.client.getVolumeSnapshots(volume_name)
+ for snap in snaps:
+ if snap.startswith('tss-'):
+ # looks like we found a temp snapshot.
+ LOG.info(
+ _LI("Found a temporary snapshot %(name)s"),
+ {'name': snap})
+ try:
+ self.client.deleteVolume(snap)
+ except hpeexceptions.HTTPNotFound:
+ # if the volume is gone, it's as good as a
+ # successful delete
+ pass
+ except Exception:
+ msg = _("Volume has a temporary snapshot that "
+ "can't be deleted at this time.")
+ raise exception.VolumeIsBusy(message=msg)
+
+ try:
+ self.delete_volume(volume)
+ except Exception:
+ msg = _("Volume has children and cannot be deleted!")
+ raise exception.VolumeIsBusy(message=msg)
else:
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
def create_volume_from_snapshot(self, volume, snapshot, snap_name=None,
vvs_name=None):
- """Creates a volume from a snapshot.
-
- """
+ """Creates a volume from a snapshot."""
LOG.debug("Create Volume from Snapshot\n%(vol_name)s\n%(ss_name)s",
{'vol_name': pprint.pformat(volume['display_name']),
'ss_name': pprint.pformat(snapshot['display_name'])})