from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder import test
+from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import storwize_svc
del self._volumes_list[vol_name]
return ('', '')
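+ # Simulate the expandvdisksize CLI command, which grows an existing
+ # vdisk by a given number of GB.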
+ def _cmd_expandvdisksize(self, **kwargs):
+ if 'obj' not in kwargs:
+ return self._errors['CMMVC5701E']
+ vol_name = kwargs['obj'].strip('\'\"')
+
+ # Assume the unit is gb, since the driver always passes '-unit gb'
+ if 'size' not in kwargs:
+ return self._errors['CMMVC5707E']
+ size = int(kwargs['size'])
+
+ if vol_name not in self._volumes_list:
+ return self._errors['CMMVC5753E']
+
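+ # The simulator stores 'capacity' in bytes, so convert the GB delta
+ # before adding it to the current size.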
+ curr_size = int(self._volumes_list[vol_name]['capacity'])
+ addition = size * units.GiB
+ self._volumes_list[vol_name]['capacity'] = str(curr_size + addition)
+ return ('', '')
+
def _get_fcmap_info(self, vol_name):
ret_vals = {
'fc_id': '',
out, err = self._cmd_mkvdisk(**kwargs)
elif command == 'rmvdisk':
out, err = self._cmd_rmvdisk(**kwargs)
+ elif command == 'expandvdisksize':
+ out, err = self._cmd_expandvdisksize(**kwargs)
elif command == 'lsvdisk':
out, err = self._cmd_lsvdisk(**kwargs)
elif command == 'mkhost':
self.assertAlmostEqual(stats['total_capacity_gb'], 3328.0)
self.assertAlmostEqual(stats['free_capacity_gb'], 3287.5)
+ def test_storwize_svc_extend_volume(self):
+ volume = self._generate_vol_info(None, None)
+ self.driver.db.volume_set(volume)
+ self.driver.create_volume(volume)
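+ # Extend the volume to 13 GB and verify the reported capacity.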
+ self.driver.extend_volume(volume, '13')
+ attrs = self.driver._get_vdisk_attributes(volume['name'])
+ vol_size = int(attrs['capacity']) / units.GiB
+ self.assertAlmostEqual(vol_size, 13)
+
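+ # Extending a volume that has a snapshot should be rejected.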
+ snap = self._generate_vol_info(volume['name'], volume['id'])
+ self.driver.create_snapshot(snap)
+ self._assert_vol_exists(snap['name'], True)
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.extend_volume, volume, '16')
+
+ self.driver.delete_snapshot(snap)
+ self.driver.delete_volume(volume)
+
class CLIResponseTestCase(test.TestCase):
def test_empty(self):
else:
return True
- def _delete_vdisk(self, name, force):
- """Deletes existing vdisks.
-
- It is very important to properly take care of mappings before deleting
- the disk:
- 1. If no mappings, then it was a vdisk, and can be deleted
- 2. If it is the source of a flashcopy mapping and copy_rate is 0, then
- it is a vdisk that has a snapshot. If the force flag is set,
- delete the mapping and the vdisk, otherwise set the mapping to
- copy and wait (this will allow users to delete vdisks that have
- snapshots if/when the upper layers allow it).
- 3. If it is the target of a mapping and copy_rate is 0, it is a
- snapshot, and we should properly stop the mapping and delete.
- 4. If it is the source/target of a mapping and copy_rate is not 0, it
- is a clone or vdisk created from a snapshot. We wait for the copy
- to complete (the mapping will be autodeleted) and then delete the
- vdisk.
-
- """
-
- LOG.debug(_('enter: _delete_vdisk: vdisk %s') % name)
-
- # Try to delete volume only if found on the storage
- vdisk_defined = self._is_vdisk_defined(name)
- if not vdisk_defined:
- LOG.info(_('warning: Tried to delete vdisk %s but it does not '
- 'exist.') % name)
- return
-
+ def _ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True):
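+ """Ensure the vdisk has no FlashCopy mappings.
+
+ Returns False if the vdisk is the source of a snapshot mapping and
+ allow_snaps is False; otherwise waits out or cleans up all mappings
+ and returns True.
+ """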
# Ensure vdisk has no FlashCopy mappings
mapping_ids = self._get_vdisk_fc_mappings(name)
while len(mapping_ids):
if copy_rate == '0':
# Case #2: A vdisk that has snapshots
if source == name:
- ssh_cmd = ('svctask chfcmap -copyrate 50 '
- '-autodelete on %s' % map_id)
- out, err = self._run_ssh(ssh_cmd)
- wait_for_copy = True
+ if not allow_snaps:
+ return False
+ ssh_cmd = ('svctask chfcmap -copyrate 50 '
+ '-autodelete on %s' % map_id)
+ out, err = self._run_ssh(ssh_cmd)
+ wait_for_copy = True
# Case #3: A snapshot
else:
msg = (_('Vdisk %(name)s not involved in '
if wait_for_copy:
time.sleep(5)
mapping_ids = self._get_vdisk_fc_mappings(name)
+ return True
+
+ def _delete_vdisk(self, name, force):
+ """Deletes existing vdisks.
+
+ It is very important to properly take care of mappings before deleting
+ the disk:
+ 1. If no mappings, then it was a vdisk, and can be deleted
+ 2. If it is the source of a flashcopy mapping and copy_rate is 0, then
+ it is a vdisk that has a snapshot. If the force flag is set,
+ delete the mapping and the vdisk, otherwise set the mapping to
+ copy and wait (this will allow users to delete vdisks that have
+ snapshots if/when the upper layers allow it).
+ 3. If it is the target of a mapping and copy_rate is 0, it is a
+ snapshot, and we should properly stop the mapping and delete.
+ 4. If it is the source/target of a mapping and copy_rate is not 0, it
+ is a clone or vdisk created from a snapshot. We wait for the copy
+ to complete (the mapping will be autodeleted) and then delete the
+ vdisk.
+
+ """
+
+ LOG.debug(_('enter: _delete_vdisk: vdisk %s') % name)
+
+ # Try to delete volume only if found on the storage
+ vdisk_defined = self._is_vdisk_defined(name)
+ if not vdisk_defined:
+ LOG.info(_('warning: Tried to delete vdisk %s but it does not '
+ 'exist.') % name)
+ return
+
+ self._ensure_vdisk_no_fc_mappings(name)
forceflag = '-force' if force else ''
cmd_params = {'frc': forceflag, 'name': name}
else:
raise NotImplementedError()
+ def extend_volume(self, volume, new_size):
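+ """Extend the size of an existing volume.
+
+ Extending a volume that still has snapshots is not supported and
+ raises VolumeBackendAPIException.
+ """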
+ LOG.debug(_('enter: extend_volume: volume %s') % volume['id'])
+ ret = self._ensure_vdisk_no_fc_mappings(volume['name'],
+ allow_snaps=False)
+ if not ret:
+ exception_message = (_('extend_volume: Extending a volume with '
+ 'snapshots is not supported.'))
+ raise exception.VolumeBackendAPIException(data=exception_message)
+
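+ # expandvdisksize takes the amount to add in GB rather than the new
+ # total size, so pass the difference from the current size.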
+ extend_amt = int(new_size) - volume['size']
+ ssh_cmd = ('svctask expandvdisksize -size %(amt)d -unit gb %(name)s'
+ % {'amt': extend_amt, 'name': volume['name']})
+ out, err = self._run_ssh(ssh_cmd)
+ # No output should be returned from expandvdisksize
+ self._assert_ssh_return(len(out.strip()) == 0, 'extend_volume',
+ ssh_cmd, out, err)
+ LOG.debug(_('leave: extend_volume: volume %s') % volume['id'])
+
"""====================================================================="""
""" MISC/HELPERS """
"""====================================================================="""