self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
self.driver = gpfs.GPFSDriver(configuration=conf.Configuration(None))
+ self.driver.gpfs_execute = self._execute_wrapper
self.driver.set_execute(self._execute_wrapper)
self.driver._cluster_id = '123456'
self.driver._gpfs_device = '/dev/gpfs'
def test_sizestr(self):
self.assertEqual(gpfs._sizestr('10'), '10G')
+ @mock.patch('cinder.utils.execute')
+ def test_gpfs_local_execute(self, mock_exec):
+ mock_exec.return_value = 'test'
+ self.driver._gpfs_local_execute('test')
+ expected = [mock.call('test', run_as_root=True)]
+ self.assertEqual(expected, mock_exec.mock_calls)
+
@mock.patch('cinder.utils.execute')
def test_get_gpfs_state_ok(self, mock_exec):
mock_exec.return_value = ('mmgetstate::HEADER:version:reserved:'
volume['host'] = host
return (volume, new_type, diff, host)
+
+
+class GPFSNFSDriverTestCase(test.TestCase):
+ driver_name = "cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver"
+ TEST_NFS_EXPORT = 'nfs-host1:/export'
+ TEST_SIZE_IN_GB = 1
+ TEST_EXTEND_SIZE_IN_GB = 2
+ TEST_MNT_POINT = '/mnt/nfs'
+ TEST_MNT_POINT_BASE = '/mnt'
+ TEST_GPFS_MNT_POINT_BASE = '/export'
+ TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
+ TEST_VOLUME_PATH = '/export/volume-123'
+ TEST_SNAP_PATH = '/export/snapshot-123'
+
+ def _execute_wrapper(self, cmd, *args, **kwargs):
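+ # Unit tests cannot run commands as root; drop run_as_root before
+ # delegating to utils.execute.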
+ kwargs.pop('run_as_root', None)
+
+ return utils.execute(cmd, *args, **kwargs)
+
+ def _fake_volume(self):
+ volume = {}
+ volume['id'] = '123456'
+ volume['name'] = 'test'
+ volume['size'] = 1000
+ volume['consistencygroup_id'] = 'cg-1234'
+ return volume
+
+ def _fake_snapshot(self):
+ snapshot = {}
+ snapshot['id'] = '12345'
+ snapshot['name'] = 'test-snap'
+ snapshot['size'] = 1000
+ snapshot['volume_id'] = '123456'
+ snapshot['status'] = 'available'
+ return snapshot
+
+ def setUp(self):
+ super(GPFSNFSDriverTestCase, self).setUp()
+ self.driver = gpfs.GPFSNFSDriver(
+ configuration=conf.Configuration(None))
+ self.driver.gpfs_execute = self._execute_wrapper
+ self.driver.set_execute(self._execute_wrapper)
+ self.context = context.get_admin_context()
+ self.context.user_id = 'fake'
+ self.context.project_id = 'fake'
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_run_ssh')
+ def test_gpfs_remote_execute(self, mock_run_ssh):
+ mock_run_ssh.return_value = 'test'
+ self.driver._gpfs_remote_execute('test', check_exit_code=True)
+ expected = [mock.call(('test',), True)]
+ self.assertEqual(expected, mock_run_ssh.mock_calls)
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_ensure_shares_mounted')
+ def test_update_volume_stats(self, mock_ensure):
+ """Check update volume stats."""
+
+ mock_ensure.return_value = True
+ fake_avail = 80 * units.Gi
+ fake_size = 2 * fake_avail
+ fake_used = 10 * units.Gi
+
+ with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_get_capacity_info',
+ return_value=(fake_avail, fake_size, fake_used)):
+ stats = self.driver.get_volume_stats()
+ self.assertEqual('GPFSNFS', stats['volume_backend_name'])
+ self.assertEqual('file', stats['storage_protocol'])
+ stats = self.driver.get_volume_stats(True)
+ self.assertEqual('GPFSNFS', stats['volume_backend_name'])
+ self.assertEqual('file', stats['storage_protocol'])
+
+ def test_get_volume_path(self):
+ self.driver.configuration.gpfs_mount_point_base = (
+ self.TEST_GPFS_MNT_POINT_BASE)
+ volume = self._fake_volume()
+ self.assertEqual('/export/consisgroup-cg-1234/test',
+ self.driver._get_volume_path(volume))
+ volume['consistencygroup_id'] = None
+ self.assertEqual('/export/test',
+ self.driver._get_volume_path(volume))
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_get_mount_point_for_share')
+ def test_local_path(self, mock_mount_point):
+ mock_mount_point.return_value = self.TEST_MNT_POINT
+ volume = self._fake_volume()
+ volume['provider_location'] = self.TEST_GPFS_MNT_POINT_BASE
+ self.assertEqual('/mnt/nfs/consisgroup-cg-1234/test',
+ self.driver.local_path(volume))
+ volume['consistencygroup_id'] = None
+ self.assertEqual('/mnt/nfs/test',
+ self.driver.local_path(volume))
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_get_volume_path')
+ def test_get_snapshot_path(self, mock_volume_path):
+ volume = self._fake_volume()
+ self.driver.db = mock.Mock()
+ self.driver.db.volume_get = mock.Mock()
+ self.driver.db.volume_get.return_value = volume
+ mock_volume_path.return_value = os.path.join(
+ self.TEST_GPFS_MNT_POINT_BASE, volume['name'])
+ snapshot = self._fake_snapshot()
+ self.assertEqual('/export/test-snap',
+ self.driver._get_snapshot_path(snapshot))
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_find_share')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ 'create_volume')
+ def test_create_volume(self,
+ mock_create_volume,
+ mock_find_share):
+ volume = self._fake_volume()
+ mock_find_share.return_value = self.TEST_VOLUME_PATH
+ self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
+ self.driver.create_volume(volume))
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_delete_gpfs_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ 'local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_get_volume_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
+ def test_delete_volume(self,
+ mock_verify_gpfs_path_state,
+ mock_volume_path,
+ mock_local_path,
+ mock_delete_gpfs_file):
+ self.driver.delete_volume('')
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ 'delete_snapshot')
+ def test_delete_snapshot(self,
+ mock_delete_snapshot):
+ self.driver.delete_snapshot('')
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_find_share')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_volume_from_snapshot')
+ def test_create_volume_from_snapshot(self,
+ mock_create_volume_from_snapshot,
+ mock_find_share,
+ mock_resize_volume_file):
+ volume = self._fake_volume()
+ snapshot = self._fake_snapshot()
+ mock_find_share.return_value = self.TEST_VOLUME_PATH
+ self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
+ self.driver.create_volume_from_snapshot(volume,
+ snapshot))
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ '_find_share')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_cloned_volume')
+ def test_create_cloned_volume(self,
+ mock_create_cloned_volume,
+ mock_find_share,
+ mock_resize_volume_file):
+ volume = self._fake_volume()
+ src_vref = self._fake_volume()
+ mock_find_share.return_value = self.TEST_VOLUME_PATH
+ self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
+ self.driver.create_cloned_volume(volume, src_vref))
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_delete_gpfs_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_do_backup')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_backup_source')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
+ 'local_path')
+ def test_backup_volume(self,
+ mock_local_path,
+ mock_create_backup_source,
+ mock_do_backup,
+ mock_delete_gpfs_file):
+ volume = self._fake_volume()
+ self.driver.db = mock.Mock()
+ self.driver.db.volume_get = mock.Mock()
+ self.driver.db.volume_get.return_value = volume
+ backup = {}
+ backup['volume_id'] = 'test'
+ backup['id'] = '123456'
+ backup_service = mock.Mock()
+ mock_local_path.return_value = self.TEST_VOLUME_PATH
+ self.driver.backup_volume('', backup, backup_service)
from cinder.openstack.common import fileutils
from cinder import utils
from cinder.volume import driver
+from cinder.volume.drivers import nfs
+from cinder.volume.drivers import remotefs
+from cinder.volume.drivers.san import san
GPFS_CLONE_MIN_RELEASE = 1200
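+ # Destination driver types allowed for driver-assisted migration
+ # within the same GPFS cluster.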
+MIGRATION_ALLOWED_DEST_TYPE = ['GPFSDriver', 'GPFSNFSDriver']
LOG = logging.getLogger(__name__)
return '%sG' % size_in_g
-class GPFSDriver(driver.VolumeDriver):
+class GPFSDriver(driver.ConsistencyGroupVD, driver.ExtendVD,
+ driver.LocalVD, driver.TransferVD, driver.CloneableVD,
+ driver.CloneableImageVD, driver.SnapshotVD,
+ driver.RetypeVD, driver.MigrateVD,
+ driver.BaseVD):
"""Implements volume functions using GPFS primitives.
Version history:
1.0.0 - Initial driver
1.1.0 - Add volume retype, refactor volume migration
1.2.0 - Add consistency group support
+ 1.3.0 - Add NFS-based GPFS storage backend support
"""
- VERSION = "1.2.0"
+ VERSION = "1.3.0"
def __init__(self, *args, **kwargs):
super(GPFSDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(gpfs_opts)
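+ # gpfs_execute is the pluggable entry point for GPFS commands;
+ # GPFSNFSDriver overrides it to run the commands over SSH.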
+ self.gpfs_execute = self._gpfs_local_execute
+ self._execute = utils.execute
+
+ def _gpfs_local_execute(self, *cmd, **kwargs):
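+ # GPFS administrative commands require root, so default
+ # run_as_root to True unless the caller overrides it.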
+ if 'run_as_root' not in kwargs:
+ kwargs.update({'run_as_root': True})
+
+ return utils.execute(*cmd, **kwargs)
def _get_gpfs_state(self):
"""Return GPFS state information."""
try:
- (out, err) = self._execute('mmgetstate', '-Y', run_as_root=True)
+ (out, err) = self.gpfs_execute('mmgetstate', '-Y')
return out
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmgetstate command, error: %s.'),
def _get_filesystem_from_path(self, path):
"""Return filesystem for specified path."""
try:
- (out, err) = self._execute('df', path, run_as_root=True)
+ (out, err) = self.gpfs_execute('df', path)
lines = out.splitlines()
filesystem = lines[1].split()[0]
return filesystem
def _get_gpfs_cluster_id(self):
"""Return the id for GPFS cluster being used."""
try:
- (out, err) = self._execute('mmlsconfig', 'clusterId', '-Y',
- run_as_root=True)
+ (out, err) = self.gpfs_execute('mmlsconfig', 'clusterId', '-Y')
lines = out.splitlines()
value_token = lines[0].split(':').index('value')
cluster_id = lines[1].split(':')[value_token]
"""Return the GPFS fileset for specified path."""
fs_regex = re.compile(r'.*fileset.name:\s+(?P<fileset>\w+)', re.S)
try:
- (out, err) = self._execute('mmlsattr', '-L', path,
- run_as_root=True)
+ (out, err) = self.gpfs_execute('mmlsattr', '-L', path)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmlsattr command on path %(path)s, '
'error: %(error)s'),
def _verify_gpfs_pool(self, storage_pool):
"""Return true if the specified pool is a valid GPFS storage pool."""
try:
- self._execute('mmlspool', self._gpfs_device, storage_pool,
- run_as_root=True)
+ self.gpfs_execute('mmlspool', self._gpfs_device, storage_pool)
return True
except processutils.ProcessExecutionError:
return False
raise exception.VolumeBackendAPIException(data=msg)
try:
- self._execute('mmchattr', '-P', new_pool, local_path,
- run_as_root=True)
+ self.gpfs_execute('mmchattr', '-P', new_pool, local_path)
LOG.debug('Updated storage pool with mmchattr to %s.', new_pool)
return True
except processutils.ProcessExecutionError as exc:
"""
filesystem = self._get_filesystem_from_path(path)
try:
- (out, err) = self._execute('mmlsfs', filesystem, '-V', '-Y',
- run_as_root=True)
+ (out, err) = self.gpfs_execute('mmlsfs', filesystem, '-V', '-Y')
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmlsfs command for path %(path)s, '
'error: %(error)s.'),
def _get_gpfs_cluster_release_level(self):
"""Return the GPFS version of current cluster."""
try:
- (out, err) = self._execute('mmlsconfig', 'minreleaseLeveldaemon',
- '-Y', run_as_root=True)
+ (out, err) = self.gpfs_execute('mmlsconfig',
+ 'minreleaseLeveldaemon',
+ '-Y')
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'),
exc.stderr)
If not part of a gpfs file system, raise ProcessExecutionError.
"""
try:
- self._execute('mmlsattr', directory, run_as_root=True)
+ self.gpfs_execute('mmlsattr', directory)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmlsattr command '
'for path %(path)s, '
def _set_rw_permission(self, path, modebits='660'):
"""Set permission bits for the path."""
- self._execute('chmod', modebits, path, run_as_root=True)
+ self.gpfs_execute('chmod', modebits, path)
def _can_migrate_locally(self, host):
"""Return true if the host can migrate a volume locally."""
LOG.debug('Evaluate migration: unexpected location info, '
'cannot migrate locally: %s.', info)
return None
- if dest_type != 'GPFSDriver' or dest_id != self._cluster_id:
+ if (dest_id != self._cluster_id or
+ dest_type not in MIGRATION_ALLOWED_DEST_TYPE):
LOG.debug('Evaluate migration: different destination driver or '
'cluster id in location info: %s.', info)
return None
"""Creates file with 0 disk usage."""
sizestr = _sizestr(size)
- self._execute('truncate', '-s', sizestr, path, run_as_root=True)
+ self.gpfs_execute('truncate', '-s', sizestr, path)
def _allocate_file_blocks(self, path, size):
"""Preallocate file blocks by writing zeros."""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
- self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
- 'bs=%dM' % block_size_mb,
- 'count=%d' % block_count,
- run_as_root=True)
+ self.gpfs_execute('dd', 'if=/dev/zero', 'of=%s' % path,
+ 'bs=%dM' % block_size_mb,
+ 'count=%d' % block_count)
def _gpfs_change_attributes(self, options, path):
"""Update GPFS attributes on the specified file."""
cmd.extend(options)
cmd.append(path)
LOG.debug('Update volume attributes with mmchattr to %s.', options)
- self._execute(*cmd, run_as_root=True)
+ self.gpfs_execute(*cmd)
def _set_volume_attributes(self, volume, path, metadata):
"""Set various GPFS attributes for this volume."""
# Check if GPFS is mounted
self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
- volume_path = self.local_path(volume)
+ volume_path = self._get_volume_path(volume)
volume_size = volume['size']
# Create a sparse file first; allocate blocks later if requested
if not self.configuration.gpfs_sparse_volumes:
self._allocate_file_blocks(volume_path, volume_size)
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a GPFS volume from a snapshot."""
-
+ def _create_volume_from_snapshot(self, volume, snapshot):
snapshot_path = self._get_snapshot_path(snapshot)
# check if the snapshot lies in the same CG as the volume to be created
# if yes, clone the volume from the snapshot, else perform full copy
if (volume['consistencygroup_id'] ==
snap_parent_vol['consistencygroup_id']):
clone = True
-
- volume_path = self.local_path(volume)
+ volume_path = self._get_volume_path(volume)
if clone:
self._create_gpfs_copy(src=snapshot_path, dest=volume_path)
self._gpfs_redirect(volume_path)
self._set_rw_permission(volume_path)
v_metadata = volume.get('volume_metadata')
self._set_volume_attributes(volume, volume_path, v_metadata)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a GPFS volume from a snapshot."""
+ self._create_volume_from_snapshot(volume, snapshot)
virt_size = self._resize_volume_file(volume, volume['size'])
return {'size': math.ceil(virt_size / units.Gi)}
- def create_cloned_volume(self, volume, src_vref):
- """Create a GPFS volume from another volume."""
+ def _get_volume_path(self, volume):
+ return self.local_path(volume)
- src = self.local_path(src_vref)
- dest = self.local_path(volume)
+ def _create_cloned_volume(self, volume, src_vref):
+ src = self._get_volume_path(src_vref)
+ dest = self._get_volume_path(volume)
if (volume['consistencygroup_id'] == src_vref['consistencygroup_id']):
self._create_gpfs_clone(src, dest)
else:
self._gpfs_full_copy(src, dest)
-
self._set_rw_permission(dest)
v_metadata = volume.get('volume_metadata')
self._set_volume_attributes(volume, dest, v_metadata)
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Create a GPFS volume from another volume."""
+ self._create_cloned_volume(volume, src_vref)
virt_size = self._resize_volume_file(volume, volume['size'])
return {'size': math.ceil(virt_size / units.Gi)}
- def _delete_gpfs_file(self, fchild):
+ def _delete_gpfs_file(self, fchild, mount_point=None):
"""Delete a GPFS file and cleanup clone children."""
- if not os.path.exists(fchild):
- return
- (out, err) = self._execute('mmclone', 'show', fchild, run_as_root=True)
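+ # When mount_point is given (GPFS accessed over NFS), fchild is the
+ # remote GPFS path; existence checks and rm are performed on the
+ # local NFS mount instead.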
+ if mount_point is None:
+ if not os.path.exists(fchild):
+ return
+ else:
+ fchild_local_path = os.path.join(mount_point,
+ os.path.basename(fchild))
+ if not os.path.exists(fchild_local_path):
+ return
+
+ (out, err) = self.gpfs_execute('mmclone', 'show', fchild)
fparent = None
+ delete_parent = False
inode_regex = re.compile(
r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)', re.M | re.S)
match = inode_regex.match(out)
if match:
inode = match.group('inode')
- path = os.path.dirname(fchild)
+ if mount_point is None:
+ path = os.path.dirname(fchild)
+ else:
+ path = mount_point
+
(out, err) = self._execute('find', path, '-maxdepth', '1',
'-inum', inode, run_as_root=True)
if out:
fparent = out.split('\n', 1)[0]
- self._execute(
- 'rm', '-f', fchild, check_exit_code=False, run_as_root=True)
+
+ if mount_point is None:
+ self._execute(
+ 'rm', '-f', fchild, check_exit_code=False, run_as_root=True)
+ else:
+ self._execute(
+ 'rm', '-f', fchild_local_path, check_exit_code=False,
+ run_as_root=True)
# There is no need to check for volume references on this snapshot
# because 'rm -f' itself serves as a simple and implicit check. If the
# parent is referenced by another volume, GPFS does not allow deleting
# it; 'rm -f' fails silently, and the subsequent existence check shows
# whether any volumes are still derived from this snapshot.
# If there are such volumes, we quit recursion and let the other
# volumes delete the snapshot later. If there are no references, rm
# would succeed and the snapshot is deleted.
- if not os.path.exists(fchild) and fparent:
+ if mount_point is None:
+ if not os.path.exists(fchild) and fparent:
+ delete_parent = True
+ else:
+ if not os.path.exists(fchild_local_path) and fparent:
+ delete_parent = True
+
+ if delete_parent:
fpbase = os.path.basename(fparent)
if fpbase.endswith('.snap') or fpbase.endswith('.ts'):
- self._delete_gpfs_file(fparent)
+ if mount_point is None:
+ self._delete_gpfs_file(fparent)
+ else:
+ fparent_remote_path = os.path.join(os.path.dirname(fchild),
+ fpbase)
+ fparent_mount_path = os.path.dirname(fparent)
+ self._delete_gpfs_file(fparent_remote_path,
+ fparent_mount_path)
def delete_volume(self, volume):
"""Deletes a logical volume."""
max_depth = self.configuration.gpfs_max_clone_depth
if max_depth == 0:
return False
- (out, err) = self._execute('mmclone', 'show', src, run_as_root=True)
+ (out, err) = self.gpfs_execute('mmclone', 'show', src)
depth_regex = re.compile(r'.*\s+no\s+(?P<depth>\d+)', re.M | re.S)
match = depth_regex.match(out)
if match:
depth = int(match.group('depth'))
if depth > max_depth:
- self._execute('mmclone', 'redirect', src, run_as_root=True)
+ self.gpfs_execute('mmclone', 'redirect', src)
return True
return False
def _create_gpfs_copy(self, src, dest):
"""Create a GPFS file clone copy for the specified file."""
- self._execute('mmclone', 'copy', src, dest, run_as_root=True)
+ self.gpfs_execute('mmclone', 'copy', src, dest)
def _gpfs_full_copy(self, src, dest):
"""Create a full copy from src to dest."""
- self._execute('cp', src, dest,
- check_exit_code=True, run_as_root=True)
+ self.gpfs_execute('cp', src, dest, check_exit_code=True)
def _create_gpfs_snap(self, src, dest=None):
"""Create a GPFS file clone snapshot for the specified file."""
if dest is None:
- self._execute('mmclone', 'snap', src, run_as_root=True)
+ self.gpfs_execute('mmclone', 'snap', src)
else:
- self._execute('mmclone', 'snap', src, dest, run_as_root=True)
+ self.gpfs_execute('mmclone', 'snap', src, dest)
def _is_gpfs_parent_file(self, gpfs_file):
"""Return true if the specified file is a gpfs clone parent."""
- out, err = self._execute('mmclone', 'show', gpfs_file,
- run_as_root=True)
+ out, err = self.gpfs_execute('mmclone', 'show', gpfs_file)
ptoken = out.splitlines().pop().split()[0]
return ptoken == 'yes'
# snapshots will also be deleted.
snapshot_path = self._get_snapshot_path(snapshot)
snapshot_ts_path = '%s.ts' % snapshot_path
- self._execute('mv', snapshot_path, snapshot_ts_path, run_as_root=True)
- self._execute('rm', '-f', snapshot_ts_path,
- check_exit_code=False, run_as_root=True)
+ self.gpfs_execute('mv', snapshot_path, snapshot_ts_path)
+ self.gpfs_execute('rm', '-f', snapshot_ts_path,
+ check_exit_code=False)
def _get_snapshot_path(self, snapshot):
ctxt = context.get_admin_context()
image_meta,
self.local_path(volume))
+ def _create_backup_source(self, volume, backup):
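+ # Clone the volume to <volume path>_<backup id> as the backup
+ # source; redirect the parent if the clone chain exceeds
+ # gpfs_max_clone_depth.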
+ src_path = self._get_volume_path(volume)
+ dest_path = '%s_%s' % (src_path, backup['id'])
+ self._create_gpfs_clone(src_path, dest_path)
+ self._gpfs_redirect(src_path)
+ return dest_path
+
+ def _do_backup(self, backup_path, backup, backup_service):
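+ # Temporarily chown the backup file so the backup service can
+ # read it without root privileges.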
+ with utils.temporary_chown(backup_path):
+ with fileutils.file_open(backup_path) as backup_file:
+ backup_service.backup(backup, backup_file)
+
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
- LOG.debug('Begin backup of volume %s.', volume['name'])
-
- # create a snapshot that will be used as the backup source
backup_path = '%s_%s' % (volume_path, backup['id'])
- self._create_gpfs_clone(volume_path, backup_path)
- self._gpfs_redirect(volume_path)
-
+ # create a snapshot that will be used as the backup source
+ self._create_backup_source(volume, backup)
try:
- with utils.temporary_chown(backup_path):
- with fileutils.file_open(backup_path) as backup_file:
- backup_service.backup(backup, backup_file)
+ LOG.debug('Begin backup of volume %s.', volume['name'])
+ self._do_backup(backup_path, backup, backup_service)
finally:
# clean up snapshot file. If it is a clone parent, delete
# will fail silently, but be cleaned up when volume is
LOG.debug('Migration target is same cluster but different path, '
'move the volume file.')
- local_path = self.local_path(volume)
+ local_path = self._get_volume_path(volume)
new_path = os.path.join(dest_path, volume['name'])
try:
- self._execute('mv', local_path, new_path, run_as_root=True)
+ self.gpfs_execute('mv', local_path, new_path)
return (True, None)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Driver-based migration of volume %(vol)s failed. '
cgpath = os.path.join(self.configuration.gpfs_mount_point_base,
cgname)
try:
- self._execute('mmcrfileset', fsdev, cgname,
- '--inode-space', 'new', run_as_root=True)
+ self.gpfs_execute('mmcrfileset', fsdev, cgname,
+ '--inode-space', 'new')
except processutils.ProcessExecutionError as e:
msg = (_('Failed to create consistency group: %(cgid)s. '
'Error: %(excmsg)s.') %
raise exception.VolumeBackendAPIException(data=msg)
try:
- self._execute('mmlinkfileset', fsdev, cgname,
- '-J', cgpath, run_as_root=True)
+ self.gpfs_execute('mmlinkfileset', fsdev, cgname,
+ '-J', cgpath)
except processutils.ProcessExecutionError as e:
msg = (_('Failed to link fileset for the share %(cgname)s. '
'Error: %(excmsg)s.') %
raise exception.VolumeBackendAPIException(data=msg)
try:
- self._execute('chmod', '770', cgpath, run_as_root=True)
+ self.gpfs_execute('chmod', '770', cgpath)
except processutils.ProcessExecutionError as e:
msg = (_('Failed to set permissions for the consistency group '
'%(cgname)s. '
# Unlink and delete the fileset associated with the consistency group.
# All of the volumes and volume snapshot data will also be deleted.
try:
- self._execute('mmunlinkfileset', fsdev, cgname, '-f',
- run_as_root=True)
+ self.gpfs_execute('mmunlinkfileset', fsdev, cgname, '-f')
except processutils.ProcessExecutionError as e:
msg = (_('Failed to unlink fileset for consistency group '
'%(cgname)s. Error: %(excmsg)s.') %
raise exception.VolumeBackendAPIException(data=msg)
try:
- self._execute('mmdelfileset', fsdev, cgname, '-f',
- run_as_root=True)
+ self.gpfs_execute('mmdelfileset', fsdev, cgname, '-f')
except processutils.ProcessExecutionError as e:
msg = (_('Failed to delete fileset for consistency group '
'%(cgname)s. Error: %(excmsg)s.') %
model_update = {'status': cgsnapshot['status']}
return model_update, snapshots
+
+
+class GPFSNFSDriver(GPFSDriver, nfs.NfsDriver, san.SanDriver):
+ """GPFS cinder driver extension.
+
+ This extends the existing GPFS cinder driver so that cinder volumes
+ can be created even when the cinder volume service is not running
+ on a GPFS node.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._context = None
+ self._storage_pool = None
+ self._cluster_id = None
+ super(GPFSNFSDriver, self).__init__(*args, **kwargs)
+ self.gpfs_execute = self._gpfs_remote_execute
+ self.configuration.append_config_values(remotefs.nas_opts)
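+ # Reuse the san driver's SSH transport by mapping the NAS options
+ # onto the corresponding san settings.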
+ self.configuration.san_ip = self.configuration.nas_ip
+ self.configuration.san_login = self.configuration.nas_login
+ self.configuration.san_password = self.configuration.nas_password
+ self.configuration.san_private_key = (
+ self.configuration.nas_private_key)
+ self.configuration.san_ssh_port = self.configuration.nas_ssh_port
+
+ def _gpfs_remote_execute(self, *cmd, **kwargs):
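+ # Run the GPFS command on the remote GPFS node over SSH; only
+ # check_exit_code is passed through to _run_ssh.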
+ check_exit_code = kwargs.pop('check_exit_code', None)
+ return self._run_ssh(cmd, check_exit_code)
+
+ def do_setup(self, context):
+ super(GPFSNFSDriver, self).do_setup(context)
+ self._context = context
+
+ def get_volume_stats(self, refresh=False):
+ """Get volume stats.
+
+ If 'refresh' is True, or the stats have never been updated, update
+ the stats first.
+ """
+ if not self._stats or refresh:
+ self._update_volume_stats()
+
+ return self._stats
+
+ def _update_volume_stats(self):
+ """Retrieve stats info from volume group."""
+
+ LOG.debug("Enter _update_volume_stats.")
+ gpfs_base = self.configuration.gpfs_mount_point_base
+ data = {}
+ backend_name = self.configuration.safe_get('volume_backend_name')
+ data['volume_backend_name'] = backend_name or 'GPFSNFS'
+ data['vendor_name'] = 'IBM'
+ data['driver_version'] = self.get_version()
+ data['storage_protocol'] = 'file'
+
+ self._ensure_shares_mounted()
+
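+ # Aggregate capacity and free space across all mounted shares.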
+ global_capacity = 0
+ global_free = 0
+ for share in self._mounted_shares:
+ capacity, free, _used = self._get_capacity_info(share)
+ global_capacity += capacity
+ global_free += free
+
+ data['total_capacity_gb'] = global_capacity / float(units.Gi)
+ data['free_capacity_gb'] = global_free / float(units.Gi)
+ data['reserved_percentage'] = 0
+ data['QoS_support'] = False
+ data['storage_pool'] = self._storage_pool
+ data['location_info'] = ('GPFSNFSDriver:%(cluster_id)s:%(root_path)s' %
+ {'cluster_id': self._cluster_id,
+ 'root_path': gpfs_base})
+
+ data['consistencygroup_support'] = 'True'
+ self._stats = data
+ LOG.debug("Exit _update_volume_stats.")
+
+ def _get_volume_path(self, volume):
+ """Returns remote GPFS path for the given volume."""
+ export_path = self.configuration.gpfs_mount_point_base
+ if volume['consistencygroup_id'] is not None:
+ cgname = "consisgroup-%s" % volume['consistencygroup_id']
+ volume_path = os.path.join(export_path, cgname, volume['name'])
+ else:
+ volume_path = os.path.join(export_path, volume['name'])
+ return volume_path
+
+ def local_path(self, volume):
+ """Returns the local path for the specified volume."""
+ remotefs_share = volume['provider_location']
+ base_local_path = self._get_mount_point_for_share(remotefs_share)
+
+ # Check if the volume is part of a consistency group and return
+ # the local_path accordingly.
+ if volume['consistencygroup_id'] is not None:
+ cgname = "consisgroup-%s" % volume['consistencygroup_id']
+ volume_path = os.path.join(base_local_path, cgname, volume['name'])
+ else:
+ volume_path = os.path.join(base_local_path, volume['name'])
+ return volume_path
+
+ def _get_snapshot_path(self, snapshot):
+ """Returns remote GPFS path for the given snapshot."""
+ snap_parent_vol = self.db.volume_get(self._context,
+ snapshot['volume_id'])
+ snap_parent_vol_path = self._get_volume_path(snap_parent_vol)
+ snapshot_path = os.path.join(os.path.dirname(snap_parent_vol_path),
+ snapshot['name'])
+ return snapshot_path
+
+ def create_volume(self, volume):
+ """Creates a GPFS volume."""
+ super(GPFSNFSDriver, self).create_volume(volume)
+ volume['provider_location'] = self._find_share(volume['size'])
+ return {'provider_location': volume['provider_location']}
+
+ def delete_volume(self, volume):
+ """Deletes a logical volume."""
+ # Check if GPFS is mounted
+ self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
+
+ volume_path = self._get_volume_path(volume)
+ mount_point = os.path.dirname(self.local_path(volume))
+ # Delete all dependent snapshots; a snapshot is deleted once its
+ # link count drops to zero, otherwise rm fails silently.
+ self._delete_gpfs_file(volume_path, mount_point)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a GPFS volume from a snapshot."""
+ self._create_volume_from_snapshot(volume, snapshot)
+ volume['provider_location'] = self._find_share(volume['size'])
+ self._resize_volume_file(volume, volume['size'])
+ return {'provider_location': volume['provider_location']}
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Create a GPFS volume from another volume."""
+ self._create_cloned_volume(volume, src_vref)
+ volume['provider_location'] = self._find_share(volume['size'])
+ self._resize_volume_file(volume, volume['size'])
+ return {'provider_location': volume['provider_location']}
+
+ def backup_volume(self, context, backup, backup_service):
+ """Create a new backup from an existing volume."""
+ volume = self.db.volume_get(context, backup['volume_id'])
+ volume_path = self.local_path(volume)
+ backup_path = '%s_%s' % (volume_path, backup['id'])
+ # create a snapshot that will be used as the backup source
+ backup_remote_path = self._create_backup_source(volume, backup)
+ try:
+ LOG.debug('Begin backup of volume %s.', volume['name'])
+ self._do_backup(backup_path, backup, backup_service)
+ finally:
+ # Clean up the snapshot file. If it is a clone parent, the delete
+ # fails silently, and the file is cleaned up when the volume is
+ # eventually removed. This ensures we do not accumulate more than
+ # gpfs_max_clone_depth snap files.
+ backup_mount_path = os.path.dirname(backup_path)
+ self._delete_gpfs_file(backup_remote_path, backup_mount_path)