from oslo.utils import units
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
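+# NOTE: LOG calls below pass their format arguments to the logger
+# instead of interpolating with '%', so formatting is deferred until a
+# record is actually emitted and translation markers such as _LE keep
+# the raw format string. A minimal sketch of the pattern (message and
+# variable names are illustrative only):
+#     LOG.error(_LE('failed: %s.'), err)   # lazy: preferred
+#     LOG.error(_LE('failed: %s.') % err)  # eager: avoid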
(out, err) = self._execute('mmgetstate', '-Y', run_as_root=True)
return out
except processutils.ProcessExecutionError as exc:
- LOG.error(_LE('Failed to issue mmgetstate command, error: %s.') %
+ LOG.error(_LE('Failed to issue mmgetstate command, error: %s.'),
exc.stderr)
raise exception.VolumeBackendAPIException(data=exc.stderr)
state_token = lines[0].split(':').index('state')
gpfs_state = lines[1].split(':')[state_token]
if gpfs_state != 'active':
- LOG.error(_LE('GPFS is not active. Detailed output: %s.') % out)
+ LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out)
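+ # The '%' below is intentionally kept: exception messages must be
+ # formatted immediately, unlike lazily formatted log messages.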
exception_message = (_('GPFS is not running, state: %s.') %
gpfs_state)
raise exception.VolumeBackendAPIException(data=exception_message)
return filesystem
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue df command for path %(path)s, '
- 'error: %(error)s.') %
+ 'error: %(error)s.'),
{'path': path,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
cluster_id = lines[1].split(':')[value_token]
return cluster_id
except processutils.ProcessExecutionError as exc:
- LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.') %
+ LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'),
exc.stderr)
raise exception.VolumeBackendAPIException(data=exc.stderr)
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmlsattr command on path %(path)s, '
- 'error: %(error)s') %
+ 'error: %(error)s.'),
{'path': path,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
try:
self._execute('mmchattr', '-P', new_pool, local_path,
run_as_root=True)
- LOG.debug('Updated storage pool with mmchattr to %s.' % new_pool)
+ LOG.debug('Updated storage pool with mmchattr to %s.', new_pool)
return True
except processutils.ProcessExecutionError as exc:
- LOG.info('Could not update storage pool with mmchattr to '
- '%(pool)s, error: %(error)s' %
+ LOG.info(_LI('Could not update storage pool with mmchattr to '
+ '%(pool)s, error: %(error)s.'),
{'pool': new_pool,
'error': exc.stderr})
return False
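+ # NOTE: _LI (added to the i18n import above) marks translatable
+ # info-level messages, mirroring _LE for errors; debug messages are
+ # left untranslated per the OpenStack i18n guidelines.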
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmlsfs command for path %(path)s, '
- 'error: %(error)s.') %
+ 'error: %(error)s.'),
{'path': path,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
(out, err) = self._execute('mmlsconfig', 'minreleaseLeveldaemon',
'-Y', run_as_root=True)
except processutils.ProcessExecutionError as exc:
- LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.') %
+ LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'),
exc.stderr)
raise exception.VolumeBackendAPIException(data=exc.stderr)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Failed to issue mmlsattr command '
'for path %(path)s, '
- 'error: %(error)s.') %
+ 'error: %(error)s.'),
{'path': directory,
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
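+ # info is expected as 'driver:cluster_id:path', e.g.
+ # 'GPFSDriver:4711:/gpfs0/volumes' (example values are hypothetical).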
(dest_type, dest_id, dest_path) = info.split(':')
except ValueError:
LOG.debug('Evaluate migration: unexpected location info, '
- 'cannot migrate locally: %s.' % info)
+ 'cannot migrate locally: %s.', info)
return None
if dest_type != 'GPFSDriver' or dest_id != self._cluster_id:
LOG.debug('Evaluate migration: different destination driver or '
- 'cluster id in location info: %s.' % info)
+ 'cluster id in location info: %s.', info)
return None
LOG.debug('Evaluate migration: use local migration.')
cmd = ['mmchattr']
cmd.extend(options)
cmd.append(path)
- LOG.debug('Update volume attributes with mmchattr to %s.' % options)
+ LOG.debug('Update volume attributes with mmchattr to %s.', options)
self._execute(*cmd, run_as_root=True)
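+ # For example, options = ['-r', '2'] runs: mmchattr -r 2 <path>
+ # (values are hypothetical; -r sets the GPFS data replica count).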
def _set_volume_attributes(self, volume, path, metadata):
cloneable_image, reason, image_path = self._is_cloneable(image_id)
if not cloneable_image:
- LOG.debug('Image %(img)s not cloneable: %(reas)s.' %
+ LOG.debug('Image %(img)s not cloneable: %(reas)s.',
{'img': image_id, 'reas': reason})
return (None, False)
if data.file_format == 'raw':
if (self.configuration.gpfs_images_share_mode ==
'copy_on_write'):
- LOG.debug('Clone image to vol %s using mmclone.' %
+ LOG.debug('Clone image to vol %s using mmclone.',
volume['id'])
# if the image is not already a GPFS snap file make it so
if not self._is_gpfs_parent_file(image_path):
self._create_gpfs_copy(image_path, vol_path)
elif self.configuration.gpfs_images_share_mode == 'copy':
- LOG.debug('Clone image to vol %s using copyfile.' %
+ LOG.debug('Clone image to vol %s using copyfile.',
volume['id'])
shutil.copyfile(image_path, vol_path)
# if image is not raw convert it to raw into vol_path destination
else:
- LOG.debug('Clone image to vol %s using qemu convert.' %
+ LOG.debug('Clone image to vol %s using qemu convert.',
volume['id'])
image_utils.convert_image(image_path, vol_path, 'raw')
# Check if GPFS is mounted
self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
- LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.' %
+ LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.',
volume['id'])
image_utils.fetch_to_raw(context, image_service, image_id,
self.local_path(volume),
image_utils.resize_image(vol_path, new_size, run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE("Failed to resize volume "
- "%(volume_id)s, error: %(error)s.") %
+ "%(volume_id)s, error: %(error)s."),
{'volume_id': volume['id'],
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
- LOG.debug('Begin backup of volume %s.' % volume['name'])
+ LOG.debug('Begin backup of volume %s.', volume['name'])
# create a snapshot that will be used as the backup source
backup_path = '%s_%s' % (volume_path, backup['id'])
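+ # The '%' here builds a file path, not a log message, so eager
+ # interpolation is correct and deliberately unchanged.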
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
- LOG.debug('Begin restore of backup %s.' % backup['id'])
+ LOG.debug('Begin restore of backup %s.', backup['id'])
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
def _migrate_volume(self, volume, host):
"""Migrate vol if source and dest are managed by same GPFS cluster."""
- LOG.debug('Migrate volume request %(vol)s to %(host)s.' %
+ LOG.debug('Migrate volume request %(vol)s to %(host)s.',
{'vol': volume['name'],
'host': host['host']})
dest_path = self._can_migrate_locally(host)
except processutils.ProcessExecutionError as exc:
LOG.error(_LE('Driver-based migration of volume %(vol)s failed. '
'Move from %(src)s to %(dst)s failed with error: '
- '%(error)s.') %
+ '%(error)s.'),
{'vol': volume['name'],
'src': local_path,
'dst': new_path,
def retype(self, context, volume, new_type, diff, host):
"""Modify volume to be of new type."""
LOG.debug('Retype volume request %(vol)s to be %(type)s '
- '(host: %(host)s), diff %(diff)s.' %
+ '(host: %(host)s), diff %(diff)s.',
{'vol': volume['name'],
'type': new_type,
'host': host,
# if different backends let migration create a new volume and copy
# data because the volume is considered to be substantially different
if _different(backends):
+ backend1, backend2 = backends
LOG.debug('Retype request is for different backends, '
- 'use migration: %s %s.' % backends)
+ 'use migration: %(backend1)s %(backend2)s.',
+ {'backend1': backend1, 'backend2': backend2})
return False
if _different(pools):
old, new = pools
- LOG.debug('Retype pool attribute from %s to %s.' % pools)
+ LOG.debug('Retype pool attribute from %(old)s to %(new)s.',
+ {'old': old, 'new': new})
retyped = self._update_volume_storage_pool(self.local_path(volume),
new)
if _different(hosts):
- LOG.debug('Retype hosts migrate from: %s to %s.' % hosts)
+ source, destination = hosts
+ LOG.debug('Retype hosts migrate from: %(source)s to '
+ '%(destination)s.', {'source': source,
+ 'destination': destination})
migrated, mdl_update = self._migrate_volume(volume, host)
if migrated:
updates = {'host': host['host']}