from oslo.config import cfg
import paste.urlmap
-from cinder.i18n import _
+from cinder.i18n import _LW
from cinder.openstack.common import log as logging
def root_app_factory(loader, global_conf, **local_conf):
if CONF.enable_v1_api:
- LOG.warn(_('The v1 api is deprecated and will be removed after the '
- 'Juno release. You should set enable_v1_api=false and '
- 'enable_v2_api=true in your cinder.conf file.'))
+ LOG.warn(_LW('The v1 api is deprecated and will be removed after the '
+ 'Juno release. You should set enable_v1_api=false and '
+ 'enable_v2_api=true in your cinder.conf file.'))
else:
del local_conf['/v1']
if not CONF.enable_v2_api:
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
import cinder.policy
try:
self.load_extension(ext_factory)
except Exception as exc:
- LOG.warn(_('Failed to load extension %(ext_factory)s: '
- '%(exc)s'),
+ LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
+ '%(exc)s'),
{'ext_factory': ext_factory, 'exc': exc})
from cinder import exception
from cinder import i18n
-from cinder.i18n import _, _LI
+from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder import utils
from cinder import wsgi
code=ex_value.code, explanation=ex_value.msg))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
- LOG.error(_(
+ LOG.error(_LE(
'Exception handling resource: %s') %
ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
driver.do_setup(ctxt)
driver.check_for_setup_error()
except Exception as ex:
- LOG.error(_("Error encountered during initialization of driver: "
- "%(name)s.") %
+ LOG.error(_LE("Error encountered during initialization of driver: "
+ "%(name)s.") %
{'name': driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
from cinder.brick.initiator import linuxfc
from cinder.brick.initiator import linuxscsi
from cinder.brick.remotefs import remotefs
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
if tries >= self.device_scan_attempts:
raise exception.VolumeDeviceNotFound(device=host_device)
- LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. "
- "Will rescan & retry. Try number: %(tries)s"),
+ LOG.warn(_LW("ISCSI volume not yet found at: %(host_device)s. "
+ "Will rescan & retry. Try number: %(tries)s"),
{'host_device': host_device,
'tries': tries})
LOG.error(msg)
raise exception.NoFibreChannelVolumeDeviceFound()
- LOG.warn(_("Fibre volume not yet found. "
- "Will rescan & retry. Try number: %(tries)s"),
+ LOG.warn(_LW("Fibre volume not yet found. "
+ "Will rescan & retry. Try number: %(tries)s"),
{'tries': tries})
self._linuxfc.rescan_hosts(hbas)
if waiting_status['tries'] >= self.device_scan_attempts:
raise exception.VolumeDeviceNotFound(device=aoe_path)
- LOG.warn(_("AoE volume not yet found at: %(path)s. "
- "Try number: %(tries)s"),
+ LOG.warn(_LW("AoE volume not yet found at: %(path)s. "
+ "Try number: %(tries)s"),
{'path': aoe_device,
'tries': waiting_status['tries']})
kwargs.get('glusterfs_mount_point_base') or\
mount_point_base
else:
- LOG.warn(_("Connection details not present."
- " RemoteFsClient may not initialize properly."))
+ LOG.warn(_LW("Connection details not present."
+ " RemoteFsClient may not initialize properly."))
self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper,
execute=execute,
*args, **kwargs)
from oslo.concurrency import processutils as putils
from cinder.brick.initiator import linuxscsi
-from cinder.i18n import _
+from cinder.i18n import _LW
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# and systool is not installed
# 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
if exc.exit_code == 96:
- LOG.warn(_("systool is not installed"))
+ LOG.warn(_LW("systool is not installed"))
return []
except OSError as exc:
# This handles the case where rootwrap is NOT used
# and systool is not installed
if exc.errno == errno.ENOENT:
- LOG.warn(_("systool is not installed"))
+ LOG.warn(_LW("systool is not installed"))
return []
# No FC HBAs were found
from oslo.concurrency import processutils as putils
from cinder.brick import executor
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
self._execute('multipath', '-f', device, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
- LOG.warn(_("multipath call failed exit (%(code)s)")
+ LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def flush_multipath_devices(self):
self._execute('multipath', '-F', run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
- LOG.warn(_("multipath call failed exit (%(code)s)")
+ LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
def find_multipath_device(self, device):
run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as exc:
- LOG.warn(_("multipath call failed exit (%(code)s)")
+ LOG.warn(_LW("multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
return None
mdev_id = mdev_id.replace(')', '')
if mdev is None:
- LOG.warn(_("Couldn't find multipath device %(line)s")
+ LOG.warn(_LW("Couldn't find multipath device %(line)s")
% {'line': line})
return None
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
- LOG.error(_('Unable to locate Volume Group %s') % vg_name)
+ LOG.error(_LE('Unable to locate Volume Group %s') % vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1:
- LOG.error(_('Unable to find VG: %s') % self.vg_name)
+ LOG.error(_LE('Unable to find VG: %s') % self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size'])
"""
if not self.supports_thin_provisioning(self._root_helper):
- LOG.error(_('Requested to setup thin provisioning, '
- 'however current LVM version does not '
- 'support it.'))
+ LOG.error(_LE('Requested to setup thin provisioning, '
+ 'however current LVM version does not '
+ 'support it.'))
return None
if name is None:
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
- LOG.error(_("Trying to create snapshot by non-existent LV: %s")
+ LOG.error(_LE("Trying to create snapshot by non-existent LV: %s")
% source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = ['lvcreate', '--name', name,
import sqlalchemy
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
- LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+ LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
from cinder.db import base
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
import cinder.policy
from cinder import quota
group = self.db.consistencygroup_create(context, options)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error occurred when creating consistency group"
- " %s."), name)
+ LOG.error(_LE("Error occurred when creating consistency group"
+ " %s."), name)
request_spec_list = []
filter_properties_list = []
try:
self.db.consistencygroup_destroy(context, group_id)
finally:
- LOG.error(_("Error occurred when building "
- "request spec list for consistency group "
- "%s."), group_id)
+ LOG.error(_LE("Error occurred when building "
+ "request spec list for consistency group "
+ "%s."), group_id)
# Cast to the scheduler and let it handle whatever is needed
# to select the target host for this group.
self.db.consistencygroup_destroy(context.elevated(),
group_id)
finally:
- LOG.error(_("Failed to update quota for "
- "consistency group %s."), group_id)
+ LOG.error(_LE("Failed to update quota for "
+ "consistency group %s."), group_id)
@wrap_check_policy
def delete(self, context, group, force=False):
try:
self.db.cgsnapshot_destroy(context, cgsnapshot_id)
finally:
- LOG.error(_("Error occurred when creating cgsnapshot"
- " %s."), cgsnapshot_id)
+ LOG.error(_LE("Error occurred when creating cgsnapshot"
+ " %s."), cgsnapshot_id)
self.volume_rpcapi.create_cgsnapshot(context, group, cgsnapshot)
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
- LOG.warn(_("Deadlock detected when running "
- "'%(func_name)s': Retrying..."),
+ LOG.warn(_LW("Deadlock detected when running "
+ "'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
from oslo.utils import excutils
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.keymgr import key as keymgr_key
from cinder.keymgr import key_mgr
from cinder.openstack.common import log as logging
endpoint=self._barbican_endpoint)
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error creating Barbican client: %s"), (e))
+ LOG.error(_LE("Error creating Barbican client: %s"), (e))
return self._barbican_client
return secret_uuid
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error creating key: %s"), (e))
+ LOG.error(_LE("Error creating key: %s"), (e))
def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
payload_content_type='application/octet-stream',
return secret_uuid
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error storing key: %s"), (e))
+ LOG.error(_LE("Error storing key: %s"), (e))
def copy_key(self, ctxt, key_id):
"""Copies (i.e., clones) a key stored by barbican.
return copy_uuid
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error copying key: %s"), (e))
+ LOG.error(_LE("Error copying key: %s"), (e))
def _create_secret_ref(self, key_id, barbican_client):
"""Creates the URL required for accessing a secret.
return secret_data
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error getting secret data: %s"), (e))
+ LOG.error(_LE("Error getting secret data: %s"), (e))
def _get_secret(self, ctxt, secret_ref):
"""Creates the URL required for accessing a secret's metadata.
return barbican_client.secrets.get(secret_ref)
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error getting secret metadata: %s"), (e))
+ LOG.error(_LE("Error getting secret metadata: %s"), (e))
def get_key(self, ctxt, key_id,
payload_content_type='application/octet-stream'):
return key
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error getting key: %s"), (e))
+ LOG.error(_LE("Error getting key: %s"), (e))
def delete_key(self, ctxt, key_id):
"""Deletes the specified key.
barbican_client.secrets.delete(secret_ref)
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error deleting key: %s"), (e))
+ LOG.error(_LE("Error deleting key: %s"), (e))
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.keymgr import key
from cinder.keymgr import key_mgr
from cinder.openstack.common import log as logging
def _generate_hex_key(self, **kwargs):
if CONF.keymgr.fixed_key is None:
- LOG.warn(_('config option keymgr.fixed_key has not been defined: '
- 'some operations may fail unexpectedly'))
+ LOG.warn(_LW('config option keymgr.fixed_key has not been defined:'
+ ' some operations may fail unexpectedly'))
raise ValueError(_('keymgr.fixed_key not defined'))
return CONF.keymgr.fixed_key
raise exception.KeyManagerError(
reason="cannot delete non-existent key")
- LOG.warn(_("Not deleting key %s"), key_id)
+ LOG.warn(_LW("Not deleting key %s"), key_id)
import logging
from openstack.common.gettextutils import _ # noqa
-
+from cinder.i18n import _LI
LOG = logging.getLogger(__name__)
if target_name or target_id:
arrow = " -> "
- LOG.info(_("Request ID Link: %(event_name)s '%(source_id)s'%(arrow)s"
- "%(target_name)s%(target_id)s") % {"event_name": event_name,
- "source_id": source_id,
- "target_name": rtarget_name,
- "arrow": arrow,
- "target_id": rtarget_id})
+ LOG.info(_LI("Request ID Link: %(event_name)s '%(source_id)s'%(arrow)s"
+ "%(target_name)s%(target_id)s") % {"event_name": event_name,
+ "source_id": source_id,
+ "target_name": rtarget_name,
+ "arrow": arrow,
+ "target_id": rtarget_id})
if notifier:
payload = {"source_request_id": source_id,
from cinder import db
from cinder import exception
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
from cinder.openstack.common.scheduler import weights
for service in volume_services:
host = service['host']
if not utils.service_is_up(service):
- LOG.warn(_("volume service is down. (host: %s)") % host)
+ LOG.warn(_LW("volume service is down. (host: %s)") % host)
continue
capabilities = self.service_states.get(host, None)
host_state = self.host_state_map.get(host)
from cinder.brick import exception
from cinder.brick.initiator import connector
from cinder.brick.initiator import host_driver
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import test
except loopingcall.LoopingCallDone:
return self
except Exception:
- LOG.exception(_('in fixed duration looping call'))
+ LOG.exception(_LE('in fixed duration looping call'))
raise
# License for the specific language governing permissions and limitations
# under the License.
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
from cinder.tests.brick.fake_lvm import FakeBrickLVM
from cinder.volume import driver
self.log_action('clear_volume', volume)
def local_path(self, volume):
- LOG.error(_("local_path not implemented"))
+ LOG.error(_LE("local_path not implemented"))
raise NotImplementedError()
def ensure_export(self, context, volume):
import requests
import six.moves.urllib.parse as urlparse
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
relative_url = parsed_url.path
if parsed_url.query:
relative_url = relative_url + "?" + parsed_url.query
- LOG.info(_("Doing %(method)s on %(relative_url)s"),
+ LOG.info(_LI("Doing %(method)s on %(relative_url)s"),
{'method': method, 'relative_url': relative_url})
if body:
- LOG.info(_("Body: %s") % body)
+ LOG.info(_LI("Body: %s") % body)
if port:
_url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url)
import mock
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LW
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.fujitsu_eternus_dx_common import FJDXCommon
rc = 0L
job = {}
else:
- LOG.warn(_('method is not exist '))
+ LOG.warn(_LW('method does not exist'))
raise exception.VolumeBackendAPIException(data="invoke method")
LOG.debug('exit InvokeMethod:MAP_STAT: %s VOL_STAT: %s'
' Method: %s rc: %d job: %s' %
import paramiko
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LI
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
"""Normal flow for i-t mode."""
GlobalVars._is_normal_test = True
GlobalVars._zone_state = []
- LOG.info(_("In Add GlobalVars._is_normal_test: "
- "%s"), GlobalVars._is_normal_test)
- LOG.info(_("In Add GlobalVars._zone_state:"
- " %s"), GlobalVars._zone_state)
+ LOG.info(_LI("In Add GlobalVars._is_normal_test: "
+ "%s"), GlobalVars._is_normal_test)
+ LOG.info(_LI("In Add GlobalVars._zone_state:"
+ " %s"), GlobalVars._zone_state)
get_active_zs_mock.return_value = _active_cfg_before_add
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertTrue(_zone_name in GlobalVars._zone_state)
class FakeBrcdFCZoneClientCLI(object):
def __init__(self, ipaddress, username, password, port):
- LOG.info(_("User: %s"), username)
- LOG.info(_("_zone_state: %s"), GlobalVars._zone_state)
+ LOG.info(_LI("User: %s"), username)
+ LOG.info(_LI("_zone_state: %s"), GlobalVars._zone_state)
self.firmware_supported = True
if not GlobalVars._is_normal_test:
raise paramiko.SSHException("Unable to connect to fabric")
try:
transfer = self.db.transfer_create(context, transfer_rec)
except Exception:
- LOG.error(_("Failed to create transfer record for %s") % volume_id)
+ LOG.error(_LE("Failed to create transfer record "
+ "for %s") % volume_id)
raise
return {'id': transfer['id'],
'volume_id': transfer['volume_id'],
from cinder.brick.initiator import connector
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
- LOG.error(_("Volume driver %s not initialized") % driver_name)
+ LOG.error(_LE("Volume driver %s not initialized") % driver_name)
raise exception.DriverNotInitialized()
elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
- LOG.error(_('Unable to find service for given host.'))
+ LOG.error(_LE('Unable to find service for given host.'))
availability_zone = service.get('availability_zone')
volume_type_id = volume_type['id'] if volume_type else None
from oslo.utils import excutils
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
def _do_iscsi_discovery(self, volume):
# TODO(justinsb): Deprecate discovery and use stored info
# NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
- LOG.warn(_("ISCSI provider_location not stored, using discovery"))
+ LOG.warn(_LW("ISCSI provider_location not "
+ "stored, using discovery"))
volume_name = volume['name']
volume['host'].split('@')[0],
run_as_root=True)
except processutils.ProcessExecutionError as ex:
- LOG.error(_("ISCSI discovery attempt failed for:%s") %
+ LOG.error(_LE("ISCSI discovery attempt failed for:%s") %
volume['host'].split('@')[0])
LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
return None
from cinder import context
from cinder.db.sqlalchemy import api
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume import driver
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
- LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
+ LOG.info(_LI('Creating clone of volume: %s') % src_vref['id'])
device = self.find_appropriate_size_device(src_vref['size'])
volutils.copy_volume(
self.local_path(src_vref), device,
from cinder import context
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common
iscsi_properties = self.smis_get_iscsi_properties(
volume, connector)
- LOG.info(_("Leaving initialize_connection: %s") % (iscsi_properties))
+ LOG.info(_LI("Leaving initialize_connection: %s") % (iscsi_properties))
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
def smis_do_iscsi_discovery(self, volume):
- LOG.info(_("ISCSI provider_location not stored, using discovery."))
+ LOG.info(_LI("ISCSI provider_location not stored, using discovery."))
(out, _err) = self._execute('iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p',
self.configuration.iscsi_ip_address,
run_as_root=True)
- LOG.info(_(
+ LOG.info(_LI(
"smis_do_iscsi_discovery is: %(out)s")
% {'out': out})
targets = []
device_number = device_info['hostlunid']
- LOG.info(_(
+ LOG.info(_LI(
"location is: %(location)s") % {'location': location})
for loc in location:
properties['volume_id'] = volume['id']
- LOG.info(_("ISCSI properties: %(properties)s")
+ LOG.info(_LI("ISCSI properties: %(properties)s")
% {'properties': properties})
- LOG.info(_("ISCSI volume is: %(volume)s")
+ LOG.info(_LI("ISCSI volume is: %(volume)s")
% {'volume': volume})
if 'provider_auth' in volume:
auth = volume['provider_auth']
- LOG.info(_("AUTH properties: %(authProps)s")
+ LOG.info(_LI("AUTH properties: %(authProps)s")
% {'authProps': auth})
if auth is not None:
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
- LOG.info(_("AUTH properties: %s") % (properties))
+ LOG.info(_LI("AUTH properties: %s") % (properties))
return properties
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
else:
- LOG.error(_('Failed to find an available iSCSI targets for %s.'),
+ LOG.error(_LE('Failed to find an available '
+ 'iSCSI target for %s.'),
storage_group)
return properties
try:
return json.loads(str_result)
except Exception:
- LOG.exception(_('querying %(typ)s, %(req)s failed to '
- 'parse result, return value = %(res)s'),
+ LOG.exception(_LE('querying %(typ)s, %(req)s failed to '
+ 'parse result, return value = %(res)s'),
{'typ': object_type,
'req': request_typ,
'res': str_result})
import six
from cinder import exception
-from cinder.i18n import _, _LE, _LW
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume import volume_types
volumesize = int(volume['size']) * units.Gi
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Create Volume: %(volume)s Size: %(size)lu')
+ LOG.info(_LI('Create Volume: %(volume)s Size: %(size)lu')
% {'volume': volumename,
'size': volumesize})
volumename = self._create_volume_name(volume['id'])
vol_instance = None
- LOG.info(_('Create Volume from Snapshot: Volume: %(volumename)s '
- 'Snapshot: %(snapshotname)s')
+ LOG.info(_LI('Create Volume from Snapshot: Volume: %(volumename)s '
+ 'Snapshot: %(snapshotname)s')
% {'volumename': volumename,
'snapshotname': snapshotname})
srcname = self._create_volume_name(src_vref['id'])
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Create a Clone from Volume: Volume: %(volumename)s '
- 'Source Volume: %(srcname)s')
+ LOG.info(_LI('Create a Clone from Volume: Volume: %(volumename)s '
+ 'Source Volume: %(srcname)s')
% {'volumename': volumename,
'srcname': srcname})
"""Deletes an volume."""
LOG.debug('Entering delete_volume.')
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Delete Volume: %(volume)s')
+ LOG.info(_LI('Delete Volume: %(volume)s')
% {'volume': volumename})
self.conn = self._get_ecom_connection()
snapshotname = self._create_volume_name(snapshot['id'])
volumename = snapshot['volume_name']
- LOG.info(_('Create snapshot: %(snapshot)s: volume: %(volume)s')
+ LOG.info(_LI('Create snapshot: %(snapshot)s: volume: %(volume)s')
% {'snapshot': snapshotname,
'volume': volumename})
snapshotname = snapshot['name']
volumename = snapshot['volume_name']
- LOG.info(_('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
+ LOG.info(_LI('Delete Snapshot: %(snapshot)s: volume: %(volume)s')
% {'snapshot': snapshotname,
'volume': volumename})
sync_name, storage_system =\
self._find_storage_sync_sv_sv(snapshot, volume, False)
if sync_name is None:
- LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
- 'Snapshot is deleted.')
+ LOG.info(_LI('Snapshot: %(snapshot)s: volume: %(volume)s. '
+ 'Snapshot is deleted.')
% {'snapshot': snapshotname,
'volume': volumename})
raise loopingcall.LoopingCallDone()
except Exception as ex:
if ex.args[0] == 6:
# 6 means object not found, so snapshot is deleted cleanly
- LOG.info(_('Snapshot: %(snapshot)s: volume: %(volume)s. '
- 'Snapshot is deleted.')
+ LOG.info(_LI('Snapshot: %(snapshot)s: volume: %(volume)s. '
+ 'Snapshot is deleted.')
% {'snapshot': snapshotname,
'volume': volumename})
else:
def _map_lun(self, volume, connector):
"""Maps a volume to the host."""
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Map volume: %(volume)s')
+ LOG.info(_LI('Map volume: %(volume)s')
% {'volume': volumename})
vol_instance = self._find_lun(volume)
def _unmap_lun(self, volume, connector):
"""Unmaps a volume from the host."""
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Unmap volume: %(volume)s')
+ LOG.info(_LI('Unmap volume: %(volume)s')
% {'volume': volumename})
device_info = self.find_device_number(volume, connector)
device_number = device_info['hostlunid']
if device_number is None:
- LOG.info(_("Volume %s is not mapped. No volume to unmap.")
+ LOG.info(_LI("Volume %s is not mapped. No volume to unmap.")
% (volumename))
return
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info."""
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Initialize connection: %(volume)s')
+ LOG.info(_LI('Initialize connection: %(volume)s')
% {'volume': volumename})
self.conn = self._get_ecom_connection()
device_info = self.find_device_number(volume, connector)
device_number = device_info['hostlunid']
if device_number is not None:
- LOG.info(_("Volume %s is already mapped.")
+ LOG.info(_LI("Volume %s is already mapped.")
% (volumename))
else:
self._map_lun(volume, connector)
def terminate_connection(self, volume, connector):
"""Disallow connection from connector."""
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Terminate connection: %(volume)s')
+ LOG.info(_LI('Terminate connection: %(volume)s')
% {'volume': volumename})
self.conn = self._get_ecom_connection()
self._unmap_lun(volume, connector)
volumesize = int(new_size) * units.Gi
volumename = self._create_volume_name(volume['id'])
- LOG.info(_('Extend Volume: %(volume)s New size: %(size)lu')
+ LOG.info(_LI('Extend Volume: %(volume)s New size: %(size)lu')
% {'volume': volumename,
'size': volumesize})
snapshot_instance = self._find_lun(snapshot)
volume_instance = self._find_lun(volume)
if snapshot_instance is None or volume_instance is None:
- LOG.info(_('Snapshot Volume %(snapshotname)s, '
- 'Source Volume %(volumename)s not found on the array.')
+ LOG.info(_LI('Snapshot Volume %(snapshotname)s, '
+ 'Source Volume %(volumename)s not '
+ 'found on the array.')
% {'snapshotname': snapshotname,
'volumename': volumename})
return None, None
if self._is_job_finished(conn, job):
raise loopingcall.LoopingCallDone()
if self.retries > JOB_RETRIES:
- LOG.error(_("_wait_for_job_complete failed after %(retries)d "
- "tries") % {'retries': self.retries})
+ LOG.error(_LE("_wait_for_job_complete failed after %(retries)d"
+ " tries") % {'retries': self.retries})
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
if self._is_job_finished(conn, job):
self.wait_for_job_called = True
except Exception as e:
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
exceptionMessage = (_("Issue encountered waiting for job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(exceptionMessage)
if self._is_sync_complete(conn, syncName):
raise loopingcall.LoopingCallDone()
if self.retries > JOB_RETRIES:
- LOG.error(_("_wait_for_sync failed after %(retries)d tries")
+ LOG.error(_LE("_wait_for_sync failed after %(retries)d tries")
% {'retries': self.retries})
raise loopingcall.LoopingCallDone()
try:
if self._is_sync_complete(conn, syncName):
self.wait_for_sync_called = True
except Exception as e:
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
exceptionMessage = (_("Issue encountered waiting for "
"synchronization."))
LOG.error(exceptionMessage)
break
if out_num_device_number is None:
- LOG.info(_("Device number not found for volume "
- "%(volumename)s %(vol_instance)s.")
+ LOG.info(_LI("Device number not found for volume "
+ "%(volumename)s %(vol_instance)s.")
% {'volumename': volumename,
'vol_instance': vol_instance.path})
else:
from cinder import context
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers import fujitsu_eternus_dx_common
def _do_iscsi_discovery(self, volume):
- LOG.warn(_("ISCSI provider_location not stored, using discovery"))
+ LOG.warn(_LW("ISCSI provider_location not stored, using discovery"))
(out, _err) = self._execute('iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p',
import requests
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.san.san import SanISCSIDriver
if i.key == 'fio-qos' and i.value in valid_presets]
if len(presets) > 0:
if len(presets) > 1:
- LOG.warning(_('More than one valid preset was '
- 'detected, using %s') % presets[0])
+ LOG.warning(_LW('More than one valid preset was '
+ 'detected, using %s') % presets[0])
return self.fio_qos_dict[presets[0]]
def _set_qos_by_volume_type(self, type_id):
from cinder.db.sqlalchemy import api
from cinder.db.sqlalchemy import models
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LW
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
try:
self.command.restart_pair_horcm()
except Exception as e:
- LOG.warning(_('Failed to restart horcm: %s') %
+ LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(e))
else:
if (all_split or is_vvol) and restart:
try:
self.command.restart_pair_horcm()
except Exception as e:
- LOG.warning(_('Failed to restart horcm: %s') %
+ LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(e))
def copy_async_data(self, pvol, svol, is_vvol):
import six
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LW
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.volume.driver
try:
self._fill_group(hgs, port, host_grp_name, wwns_copy)
except Exception as ex:
- LOG.warning(_('Failed to add host group: %s') %
+ LOG.warning(_LW('Failed to add host group: %s') %
six.text_type(ex))
msg = basic_lib.set_msg(
308, port=port, name=host_grp_name)
import six
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
try:
self.comm_modify_ldev(ldev)
except Exception as e:
- LOG.warning(_('Failed to discard zero page: %s') %
+ LOG.warning(_LW('Failed to discard zero page: %s') %
six.text_type(e))
@storage_synchronized
[basic_lib.PSUS], timeout,
interval, check_svol=True)
except Exception as ex:
- LOG.warning(_('Failed to create pair: %s') %
+ LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
try:
[basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
- LOG.warning(_('Failed to create pair: %s') %
+ LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
if self.is_smpl(copy_group, ldev_name):
self.delete_pair_config(pvol, svol, copy_group,
ldev_name)
except Exception as ex:
- LOG.warning(_('Failed to create pair: %s') %
+ LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
if restart:
try:
self.restart_pair_horcm()
except Exception as ex:
- LOG.warning(_('Failed to restart horcm: %s') %
+ LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(ex))
else:
pvol, svol, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
- LOG.warning(_('Failed to create pair: %s') %
+ LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
def delete_pair(self, pvol, svol, is_vvol):
import six
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
used_list.append(int(line[2]))
if int(line[3]) == ldev:
hlu = int(line[2])
- LOG.warning(_('ldev(%(ldev)d) is already mapped '
- '(hlun: %(hlu)d)')
+ LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
+ '(hlun: %(hlu)d)')
% {'ldev': ldev, 'hlu': hlu})
return hlu
return None
import time
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.huawei import huawei_utils
port_num -= 1
break
else:
- LOG.warn(_('_remove_iscsi_port: iSCSI port was not found '
- 'on host %(hostid)s.') % {'hostid': hostid})
+ LOG.warn(_LW('_remove_iscsi_port: iSCSI port was not found '
+ 'on host %(hostid)s.') % {'hostid': hostid})
# Delete host if no initiator added to it.
if port_num == 0:
self.common._delete_hostport(port[0])
port_num -= 1
else:
- LOG.warn(_('_remove_fc_ports: FC port was not found '
- 'on host %(hostid)s.') % {'hostid': hostid})
+ LOG.warn(_LW('_remove_fc_ports: FC port was not found '
+ 'on host %(hostid)s.') % {'hostid': hostid})
if port_num == 0:
self.common._delete_host(hostid)
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers.huawei import huawei_utils
if policy_id:
self._update_qos_policy_lunlist(lun_list, policy_id)
else:
- LOG.warn(_("Can't find the Qos policy in array"))
+ LOG.warn(_LW("Can't find the Qos policy in array"))
# Create lun group and add LUN into to lun group
lungroup_id = self._create_lungroup(volume_name)
self._delete_lungroup(lungroup_id)
self._delete_lun(lun_id)
else:
- LOG.warn(_("Can't find lun or lun group in array"))
+ LOG.warn(_LW("Can't find lun or lun group in array"))
def _delete_lun_from_qos_policy(self, volume, lun_id):
"""Remove lun from qos policy."""
params[key] = value.strip()
else:
conf = self.configuration.cinder_huawei_conf_file
- LOG.warn(_('_parse_volume_type: Unacceptable parameter '
- '%(key)s. Please check this key in extra_specs '
- 'and make it consistent with the configuration '
- 'file %(conf)s.') % {'key': key, 'conf': conf})
+ LOG.warn(_LW('_parse_volume_type: Unacceptable parameter '
+ '%(key)s. Please check this key in '
+ 'extra_specs and make it consistent with the '
+ 'configuration file '
+ '%(conf)s.') % {'key': key, 'conf': conf})
LOG.debug("The config parameters are: %s" % params)
return params
try:
tree.write(filename, 'UTF-8')
except Exception as err:
- LOG.warn(_('%s') % err)
+ LOG.warn(_LW('%s') % err)
return logininfo
result = self.call(url, data, "PUT")
self._assert_rest_result(result, 'Extend lun error.')
else:
- LOG.warn(_('Can not find lun in array'))
+ LOG.warn(_LW('Cannot find lun in array'))
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder import ssh_utils
from cinder import utils
params[key] = value.strip()
else:
conf = self.configuration.cinder_huawei_conf_file
- LOG.warn(_('_parse_volume_type: Unacceptable parameter '
- '%(key)s. Please check this key in extra_specs '
- 'and make it consistent with the element in '
- 'configuration file %(conf)s.')
+ LOG.warn(_LW('_parse_volume_type: Unacceptable parameter '
+ '%(key)s. Please check this key in '
+ 'extra_specs '
+ 'and make it consistent with the element in '
+ 'configuration file %(conf)s.')
% {'key': key,
'conf': conf})
if map_id is not None:
self._delete_map(map_id)
else:
- LOG.warn(_('remove_map: No map between host %(host)s and '
- 'volume %(volume)s.') % {'host': host_name,
- 'volume': volume_id})
+ LOG.warn(_LW('remove_map: No map between host %(host)s and '
+ 'volume %(volume)s.') % {'host': host_name,
+ 'volume': volume_id})
return host_id
def _delete_map(self, mapid, attempts=2):
try:
image_utils.resize_image(vol_path, new_size, run_as_root=True)
except processutils.ProcessExecutionError as exc:
- LOG.error(_("Failed to resize volume "
- "%(volume_id)s, error: %(error)s.") %
+ LOG.error(_LE("Failed to resize volume "
+ "%(volume_id)s, error: %(error)s.") %
{'volume_id': volume['id'],
'error': exc.stderr})
raise exception.VolumeBackendAPIException(data=exc.stderr)
self._execute('mv', local_path, new_path, run_as_root=True)
return (True, None)
except processutils.ProcessExecutionError as exc:
- LOG.error(_('Driver-based migration of volume %(vol)s failed. '
- 'Move from %(src)s to %(dst)s failed with error: '
- '%(error)s.') %
+ LOG.error(_LE('Driver-based migration of volume %(vol)s failed. '
+ 'Move from %(src)s to %(dst)s failed with error: '
+ '%(error)s.') %
{'vol': volume['name'],
'src': local_path,
'dst': new_path,
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
if chap_enabled and chap_secret is None:
chap_secret = self._helpers.add_chap_secret_to_host(host_name)
elif not chap_enabled and chap_secret:
- LOG.warning(_('CHAP secret exists for host but CHAP is '
- 'disabled'))
+ LOG.warning(_LW('CHAP secret exists for host but CHAP is '
+ 'disabled'))
volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
if volume_attributes is None:
if not preferred_node_entry and not vol_opts['multipath']:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
- LOG.warn(_('initialize_connection: Did not find a preferred '
- 'node for volume %s') % volume_name)
+ LOG.warn(_LW('initialize_connection: Did not find a preferred '
+ 'node for volume %s') % volume_name)
properties = {}
properties['target_discovered'] = False
properties['target_wwn'] = WWPN
break
else:
- LOG.warning(_('Unable to find a preferred node match '
- 'for node %(node)s in the list of '
- 'available WWPNs on %(host)s. '
- 'Using first available.') %
+ LOG.warning(_LW('Unable to find a preferred node match'
+ ' for node %(node)s in the list of '
+ 'available WWPNs on %(host)s. '
+ 'Using first available.') %
{'node': preferred_node,
'host': host_name})
properties['target_wwn'] = conn_wwpns[0]
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
- LOG.warn(_('Volume %s does not exist.'), vol_id)
+ LOG.warn(_LW('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
attributes = self._helpers.get_pool_attrs(pool)
if not attributes:
- LOG.error(_('Could not get pool data from the storage'))
+ LOG.error(_LE('Could not get pool data from the storage'))
exception_message = (_('_update_volume_stats: '
'Could not get storage pool data'))
raise exception.VolumeBackendAPIException(data=exception_message)
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh
if 'unconfigured' != s:
wwpns.add(i)
node['WWPN'] = list(wwpns)
- LOG.info(_('WWPN on node %(node)s: %(wwpn)s')
+ LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
% {'node': node['id'], 'wwpn': node['WWPN']})
def add_chap_secret_to_host(self, host_name):
# Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp):
- LOG.warning(_('unmap_vol_from_host: No mapping of volume '
- '%(vol_name)s to any host found.') %
+ LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
+ '%(vol_name)s to any host found.') %
{'vol_name': volume_name})
return
if host_name is None:
if len(resp) > 1:
- LOG.warning(_('unmap_vol_from_host: Multiple mappings of '
- 'volume %(vol_name)s found, no host '
- 'specified.') % {'vol_name': volume_name})
+ LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
+ 'volume %(vol_name)s found, no host '
+ 'specified.') % {'vol_name': volume_name})
return
else:
host_name = resp[0]['host_name']
if h == host_name:
found = True
if not found:
- LOG.warning(_('unmap_vol_from_host: No mapping of volume '
- '%(vol_name)s to host %(host)s found.') %
+ LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
+ '%(vol_name)s to host %(host)s found.') %
{'vol_name': volume_name, 'host': host_name})
# We now know that the mapping exists
"""Ensures that vdisk is not part of FC mapping and deletes it."""
LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
if not self.is_vdisk_defined(vdisk):
- LOG.info(_('Tried to delete non-existant vdisk %s.') % vdisk)
+ LOG.info(_LI('Tried to delete non-existent vdisk %s.') % vdisk)
return
self.ensure_vdisk_no_fc_mappings(vdisk)
self.ssh.rmvdisk(vdisk, force=force)
from cinder.brick.remotefs import remotefs as remotefs_brick
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import utils
if attempt == (num_attempts - 1):
LOG.error(_LE('Mount failure for %(share)s after '
'%(count)d attempts.') % {
- 'share': nfs_share,
- 'count': num_attempts})
+ 'share': nfs_share,
+ 'count': num_attempts})
raise exception.NfsException(e)
LOG.debug('Mount attempt %d failed: %s.\nRetrying mount ...' %
(attempt, six.text_type(e)))
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
- LOG.info(_('Extending volume %s.'), volume['id'])
+ LOG.info(_LI('Extending volume %s.'), volume['id'])
extend_by = int(new_size) - volume['size']
if not self._is_share_eligible(volume['provider_location'],
extend_by):
' extend volume %s to %sG'
% (volume['id'], new_size))
path = self.local_path(volume)
- LOG.info(_('Resizing file to %sG...'), new_size)
+ LOG.info(_LI('Resizing file to %sG...'), new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if not self._is_file_size_equal(path, new_size):
self.configuration.nas_secure_file_permissions)
if self.configuration.nas_secure_file_permissions == 'false':
- LOG.warn(_("The NAS file permissions mode will be 666 (allowing "
- "other/world read & write access). This is considered "
- "an insecure NAS environment. Please see %s for "
- "information on a secure NFS configuration.") %
+ LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
+ "other/world read & write access). "
+ "This is considered an insecure NAS environment. "
+ "Please see %s for information on a secure "
+ "NFS configuration.") %
doc_html)
self.configuration.nas_secure_file_operations = \
self.configuration.nas_secure_file_operations)
if self.configuration.nas_secure_file_operations == 'false':
- LOG.warn(_("The NAS file operations will be run as root: allowing "
- "root level access at the storage backend. This is "
- "considered an insecure NAS environment. Please see %s "
- "for information on a secure NAS configuration.") %
+ LOG.warn(_LW("The NAS file operations will be run as "
+ "root: allowing root level access at the storage "
+ "backend. This is considered an insecure NAS "
+ "environment. Please see %s "
+ "for information on a secure NAS configuration.") %
doc_html)
def login(self):
"""Execute Https Login API."""
response = self._execute_login()
- LOG.info(_('Successful login by user %s') % self.username)
+ LOG.info(_LI('Successful login by user %s') % self.username)
self.sid = response['authInfo']['sid']
@_connection_checker
@_response_checker
def online_snap(self, vol_name, online_flag, snap_name, *args, **kwargs):
"""Execute onlineSnap API."""
- LOG.info(_('Setting snapshot %(snap)s to online_flag %(flag)s')
+ LOG.info(_LI('Setting snapshot %(snap)s to online_flag %(flag)s')
% {'snap': snap_name, 'flag': online_flag})
return self.client.service.onlineSnap(request={'sid': self.sid,
'vol': vol_name,
ret = 0
output = status.get('output', {})
else:
- LOG.error(_('Flexvisor failed to get pool info '
- '(failed to get event)%s.') % (poolid))
+ LOG.error(_LE('Flexvisor failed to get pool info '
+ '(failed to get event) %s.') % (poolid))
raise exception.VolumeBackendAPIException(
data="failed to get event")
elif ret != 0:
from cinder import context
from cinder import exception
from cinder import flow_utils
-from cinder.i18n import _, _LE, _LI
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume import qos_specs
'new_type': volume_type.get('name')})
except Exception:
with excutils.save_and_reraise_exception():
- LOG.warning(_("Failed to manage virtual volume %(disp)s "
- "due to error during retype.") %
+ LOG.warning(_LW("Failed to manage virtual volume %(disp)s "
+ "due to error during retype.") %
{'disp': display_name})
# Try to undo the rename and clear the new comment.
self.client.modifyVolume(
hpexceptions = None
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
import cinder.volume.driver
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
host = common._get_3par_host(hostname)
elif (not host['initiatorChapEnabled'] and
self.configuration.hp3par_iscsi_chap_enabled):
- LOG.warn(_("Host exists without CHAP credentials set and has "
- "iSCSI attachments but CHAP is enabled. Updating "
- "host with new CHAP credentials."))
+ LOG.warn(_LW("Host exists without CHAP credentials set "
+ "and has iSCSI attachments but CHAP is "
+ "enabled. Updating host with new CHAP "
+ "credentials."))
self._set_3par_chaps(
common,
hostname,
host_info = common.client.getHost(chap_username)
if not host_info['initiatorChapEnabled']:
- LOG.warn(_("Host has no CHAP key, but CHAP is enabled."))
+ LOG.warn(_LW("Host has no CHAP key, but CHAP is enabled."))
except hpexceptions.HTTPNotFound:
chap_password = volume_utils.generate_password(16)
- LOG.warn(_("No host or VLUNs exist. Generating new CHAP key."))
+ LOG.warn(_LW("No host or VLUNs exist. Generating new CHAP key."))
else:
# Get a list of all iSCSI VLUNs and see if there is already a CHAP
# key assigned to one of them. Use that CHAP key if present,
"but CHAP is enabled. Skipping." %
vlun['remoteName'])
else:
- LOG.warn(_("Non-iSCSI VLUN detected."))
+ LOG.warn(_LW("Non-iSCSI VLUN detected."))
if not chap_exists:
chap_password = volume_utils.generate_password(16)
- LOG.warn(_("No VLUN contained CHAP credentials. "
- "Generating new CHAP key."))
+ LOG.warn(_LW("No VLUN contained CHAP credentials. "
+ "Generating new CHAP key."))
# Add CHAP credentials to the volume metadata
vol_name = common._get_3par_vol_name(volume['id'])
LeftHand array.
"""
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _LI
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.driver import VolumeDriver
self.proxy = self._create_proxy(*self.args, **self.kwargs)
self.proxy.do_setup(context)
- LOG.info(_("HPLeftHand driver %(driver_ver)s, proxy %(proxy_ver)s") % {
+ LOG.info(_LI("HPLeftHand driver %(driver_ver)s, "
+ "proxy %(proxy_ver)s") % {
"driver_ver": self.VERSION,
"proxy_ver": self.proxy.get_version_string()})
from cinder import context
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.driver import ISCSIDriver
from cinder.volume import utils
server_info = self.client.getServerByName(connector['host'])
chap_secret = server_info['chapTargetSecret']
if not chap_enabled and chap_secret:
- LOG.warning(_('CHAP secret exists for host %s but CHAP is '
- 'disabled') % connector['host'])
+ LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
+ 'disabled') % connector['host'])
if chap_enabled and chap_secret is None:
- LOG.warning(_('CHAP is enabled, but server secret not '
- 'configured on server %s') % connector['host'])
+ LOG.warning(_LW('CHAP is enabled, but server secret not '
+ 'configured on server %s') % connector['host'])
return server_info
except hpexceptions.HTTPNotFound:
# server does not exist, so create one
virtual_ips = cluster_info['virtualIPAddresses']
if driver != self.__class__.__name__:
- LOG.info(_("Cannot provide backend assisted migration for "
- "volume: %s because volume is from a different "
- "backend.") % volume['name'])
+ LOG.info(_LI("Cannot provide backend assisted migration for "
+ "volume: %s because volume is from a different "
+ "backend.") % volume['name'])
return false_ret
if vip != virtual_ips[0]['ipV4Address']:
- LOG.info(_("Cannot provide backend assisted migration for "
- "volume: %s because cluster exists in different "
- "management group.") % volume['name'])
+ LOG.info(_LI("Cannot provide backend assisted migration for "
+ "volume: %s because cluster exists in different "
+ "management group.") % volume['name'])
return false_ret
except hpexceptions.HTTPNotFound:
- LOG.info(_("Cannot provide backend assisted migration for "
- "volume: %s because cluster exists in different "
- "management group.") % volume['name'])
+ LOG.info(_LI("Cannot provide backend assisted migration for "
+ "volume: %s because cluster exists in different "
+ "management group.") % volume['name'])
return false_ret
try:
# can't migrate if server is attached
if volume_info['iscsiSessions'] is not None:
- LOG.info(_("Cannot provide backend assisted migration "
- "for volume: %s because the volume has been "
- "exported.") % volume['name'])
+ LOG.info(_LI("Cannot provide backend assisted migration "
+ "for volume: %s because the volume has been "
+ "exported.") % volume['name'])
return false_ret
# can't migrate if volume has snapshots
'fields=snapshots,snapshots[resource[members[name]]]')
LOG.debug('Snapshot info: %s' % snap_info)
if snap_info['snapshots']['resource'] is not None:
- LOG.info(_("Cannot provide backend assisted migration "
- "for volume: %s because the volume has "
- "snapshots.") % volume['name'])
+ LOG.info(_LI("Cannot provide backend assisted migration "
+ "for volume: %s because the volume has "
+ "snapshots.") % volume['name'])
return false_ret
options = {'clusterName': cluster}
self.client.modifyVolume(volume_info['id'], options)
except hpexceptions.HTTPNotFound:
- LOG.info(_("Cannot provide backend assisted migration for "
- "volume: %s because volume does not exist in this "
- "management group.") % volume['name'])
+ LOG.info(_LI("Cannot provide backend assisted migration for "
+ "volume: %s because volume does not exist in this "
+ "management group.") % volume['name'])
return false_ret
except hpexceptions.HTTPServerError as ex:
LOG.error(ex)
from oslo.utils import excutils
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder import ssh_utils
from cinder import utils
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error running SSH command: %s") % command)
+ LOG.error(_LE("Error running SSH command: %s") % command)
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
from cinder.brick.remotefs import remotefs
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import utils
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
- LOG.warn(_('Volume %s does not have provider_location specified, '
- 'skipping.'), volume['name'])
+ LOG.warn(_LW('Volume %s does not have provider_location '
+ 'specified, skipping.'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
pattern = r"qemu-img version ([0-9\.]*)"
version = re.match(pattern, info)
if not version:
- LOG.warn(_("qemu-img is not installed."))
+ LOG.warn(_LW("qemu-img is not installed."))
return None
return [int(x) for x in version.groups()[0].split('.')]
@utils.synchronized('smbfs', external=False)
def extend_volume(self, volume, size_gb):
- LOG.info(_('Extending volume %s.'), volume['id'])
+ LOG.info(_LI('Extending volume %s.'), volume['id'])
self._extend_volume(volume, size_gb)
def _extend_volume(self, volume, size_gb):
volume_path = self.local_path(volume)
self._check_extend_volume_support(volume, size_gb)
- LOG.info(_('Resizing file to %sG...') % size_gb)
+ LOG.info(_LI('Resizing file to %sG...') % size_gb)
self._do_extend_volume(volume_path, size_gb, volume['name'])
Provides abstraction over cinder.volume.drivers.vmware.vim.Vim SOAP calls.
"""
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.vmware import error_util
try:
result = f(*args, **kwargs)
except self._exceptions as excep:
- LOG.exception(_("Failure while invoking function: "
- "%(func)s. Error: %(excep)s.") %
+ LOG.exception(_LE("Failure while invoking function: "
+ "%(func)s. Error: %(excep)s.") %
{'func': f.__name__, 'excep': excep})
if (self._max_retry_count != -1 and
self._retry_count >= self._max_retry_count):
# have been cleared. We could have made a call to
# SessionIsActive, but that is an overhead because we
# anyway would have to call TerminateSession.
- LOG.exception(_("Error while terminating session: %s.") %
+ LOG.exception(_LE("Error while terminating session: %s.") %
excep)
self._session_id = session.key
if self.pbm:
self.pbm.set_cookie()
- LOG.info(_("Successfully established connection to the server."))
+ LOG.info(_LI("Successfully established connection to the server."))
def __del__(self):
"""Logs-out the sessions."""
try:
self.vim.Logout(self.vim.service_content.sessionManager)
except Exception as excep:
- LOG.exception(_("Error while logging out from vim session: %s."),
+ LOG.exception(_LE("Error while logging out from vim session: %s."),
excep)
if self._pbm:
try:
self.pbm.Logout(self.pbm.service_content.sessionManager)
except Exception as excep:
- LOG.exception(_("Error while logging out from pbm session: "
- "%s."), excep)
+ LOG.exception(_LE("Error while logging out from pbm session: "
+ "%s."), excep)
def invoke_api(self, module, method, *args, **kwargs):
"""Wrapper method for invoking APIs.
return []
# empty response is due to an inactive session
- LOG.warn(_("Current session: %(session)s is inactive; "
- "re-creating the session while invoking "
- "method %(module)s.%(method)s."),
+ LOG.warn(_LW("Current session: %(session)s is inactive; "
+ "re-creating the session while invoking "
+ "method %(module)s.%(method)s."),
{'session': self._session_id,
'module': module,
'method': method},
sessionID=self._session_id,
userName=self._session_username)
except error_util.VimException:
- LOG.warn(_("Error occurred while checking whether the "
- "current session: %s is active."),
+ LOG.warn(_LW("Error occurred while checking whether the "
+ "current session: %s is active."),
self._session_id,
exc_info=True)
LOG.debug("Task %s status: success." % task)
else:
error_msg = str(task_info.error.localizedMessage)
- LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
+ LOG.exception(_LE("Task: %(task)s failed with "
+ "error: %(err)s.") %
{'task': task, 'err': error_msg})
raise error_util.VimFaultException([], error_msg)
except Exception as excep:
- LOG.exception(_("Task: %(task)s failed with error: %(err)s.") %
+ LOG.exception(_LE("Task: %(task)s failed with "
+ "error: %(err)s.") %
{'task': task, 'err': excep})
raise excep
# got the result. So stop the loop.
from oslo.utils import units
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
VERSION = '1.4.0'
def _do_deprecation_warning(self):
- LOG.warn(_('The VMware ESX VMDK driver is now deprecated and will be '
- 'removed in the Juno release. The VMware vCenter VMDK '
- 'driver will remain and continue to be supported.'))
+ LOG.warn(_LW('The VMware ESX VMDK driver is now deprecated '
+ 'and will be removed in the Juno release. The VMware '
+ 'vCenter VMDK driver will remain and continue to be '
+ 'supported.'))
def __init__(self, *args, **kwargs):
super(VMwareEsxVmdkDriver, self).__init__(*args, **kwargs)
max_objects = self.configuration.vmware_max_objects_retrieval
self._volumeops = volumeops.VMwareVolumeOps(self.session,
max_objects)
- LOG.info(_("Successfully setup driver: %(driver)s for "
- "server: %(ip)s.") %
+ LOG.info(_LI("Successfully setup driver: %(driver)s for "
+ "server: %(ip)s.") %
{'driver': driver,
'ip': self.configuration.vmware_host_ip})
"""
backing = self.volumeops.get_backing(volume['name'])
if not backing:
- LOG.info(_("Backing not available, no operation to be performed."))
+ LOG.info(_LI("Backing not available, no operation "
+ "to be performed."))
return
self.volumeops.delete_backing(backing)
LOG.error(msg, storage_profile)
raise error_util.VimException(msg % storage_profile)
elif storage_profile:
- LOG.warn(_("Ignoring storage profile %s requirement for this "
- "volume since policy based placement is "
- "disabled."), storage_profile)
+ LOG.warn(_LW("Ignoring storage profile %s requirement for this "
+ "volume since policy based placement is "
+ "disabled."), storage_profile)
size_bytes = volume['size'] * units.Gi
datastore_summary = self._select_datastore_summary(size_bytes,
selected_host = host
break
except error_util.VimException as excep:
- LOG.warn(_("Unable to find suitable datastore for volume "
- "of size: %(vol)s GB under host: %(host)s. "
- "More details: %(excep)s") %
+ LOG.warn(_LW("Unable to find suitable datastore for volume"
+ " of size: %(vol)s GB under host: %(host)s. "
+ "More details: %(excep)s") %
{'vol': volume['size'],
'host': host, 'excep': excep})
if selected_host:
if backing:
break
except error_util.VimException as excep:
- LOG.warn(_("Unable to find suitable datastore for "
- "volume: %(vol)s under host: %(host)s. "
- "More details: %(excep)s") %
+ LOG.warn(_LW("Unable to find suitable datastore for "
+ "volume: %(vol)s under host: %(host)s. "
+ "More details: %(excep)s") %
{'vol': volume['name'],
'host': host.obj, 'excep': excep})
if backing:
if not backing:
# Create a backing in case it does not exist under the
# host managing the instance.
- LOG.info(_("There is no backing for the volume: %s. "
- "Need to create one.") % volume['name'])
+ LOG.info(_LI("There is no backing for the volume: %s. "
+ "Need to create one.") % volume['name'])
backing = self._create_backing(volume, host)
else:
# Relocate volume is necessary
if not backing:
# Create a backing in case it does not exist. It is a bad use
# case to boot from an empty volume.
- LOG.warn(_("Trying to boot from an empty volume: %s.") %
+ LOG.warn(_LW("Trying to boot from an empty volume: %s.") %
volume['name'])
# Create backing
backing = self._create_backing_in_inventory(volume)
connection_info['data'] = {'volume': backing.value,
'volume_id': volume['id']}
- LOG.info(_("Returning connection_info: %(info)s for volume: "
- "%(volume)s with connector: %(connector)s.") %
+ LOG.info(_LI("Returning connection_info: %(info)s for volume: "
+ "%(volume)s with connector: %(connector)s.") %
{'info': connection_info,
'volume': volume['name'],
'connector': connector})
raise exception.InvalidVolume(msg % volume['status'])
backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing:
- LOG.info(_("There is no backing, so will not create "
- "snapshot: %s.") % snapshot['name'])
+ LOG.info(_LI("There is no backing, so will not create "
+ "snapshot: %s.") % snapshot['name'])
return
self.volumeops.create_snapshot(backing, snapshot['name'],
snapshot['display_description'])
- LOG.info(_("Successfully created snapshot: %s.") % snapshot['name'])
+ LOG.info(_LI("Successfully created snapshot: %s.") % snapshot['name'])
def create_snapshot(self, snapshot):
"""Creates a snapshot.
raise exception.InvalidVolume(msg % volume['status'])
backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing:
- LOG.info(_("There is no backing, and so there is no "
- "snapshot: %s.") % snapshot['name'])
+ LOG.info(_LI("There is no backing, and so there is no "
+ "snapshot: %s.") % snapshot['name'])
else:
self.volumeops.delete_snapshot(backing, snapshot['name'])
- LOG.info(_("Successfully deleted snapshot: %s.") %
+ LOG.info(_LI("Successfully deleted snapshot: %s.") %
snapshot['name'])
def delete_snapshot(self, snapshot):
if volume['size'] > src_size_in_gb:
self._extend_volumeops_virtual_disk(volume['size'], dest_vmdk_path,
datacenter)
- LOG.info(_("Successfully cloned new backing: %(back)s from "
- "source VMDK file: %(vmdk)s.") %
+ LOG.info(_LI("Successfully cloned new backing: %(back)s from "
+ "source VMDK file: %(vmdk)s.") %
{'back': backing, 'vmdk': src_vmdk_path})
def _create_cloned_volume(self, volume, src_vref):
self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(src_vref['name'])
if not backing:
- LOG.info(_("There is no backing for the source volume: "
- "%(svol)s. Not creating any backing for the "
- "volume: %(vol)s.") %
+ LOG.info(_LI("There is no backing for the source volume: "
+ "%(svol)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
{'svol': src_vref['name'],
'vol': volume['name']})
return
self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing:
- LOG.info(_("There is no backing for the source snapshot: "
- "%(snap)s. Not creating any backing for the "
- "volume: %(vol)s.") %
+ LOG.info(_LI("There is no backing for the source snapshot: "
+ "%(snap)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
{'snap': snapshot['name'],
'vol': volume['name']})
return
snapshot_moref = self.volumeops.get_snapshot(backing,
snapshot['name'])
if not snapshot_moref:
- LOG.info(_("There is no snapshot point for the snapshotted "
- "volume: %(snap)s. Not creating any backing for "
- "the volume: %(vol)s.") %
+ LOG.info(_LI("There is no snapshot point for the snapshotted "
+ "volume: %(snap)s. Not creating any backing for "
+ "the volume: %(vol)s.") %
{'snap': snapshot['name'], 'vol': volume['name']})
return
src_vmdk_path = self.volumeops.get_vmdk_path(snapshot_moref)
self.volumeops.delete_vmdk_file(
descriptor_ds_file_path, dc_ref)
except error_util.VimException:
- LOG.warn(_("Error occurred while deleting temporary "
- "disk: %s."),
+ LOG.warn(_LW("Error occurred while deleting temporary "
+ "disk: %s."),
descriptor_ds_file_path,
exc_info=True)
dest_path.get_descriptor_ds_file_path())
except error_util.VimException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Error occurred while copying %(src)s to "
- "%(dst)s."),
+ LOG.exception(_LE("Error occurred while copying %(src)s to "
+ "%(dst)s."),
{'src': src_path.get_descriptor_ds_file_path(),
'dst': dest_path.get_descriptor_ds_file_path()})
finally:
except Exception:
# Delete the descriptor.
with excutils.save_and_reraise_exception():
- LOG.exception(_("Error occurred while copying image: "
- "%(image_id)s to %(path)s."),
+ LOG.exception(_LE("Error occurred while copying image: "
+ "%(image_id)s to %(path)s."),
{'path': path.get_descriptor_ds_file_path(),
'image_id': image_id})
LOG.debug("Deleting descriptor: %s.",
self.volumeops.delete_file(
path.get_descriptor_ds_file_path(), dc_ref)
except error_util.VimException:
- LOG.warn(_("Error occurred while deleting "
- "descriptor: %s."),
+ LOG.warn(_LW("Error occurred while deleting "
+ "descriptor: %s."),
path.get_descriptor_ds_file_path(),
exc_info=True)
try:
self.volumeops.delete_backing(backing)
except error_util.VimException:
- LOG.warn(_("Error occurred while deleting backing: %s."),
+ LOG.warn(_LW("Error occurred while deleting backing: %s."),
backing,
exc_info=True)
except Exception:
# Delete backing and virtual disk created from image.
with excutils.save_and_reraise_exception():
- LOG.exception(_("Error occurred while creating volume: %(id)s"
- " from image: %(image_id)s."),
+ LOG.exception(_LE("Error occurred while creating "
+ "volume: %(id)s"
+ " from image: %(image_id)s."),
{'id': volume['id'],
'image_id': image_id})
self._delete_temp_backing(backing)
image_size=image_size)
except exception.CinderException as excep:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Exception in copy_image_to_volume: %s."),
+ LOG.exception(_LE("Exception in copy_image_to_volume: %s."),
excep)
backing = self.volumeops.get_backing(volume['name'])
if backing:
- LOG.exception(_("Deleting the backing: %s") % backing)
+ LOG.exception(_LE("Deleting the backing: %s") % backing)
# delete the backing
self.volumeops.delete_backing(backing)
- LOG.info(_("Done copying image: %(id)s to volume: %(vol)s.") %
+ LOG.info(_LI("Done copying image: %(id)s to volume: %(vol)s.") %
{'id': image_id, 'vol': volume['name']})
def _extend_vmdk_virtual_disk(self, name, new_size_in_gb):
"""
backing = self.volumeops.get_backing(name)
if not backing:
- LOG.info(_("The backing is not found, so there is no need "
- "to extend the vmdk virtual disk for the volume "
- "%s."), name)
+ LOG.info(_LI("The backing is not found, so there is no need "
+ "to extend the vmdk virtual disk for the volume "
+ "%s."), name)
else:
root_vmdk_path = self.volumeops.get_vmdk_path(backing)
datacenter = self.volumeops.get_dc(backing)
root_vmdk_path, datacenter)
except error_util.VimException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Unable to extend the size of the "
- "vmdk virtual disk at the path %s."),
+ LOG.exception(_LE("Unable to extend the size of the "
+ "vmdk virtual disk at the path %s."),
root_vmdk_path)
def copy_image_to_volume(self, context, volume, image_service, image_id):
image_size_in_bytes, image_adapter_type, image_disk_type)
except exception.CinderException as excep:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Exception in copying the image to the "
- "volume: %s."), excep)
+ LOG.exception(_LE("Exception in copying the image to the "
+ "volume: %s."), excep)
LOG.debug("Volume: %(id)s created from image: %(image_id)s.",
{'id': volume['id'],
# get backing vm of volume and its vmdk path
backing = self.volumeops.get_backing(volume['name'])
if not backing:
- LOG.info(_("Backing not found, creating for volume: %s") %
+ LOG.info(_LI("Backing not found, creating for volume: %s") %
volume['name'])
backing = self._create_backing_in_inventory(volume)
vmdk_file_path = self.volumeops.get_vmdk_path(backing)
vmdk_size=volume['size'] * units.Gi,
image_name=image_meta['name'],
image_version=1)
- LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") %
+ LOG.info(_LI("Done copying volume %(vol)s to a new image %(img)s") %
{'vol': volume['name'], 'img': image_meta['name']})
def _in_use(self, volume):
"""
# Can't attempt retype if the volume is in use.
if self._in_use(volume):
- LOG.warn(_("Volume: %s is in use, can't retype."),
+ LOG.warn(_LW("Volume: %s is in use, can't retype."),
volume['name'])
return False
best_candidate = self.ds_sel.select_datastore(req)
if not best_candidate:
# No candidate datastores; can't retype.
- LOG.warn(_("There are no datastores matching new requirements;"
- " can't retype volume: %s."),
+ LOG.warn(_LW("There are no datastores matching new "
+ "requirements; can't retype volume: %s."),
volume['name'])
return False
backing = new_backing
except error_util.VimException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Error occurred while cloning backing:"
- " %s during retype."),
+ LOG.exception(_LE("Error occurred while cloning "
+ "backing:"
+ " %s during retype."),
backing)
if renamed:
LOG.debug("Undo rename of backing: %(backing)s; "
self.volumeops.rename_backing(backing,
volume['name'])
except error_util.VimException:
- LOG.warn(_("Changing backing: %(backing)s name"
- " from %(new_name)s to %(old_name)s"
- " failed."),
+ LOG.warn(_LW("Changing backing: %(backing)s "
+ "name from %(new_name)s to "
+ "%(old_name)s failed."),
{'backing': backing,
'new_name': tmp_name,
'old_name': volume['name']})
# try extending vmdk in place
try:
self._extend_vmdk_virtual_disk(vol_name, new_size)
- LOG.info(_("Done extending volume %(vol)s to size %(size)s GB.") %
+ LOG.info(_LI("Done extending volume %(vol)s "
+ "to size %(size)s GB.") %
{'vol': vol_name, 'size': new_size})
return
except error_util.VimFaultException:
- LOG.info(_("Relocating volume %s vmdk to a different "
- "datastore since trying to extend vmdk file "
- "in place failed."), vol_name)
+ LOG.info(_LI("Relocating volume %s vmdk to a different "
+ "datastore since trying to extend vmdk file "
+ "in place failed."), vol_name)
# If in place extend fails, then try to relocate the volume
try:
(host, rp, folder, summary) = self._select_ds_for_volume(new_size)
except error_util.VimException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Not able to find a different datastore to "
- "place the extended volume %s."), vol_name)
+ LOG.exception(_LE("Not able to find a different datastore to "
+ "place the extended volume %s."), vol_name)
- LOG.info(_("Selected datastore %(ds)s to place extended volume of "
- "size %(size)s GB.") % {'ds': summary.name,
- 'size': new_size})
+ LOG.info(_LI("Selected datastore %(ds)s to place extended volume of "
+ "size %(size)s GB.") % {'ds': summary.name,
+ 'size': new_size})
try:
backing = self.volumeops.get_backing(vol_name)
self.volumeops.move_backing_to_folder(backing, folder)
except error_util.VimException:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Not able to relocate volume %s for "
- "extending."), vol_name)
- LOG.info(_("Done extending volume %(vol)s to size %(size)s GB.") %
+ LOG.exception(_LE("Not able to relocate volume %s for "
+ "extending."), vol_name)
+ LOG.info(_LI("Done extending volume %(vol)s to size %(size)s GB.") %
{'vol': vol_name, 'size': new_size})
@contextlib.contextmanager
return vm_ref
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Error occurred while creating temporary "
- "backing."))
+ LOG.exception(_LE("Error occurred while creating temporary "
+ "backing."))
backing = self.volumeops.get_backing(name)
if backing is not None:
self._delete_temp_backing(backing)
self.volumeops.rename_backing(backing,
volume['name'])
except error_util.VimException:
- LOG.warn(_("Cannot undo volume rename; old name "
- "was %(old_name)s and new name is "
- "%(new_name)s."),
+ LOG.warn(_LW("Cannot undo volume rename; old name "
+ "was %(old_name)s and new name is "
+ "%(new_name)s."),
{'old_name': volume['name'],
'new_name': tmp_backing_name},
exc_info=True)
pbm_service_wsdl = os.path.join(curr_dir, 'wsdl', major_minor,
'pbmService.wsdl')
if not os.path.exists(pbm_service_wsdl):
- LOG.warn(_("PBM WSDL file %s is missing!"), pbm_service_wsdl)
+ LOG.warn(_LW("PBM WSDL file %s is missing!"), pbm_service_wsdl)
return
pbm_wsdl = 'file://' + pbm_service_wsdl
- LOG.info(_("Using PBM WSDL location: %s"), pbm_wsdl)
+ LOG.info(_LI("Using PBM WSDL location: %s"), pbm_wsdl)
return pbm_wsdl
def _get_vc_version(self):
"""
version_str = self.configuration.vmware_host_version
if version_str:
- LOG.info(_("Using overridden vmware_host_version from config: "
- "%s"), version_str)
+ LOG.info(_LI("Using overridden vmware_host_version from config: "
+ "%s"), version_str)
else:
version_str = self.session.vim.service_content.about.version
- LOG.info(_("Fetched VC server version: %s"), version_str)
+ LOG.info(_LI("Fetched VC server version: %s"), version_str)
# convert version_str to LooseVersion and return
version = None
try:
version = dist_version.LooseVersion(version_str)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Version string '%s' is not parseable"),
+ LOG.exception(_LE("Version string '%s' is not parseable"),
version_str)
return version
self._volumeops = volumeops.VMwareVolumeOps(self.session, max_objects)
self._ds_sel = hub.DatastoreSelector(self.volumeops, self.session)
- LOG.info(_("Successfully setup driver: %(driver)s for server: "
- "%(ip)s.") % {'driver': self.__class__.__name__,
- 'ip': self.configuration.vmware_host_ip})
+ LOG.info(_LI("Successfully setup driver: %(driver)s for server: "
+ "%(ip)s.") % {'driver': self.__class__.__name__,
+ 'ip': self.configuration.vmware_host_ip})
def _get_volume_group_folder(self, datacenter):
"""Get volume group folder.
(folder, summary) = self._get_folder_ds_summary(volume,
resource_pool,
datastores)
- LOG.info(_("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
+ LOG.info(_LI("Relocating volume: %(backing)s to %(ds)s and %(rp)s.") %
{'backing': backing, 'ds': summary, 'rp': resource_pool})
# Relocate the backing to the datastore and folder
self.volumeops.relocate_backing(backing, summary.datastore,
# the size of the source volume to the volume size.
if volume['size'] > src_vsize:
self._extend_vmdk_virtual_disk(volume['name'], volume['size'])
- LOG.info(_("Successfully created clone: %s.") % clone)
+ LOG.info(_LI("Successfully created clone: %s.") % clone)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(snapshot['volume_name'])
if not backing:
- LOG.info(_("There is no backing for the snapshotted volume: "
- "%(snap)s. Not creating any backing for the "
- "volume: %(vol)s.") %
+ LOG.info(_LI("There is no backing for the snapshotted volume: "
+ "%(snap)s. Not creating any backing for the "
+ "volume: %(vol)s.") %
{'snap': snapshot['name'], 'vol': volume['name']})
return
snapshot_moref = self.volumeops.get_snapshot(backing,
snapshot['name'])
if not snapshot_moref:
- LOG.info(_("There is no snapshot point for the snapshotted "
- "volume: %(snap)s. Not creating any backing for "
- "the volume: %(vol)s.") %
+ LOG.info(_LI("There is no snapshot point for the snapshotted "
+ "volume: %(snap)s. Not creating any backing for "
+ "the volume: %(vol)s.") %
{'snap': snapshot['name'], 'vol': volume['name']})
return
clone_type = VMwareVcVmdkDriver._get_clone_type(volume)
self._verify_volume_creation(volume)
backing = self.volumeops.get_backing(src_vref['name'])
if not backing:
- LOG.info(_("There is no backing for the source volume: %(src)s. "
- "Not creating any backing for volume: %(vol)s.") %
+ LOG.info(_LI("There is no backing for the source volume: %(src)s. "
+ "Not creating any backing for volume: %(vol)s.") %
{'src': src_vref['name'], 'vol': volume['name']})
return
clone_type = VMwareVcVmdkDriver._get_clone_type(volume)
from eventlet import timeout
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import io_util
write_thread.stop()
# Log and raise the exception.
- LOG.exception(_("Error occurred during image transfer."))
+ LOG.exception(_LE("Error occurred during image transfer."))
if isinstance(exc, error_util.ImageTransferException):
raise
raise error_util.ImageTransferException(exc)
file_size)
start_transfer(context, timeout_secs, read_handle, file_size,
write_file_handle=write_handle)
- LOG.info(_("Downloaded image: %s from glance image server.") % image_id)
+ LOG.info(_LI("Downloaded image: %s from glance "
+ "image server.") % image_id)
def fetch_stream_optimized_image(context, timeout_secs, image_service,
file_size)
start_transfer(context, timeout_secs, read_handle, file_size,
write_file_handle=write_handle)
- LOG.info(_("Downloaded image: %s from glance image server.") % image_id)
+ LOG.info(_LI("Downloaded image: %s from glance image "
+ "server.") % image_id)
def upload_image(context, timeout_secs, image_service, image_id, owner_id,
start_transfer(context, timeout_secs, read_handle, file_size,
image_service=image_service, image_id=image_id,
image_meta=image_metadata)
- LOG.info(_("Uploaded image: %s to the Glance image server.") % image_id)
+ LOG.info(_LI("Uploaded image: %s to the Glance image server.") % image_id)
def download_stream_optimized_disk(
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.windows import constants
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
else:
- LOG.info(_('Ignored target creation error "%s"'
- ' while ensuring export'), exc)
+ LOG.info(_LI('Ignored target creation error "%s"'
+ ' while ensuring export'), exc)
def remove_iscsi_target(self, target_name):
"""Removes ISCSI target."""
from oslo.config import cfg
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
cg_name = self._get_volume_cg_name(volume_name)
if not cg_name:
# If the volume isn't present, then don't attempt to delete
- LOG.warning(_("snapshot: original volume %s not found, "
- "skipping delete operation")
+ LOG.warning(_LW("snapshot: original volume %s not found, "
+ "skipping delete operation")
% snapshot['volume_name'])
return True
snap_id = self._get_snap_id(cg_name, snapshot['name'])
if not snap_id:
# If the snapshot isn't present, then don't attempt to delete
- LOG.warning(_("snapshot: snapshot %s not found, "
- "skipping delete operation")
+ LOG.warning(_LW("snapshot: snapshot %s not found, "
+ "skipping delete operation")
% snapshot['name'])
return True
import time
import urllib2
-from cinder.i18n import _, _LE
+from cinder.i18n import _LE, _LI
from cinder.openstack.common import log
LOG = log.getLogger(__name__)
self.headers['x-auth-session'] = \
result.get_header('x-auth-session')
self.do_logout = True
- LOG.info(_('ZFSSA version: %s') %
+ LOG.info(_LI('ZFSSA version: %s') %
result.get_header('x-zfssa-version'))
elif result.status == httplib.NOT_FOUND:
self.db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let resetting the status cause the rescheduling to fail.
- LOG.exception(_("Volume %s: resetting 'creating' status failed."),
+ LOG.exception(_LE("Volume %s: resetting 'creating' "
+ "status failed."),
volume_id)
def revert(self, context, result, flow_failures, **kwargs):
self._reschedule(context, cause, **kwargs)
self._post_reschedule(context, volume_id)
except exception.CinderException:
- LOG.exception(_("Volume %s: rescheduling failed"), volume_id)
+ LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
class ExtractVolumeRefTask(flow_utils.CinderTask):
# If notification sending of volume database entry reading fails
# then we shouldn't error out the whole workflow since this is
# not always information that must be sent for volumes to operate
- LOG.exception(_("Failed notifying about the volume"
- " action %(event)s for volume %(volume_id)s") %
+ LOG.exception(_LE("Failed notifying about the volume"
+ " action %(event)s for volume %(volume_id)s") %
{'event': self.event_suffix,
'volume_id': volume_id})
snapshot_ref['volume_id'])
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
- LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable"
- " flag using the provided glance snapshot "
- "%(snapshot_ref_id)s volume reference") %
+ LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
+ "bootable"
+ " flag using the provided glance snapshot "
+ "%(snapshot_ref_id)s volume reference") %
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot_ref['volume_id']})
raise exception.MetadataUpdateFailure(reason=ex)
LOG.debug('Marking volume %s as bootable.', volume_id)
self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex:
- LOG.exception(_("Failed updating volume %(volume_id)s bootable"
- " flag to true") % {'volume_id': volume_id})
+ LOG.exception(_LE("Failed updating volume %(volume_id)s bootable "
+ "flag to true") % {'volume_id': volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
def _create_from_source_volume(self, context, volume_ref,
volume_ref = self.db.volume_update(context,
volume_ref['id'], updates)
except exception.CinderException:
- LOG.exception(_("Failed updating volume %(volume_id)s with "
- "%(updates)s") %
+ LOG.exception(_LE("Failed updating volume %(volume_id)s with "
+ "%(updates)s") %
{'volume_id': volume_ref['id'],
'updates': updates})
self._copy_image_to_volume(context, volume_ref,
# If somehow the update failed we want to ensure that the
# failure is logged (but not try rescheduling since the volume at
# this point has been created).
- LOG.exception(_("Failed updating model of volume %(volume_id)s"
- " with creation provided model %(model)s") %
+ LOG.exception(_LE("Failed updating model of volume %(volume_id)s "
+ "with creation provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
except exception.CinderException:
- LOG.exception(_("Failed updating volume %(volume_id)s with "
- "%(update)s") % {'volume_id': volume_id,
- 'update': update})
+ LOG.exception(_LE("Failed updating volume %(volume_id)s with "
+ "%(update)s") % {'volume_id': volume_id,
+ 'update': update})
# Even if the update fails, the volume is ready.
msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
LOG.info(msg % {
volume_ref = self.db.volume_update(context, volume_ref['id'],
model_update)
except exception.CinderException:
- LOG.exception(_("Failed updating model of volume %(volume_id)s"
- " with creation provided model %(model)s") %
+ LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
+ " with creation provided model %(model)s") %
{'volume_id': volume_ref['id'],
'model': model_update})
raise
from cinder import context
from cinder import db
from cinder import exception
-from cinder.i18n import _, _LE
+from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.volume import volume_types
db.qos_specs_associate(context, specs_id, type_id)
except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % e)
- LOG.warn(_('Failed to associate qos specs '
- '%(id)s with type: %(vol_type_id)s') %
+ LOG.warn(_LW('Failed to associate qos specs '
+ '%(id)s with type: %(vol_type_id)s') %
dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
type_id=type_id)
db.qos_specs_disassociate(context, specs_id, type_id)
except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % e)
- LOG.warn(_('Failed to disassociate qos specs '
- '%(id)s with type: %(vol_type_id)s') %
+ LOG.warn(_LW('Failed to disassociate qos specs '
+ '%(id)s with type: %(vol_type_id)s') %
dict(id=specs_id, vol_type_id=type_id))
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=type_id)
db.qos_specs_disassociate_all(context, specs_id)
except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % e)
- LOG.warn(_('Failed to disassociate qos specs %s.') % specs_id)
+ LOG.warn(_LW('Failed to disassociate qos specs %s.') % specs_id)
raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
type_id=None)
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import utils
try:
execute('cgcreate', '-g', 'blkio:%s' % group_name, run_as_root=True)
except processutils.ProcessExecutionError:
- LOG.warn(_('Failed to create blkio cgroup'))
+ LOG.warn(_LW('Failed to create blkio cgroup'))
return None
try:
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
- LOG.info(_("Performing secure delete on volume: %s") % volume_path)
+ LOG.info(_LI("Performing secure delete on volume: %s") % volume_path)
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
- LOG.info(_('Elapsed time for clear volume: %.2f sec') % duration)
+ LOG.info(_LI('Elapsed time for clear volume: %.2f sec') % duration)
def supports_thin_provisioning():
# Couldn't find volume type with the name in default_volume_type
# flag, record this issue and move on
#TODO(zhiteng) consider add notification to warn admin
- LOG.exception(_('Default volume type is not found, '
- 'please check default_volume_type config: %s'), e)
+ LOG.exception(_LE('Default volume type is not found, '
+ 'please check default_volume_type '
+ 'config: %s'), e)
return vol_type
msg = _("Exception: %s") % six.text_type(cisco_ex)
raise exception.FCZoneDriverException(msg)
except Exception as e:
- LOG.error(_("Exception: %s") % six.text_type(e))
+ LOG.error(_LE("Exception: %s") % six.text_type(e))
msg = (_("Failed to add zoning configuration %s") %
six.text_type(e))
raise exception.FCZoneDriverException(msg)
"""
import logging
-from cinder.i18n import _, _LI
+from cinder.i18n import _LI, _LW
from cinder.openstack.common import log
from cinder.volume.configuration import Configuration
from cinder.volume import manager
def decorator(self, *args, **kwargs):
conn_info = initialize_connection(self, *args, **kwargs)
if not conn_info:
- LOG.warn(_("Driver didn't return connection info, "
- "can't add zone."))
+ LOG.warn(_LW("Driver didn't return connection info, "
+ "can't add zone."))
return None
vol_type = conn_info.get('driver_volume_type', None)
def decorator(self, *args, **kwargs):
conn_info = terminate_connection(self, *args, **kwargs)
if not conn_info:
- LOG.warn(_("Driver didn't return connection info from "
- "terminate_connection call."))
+ LOG.warn(_LW("Driver didn't return connection info from "
+ "terminate_connection call."))
return None
vol_type = conn_info.get('driver_volume_type', None)