# Need to register global_opts
from cinder.common import config # noqa
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
from cinder import service
from cinder import utils
server = service.WSGIService('osapi_volume')
launcher.launch_service(server, workers=server.workers or 1)
except (Exception, SystemExit):
- LOG.exception(_('Failed to load osapi_volume'))
+ LOG.exception(_LE('Failed to load osapi_volume'))
for binary in ['cinder-volume', 'cinder-scheduler', 'cinder-backup']:
try:
launcher.launch_service(service.Service.create(binary=binary))
except (Exception, SystemExit):
- LOG.exception(_('Failed to load %s'), binary)
+ LOG.exception(_LE('Failed to load %s'), binary)
launcher.wait()
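For context, the _LE/_LI/_LW markers used throughout this patch come from cinder's i18n module, which builds them with oslo.i18n's TranslatorFactory. A minimal sketch of that module, assuming the standard oslo.i18n attribute names:

# cinder/i18n.py (sketch): level-specific translation markers.
from oslo import i18n

_translators = i18n.TranslatorFactory(domain='cinder')

# Primary translation function, for user-facing messages.
_ = _translators.primary

# Log-level translators; each routes its messages to a dedicated
# translation domain (e.g. cinder-log-error for _LE).
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical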
i18n.enable_lazy()
from cinder import context
from cinder import db
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder import rpc
from cinder import utils
volume_ref,
'exists', extra_usage_info=extra_info)
except Exception as e:
- LOG.error(_("Failed to send exists notification for volume %s.") %
+ LOG.error(_LE("Failed to send exists notification"
+ " for volume %s.") %
volume_ref.id)
print(traceback.format_exc(e))
volume_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as e:
- LOG.error(_("Failed to send create notification for "
- "volume %s.") % volume_ref.id)
+ LOG.error(_LE("Failed to send create notification for "
+ "volume %s.") % volume_ref.id)
print(traceback.format_exc(e))
if (CONF.send_actions and volume_ref.deleted_at and
volume_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as e:
- LOG.error(_("Failed to send delete notification for volume "
- "%s.") % volume_ref.id)
+ LOG.error(_LE("Failed to send delete notification for volume "
+ "%s.") % volume_ref.id)
print(traceback.format_exc(e))
snapshots = db.snapshot_get_active_by_window(admin_context,
'exists',
extra_info)
except Exception as e:
- LOG.error(_("Failed to send exists notification for snapshot %s.")
+ LOG.error(_LE("Failed to send exists notification "
+ "for snapshot %s.")
% snapshot_ref.id)
print(traceback.format_exc(e))
snapshot_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as e:
- LOG.error(_("Failed to send create notification for snapshot "
- "%s.") % snapshot_ref.id)
+ LOG.error(_LE("Failed to send create notification for snapshot"
+ "%s.") % snapshot_ref.id)
print(traceback.format_exc(e))
if (CONF.send_actions and snapshot_ref.deleted_at and
snapshot_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as e:
- LOG.error(_("Failed to send delete notification for snapshot "
- "%s.") % snapshot_ref.id)
+ LOG.error(_LE("Failed to send delete notification for snapshot"
+ "%s.") % snapshot_ref.id)
print(traceback.format_exc(e))
print(_("Volume usage audit completed"))
from cinder import context
from cinder.db import base
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
import cinder.policy
for over in overs:
if 'gigabytes' in over:
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
- "%(s_size)sG backup (%(d_consumed)dG of "
- "%(d_quota)dG already consumed)")
+ msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+ "%(s_size)sG backup (%(d_consumed)dG of "
+ "%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
consumed=_consumed('backup_gigabytes'),
quota=quotas['backup_gigabytes'])
elif 'backups' in over:
- msg = _("Quota exceeded for %(s_pid)s, tried to create "
- "backups (%(d_consumed)d backups "
- "already consumed)")
+ msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
+ "backups (%(d_consumed)d backups "
+ "already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
name = 'restore_backup_%s' % backup_id
description = 'auto-created_from_restore_from_backup'
- LOG.info(_("Creating volume of %(size)s GB for restore of "
- "backup %(backup_id)s"),
+ LOG.info(_LI("Creating volume of %(size)s GB for restore of "
+ "backup %(backup_id)s"),
{'size': size, 'backup_id': backup_id},
context=context)
volume = self.volume_api.create(context, size, name, description)
{'volume_size': volume['size'], 'size': size})
raise exception.InvalidVolume(reason=msg)
- LOG.info(_("Overwriting volume %(volume_id)s with restore of "
- "backup %(backup_id)s"),
+ LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
+ "backup %(backup_id)s"),
{'volume_id': volume_id, 'backup_id': backup_id},
context=context)
from cinder.db import base
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LI
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
try:
jsonutils.dumps(value)
except TypeError:
- LOG.info(_("Value with type=%s is not serializable") %
+ LOG.info(_LI("Value with type=%s is not serializable") %
type(value))
return False
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
- LOG.info(_("Unable to serialize field '%s' - excluding "
- "from backup") % (key))
+ LOG.info(_LI("Unable to serialize field '%s' - excluding "
+ "from backup") % (key))
continue
container[type_tag][key] = value
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
- LOG.info(_("Unable to serialize field '%s' - excluding "
- "from backup") % (entry))
+ LOG.info(_LI("Unable to serialize field '%s' - excluding "
+ "from backup") % (entry))
continue
container[type_tag][entry] = meta[entry]
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
- LOG.info(_("Unable to serialize field '%s' - "
- "excluding from backup") % (entry))
+ LOG.info(_LI("Unable to serialize field '%s' - "
+ "excluding from backup") % (entry))
continue
container[type_tag][entry.key] = entry.value
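All three loops above funnel through the same guard; a standalone sketch of it, with the surrounding module wiring (LOG, _LI) assumed:

from cinder.openstack.common import jsonutils

def _is_serializable(value):
    """Return True if value survives a JSON round trip."""
    try:
        jsonutils.dumps(value)
    except TypeError:
        LOG.info(_LI("Value with type=%s is not serializable"),
                 type(value))
        return False
    return True

# _is_serializable({'a': 1})  -> True
# _is_serializable(object()) -> False (logged at INFO, field excluded)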
from cinder.backup.driver import BackupDriver
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
self.rbd_stripe_count = CONF.backup_ceph_stripe_count
else:
- LOG.info(_("RBD striping not supported - ignoring configuration "
- "settings for rbd striping"))
+ LOG.info(_LI("RBD striping not supported - ignoring configuration "
+ "settings for rbd striping"))
self.rbd_stripe_count = 0
self.rbd_stripe_unit = 0
snap, rem = self._delete_backup_snapshot(client, base_name,
backup_id)
if rem:
- msg = (_("Backup base image of volume %(volume)s still "
- "has %(snapshots)s snapshots so skipping base "
- "image delete.") %
+ msg = (_LI("Backup base image of volume %(volume)s still "
+ "has %(snapshots)s snapshots so skipping base "
+ "image delete.") %
{'snapshots': rem, 'volume': volume_id})
LOG.info(msg)
return
- LOG.info(_("Deleting backup base image='%(basename)s' of "
- "volume %(volume)s.") %
+ LOG.info(_LI("Deleting backup base image='%(basename)s' of "
+ "volume %(volume)s.") %
{'basename': base_name, 'volume': volume_id})
# Delete base if no more snapshots
try:
except self.rbd.ImageBusy as exc:
# Allow a retry if the image is busy
if retries > 0:
- LOG.info(_("Backup image of volume %(volume)s is "
- "busy, retrying %(retries)s more time(s) "
- "in %(delay)ss.") %
+ LOG.info(_LI("Backup image of volume %(volume)s is "
+ "busy, retrying %(retries)s more time(s) "
+ "in %(delay)ss.") %
{'retries': retries,
'delay': delay,
'volume': volume_id})
eventlet.sleep(delay)
else:
- LOG.error(_("Max retries reached deleting backup "
- "%(basename)s image of volume %(volume)s.")
+ LOG.error(_LE("Max retries reached deleting backup "
+ "%(basename)s image of volume "
+ "%(volume)s.")
% {'volume': volume_id,
'basename': base_name})
raise exc
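The busy-image branch above sits inside a bounded retry loop; a minimal sketch of its shape, using the names visible in the hunk (the loop structure and variable wiring are assumptions, not the driver's exact code):

# Retry deleting an RBD base image that is still busy, then give up.
while True:
    try:
        self.rbd.RBD().remove(client.ioctx, base_name)
        break
    except self.rbd.ImageBusy:
        if retries <= 0:
            LOG.error(_LE("Max retries reached deleting backup "
                          "%(basename)s image of volume %(volume)s."),
                      {'basename': base_name, 'volume': volume_id})
            raise
        LOG.info(_LI("Backup image of volume %(volume)s is busy, "
                     "retrying %(retries)s more time(s) in %(delay)ss."),
                 {'volume': volume_id, 'retries': retries,
                  'delay': delay})
        eventlet.sleep(delay)
        retries -= 1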
p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
- LOG.error(_("Pipe1 failed - %s ") % unicode(e))
+ LOG.error(_LE("Pipe1 failed - %s ") % unicode(e))
raise
# NOTE(dosaboy): ensure that the pipe is blocking. This is to work
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
- LOG.error(_("Pipe2 failed - %s ") % unicode(e))
+ LOG.error(_LE("Pipe2 failed - %s ") % unicode(e))
raise
p1.stdout.close()
dest_user=rbd_user, dest_conf=rbd_conf,
src_snap=restore_point)
except exception.BackupRBDOperationFailed:
- LOG.exception(_("Differential restore failed, trying full "
- "restore"))
+ LOG.exception(_LE("Differential restore failed, trying full "
+ "restore"))
raise
# If the volume we are restoring to is larger than the backup volume,
return True, restore_point
else:
- LOG.info(_("No restore point found for backup='%(backup)s' of "
- "volume %(volume)s - forcing full copy.") %
+ LOG.info(_LI("No restore point found for "
+ "backup='%(backup)s' of "
+ "volume %(volume)s - forcing full copy.") %
{'backup': backup['id'],
'volume': backup['volume_id']})
LOG.debug('Restore to volume %s finished successfully.' %
volume_id)
except exception.BackupOperationError as e:
- LOG.error(_('Restore to volume %(volume)s finished with error - '
- '%(error)s.') % {'error': e, 'volume': volume_id})
+ LOG.error(_LE('Restore to volume %(volume)s finished with error - '
+ '%(error)s.') % {'error': e, 'volume': volume_id})
raise
def delete(self, backup):
try:
self._try_delete_base_image(backup['id'], backup['volume_id'])
except self.rbd.ImageNotFound:
- msg = (_("RBD image for backup %(backup)s of volume %(volume)s "
- "not found. Deleting backup metadata.")
+ msg = (_LW("RBD image for backup %(backup)s of volume %(volume)s "
+ "not found. Deleting backup metadata.")
% {'backup': backup['id'], 'volume': backup['volume_id']})
LOG.warning(msg)
delete_failed = True
VolumeMetadataBackup(client, backup['id']).remove_if_exists()
if delete_failed:
- LOG.info(_("Delete of backup '%(backup)s' for volume '%(volume)s' "
- "finished with warning.") %
+ LOG.info(_LI("Delete of backup '%(backup)s' "
+ "for volume '%(volume)s' "
+ "finished with warning.") %
{'backup': backup['id'], 'volume': backup['volume_id']})
else:
LOG.debug("Delete of backup '%(backup)s' for volume "
from cinder.backup.driver import BackupDriver
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import units
LOG = logging.getLogger(__name__)
swiftbackup_service_opts = [
CONF.backup_swift_auth))
if CONF.backup_swift_auth == 'single_user':
if CONF.backup_swift_user is None:
- LOG.error(_("single_user auth mode enabled, "
- "but %(param)s not set")
+ LOG.error(_LE("single_user auth mode enabled, "
+ "but %(param)s not set")
% {'param': 'backup_swift_user'})
raise exception.ParameterNotFound(param='backup_swift_user')
self.conn = swift.Connection(
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.exception(
- _("Backup volume metadata to swift failed: %s") %
+ _LE("Backup volume metadata to swift failed: %s") %
six.text_type(err))
self.delete(backup)
try:
fileno = volume_file.fileno()
except IOError:
- LOG.info("volume_file does not support fileno() so skipping "
- "fsync()")
+ LOG.info(_LI("volume_file does not support "
+ "fileno() so skipping"
+ "fsync()"))
else:
os.fsync(fileno)
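The guard above exists because volume_file may be any file-like object, and ones without a real descriptor raise IOError from fileno(). A standalone sketch of the pattern (the function name is illustrative; LOG and _LI are assumed from the module):

import os

def flush_to_disk(volume_file):
    volume_file.flush()
    try:
        fileno = volume_file.fileno()
    except IOError:
        # In-memory or wrapped streams have no descriptor; flush()
        # above is the best we can do, so skip fsync().
        LOG.info(_LI("volume_file does not support fileno() so "
                     "skipping fsync()"))
    else:
        os.fsync(fileno)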
try:
swift_object_names = self._generate_object_names(backup)
except Exception:
- LOG.warn(_('swift error while listing objects, continuing'
- ' with delete'))
+ LOG.warn(_LW('swift error while listing objects, continuing'
+ ' with delete'))
for swift_object_name in swift_object_names:
try:
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
except Exception:
- LOG.warn(_('swift error while deleting object %s, '
- 'continuing with delete') % swift_object_name)
+ LOG.warn(_LW('swift error while deleting object %s, '
+ 'continuing with delete')
+ % swift_object_name)
else:
LOG.debug('deleted swift object: %(swift_object_name)s'
' in container: %(container)s' %
from cinder.backup.driver import BackupDriver
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import utils
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
- err = (_('backup: %(vol_id)s failed to remove backup hardlink'
- ' from %(vpath)s to %(bpath)s.\n'
- 'stdout: %(out)s\n stderr: %(err)s.')
+ err = (_LE('backup: %(vol_id)s failed to remove backup hardlink'
+ ' from %(vpath)s to %(bpath)s.\n'
+ 'stdout: %(out)s\n stderr: %(err)s.')
% {'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
# log error if tsm cannot delete the backup object
# but do not raise exception so that cinder backup
# object can be removed.
- err = (_('delete: %(vol_id)s failed with '
- 'stdout: %(out)s\n stderr: %(err)s')
+ err = (_LE('delete: %(vol_id)s failed with '
+ 'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': out,
'err': err})
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI, _LW
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
LOG.debug("Got backend '%s'." % (backend))
return backend
- LOG.info(_("Backend not found in hostname (%s) so using default.") %
+ LOG.info(_LI("Backend not found in hostname (%s) so using default.") %
(host))
if 'default' not in self.volume_managers:
self.volume_managers['default'] = default
def _init_volume_driver(self, ctxt, driver):
- LOG.info(_("Starting volume driver %(driver_name)s (%(version)s).") %
+ LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s).") %
{'driver_name': driver.__class__.__name__,
'version': driver.get_version()})
try:
for mgr in self.volume_managers.itervalues():
self._init_volume_driver(ctxt, mgr.driver)
- LOG.info(_("Cleaning up incomplete backup operations."))
+ LOG.info(_LI("Cleaning up incomplete backup operations."))
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
for volume in volumes:
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
if volume['status'] == 'backing-up':
- LOG.info(_('Resetting volume %s to available '
- '(was backing-up).') % volume['id'])
+ LOG.info(_LI('Resetting volume %s to available '
+ '(was backing-up).') % volume['id'])
mgr = self._get_manager(backend)
mgr.detach_volume(ctxt, volume['id'])
if volume['status'] == 'restoring-backup':
- LOG.info(_('Resetting volume %s to error_restoring '
- '(was restoring-backup).') % volume['id'])
+ LOG.info(_LI('Resetting volume %s to error_restoring '
+ '(was restoring-backup).') % volume['id'])
mgr = self._get_manager(backend)
mgr.detach_volume(ctxt, volume['id'])
self.db.volume_update(ctxt, volume['id'],
backups = self.db.backup_get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
- LOG.info(_('Resetting backup %s to error (was creating).')
+ LOG.info(_LI('Resetting backup %s to error (was creating).')
% backup['id'])
err = 'incomplete backup reset on manager restart'
self.db.backup_update(ctxt, backup['id'], {'status': 'error',
'fail_reason': err})
if backup['status'] == 'restoring':
- LOG.info(_('Resetting backup %s to available (was restoring).')
+ LOG.info(_LI('Resetting backup %s to '
+ 'available (was restoring).')
% backup['id'])
self.db.backup_update(ctxt, backup['id'],
{'status': 'available'})
if backup['status'] == 'deleting':
- LOG.info(_('Resuming delete on backup: %s.') % backup['id'])
+ LOG.info(_LI('Resuming delete on backup: %s.') % backup['id'])
self.delete_backup(ctxt, backup['id'])
def create_backup(self, context, backup_id):
backup = self.db.backup_get(context, backup_id)
volume_id = backup['volume_id']
volume = self.db.volume_get(context, volume_id)
- LOG.info(_('Create backup started, backup: %(backup_id)s '
- 'volume: %(volume_id)s.') %
+ LOG.info(_LI('Create backup started, backup: %(backup_id)s '
+ 'volume: %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
'size': volume['size'],
'availability_zone':
self.az})
- LOG.info(_('Create backup finished. backup: %s.'), backup_id)
+ LOG.info(_LI('Create backup finished. backup: %s.'), backup_id)
def restore_backup(self, context, backup_id, volume_id):
"""Restore volume backups from configured backup service."""
- LOG.info(_('Restore backup started, backup: %(backup_id)s '
- 'volume: %(volume_id)s.') %
+ LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
+ 'volume: %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
backup = self.db.backup_get(context, backup_id)
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
- LOG.info(_('Volume: %(vol_id)s, size: %(vol_size)d is '
- 'larger than backup: %(backup_id)s, '
- 'size: %(backup_size)d, continuing with restore.'),
+ LOG.info(_LI('Volume: %(vol_id)s, size: %(vol_size)d is '
+ 'larger than backup: %(backup_id)s, '
+ 'size: %(backup_size)d, continuing with restore.'),
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
self.db.volume_update(context, volume_id, {'status': 'available'})
self.db.backup_update(context, backup_id, {'status': 'available'})
- LOG.info(_('Restore backup finished, backup %(backup_id)s restored'
- ' to volume %(volume_id)s.') %
+ LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
+ ' to volume %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
def delete_backup(self, context, backup_id):
'fail_reason':
unicode(err)})
- LOG.info(_('Delete backup started, backup: %s.'), backup_id)
+ LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
self.db.backup_update(context, backup_id, {'host': self.host})
**reserve_opts)
except Exception:
reservations = None
- LOG.exception(_("Failed to update usages deleting backup"))
+ LOG.exception(_LE("Failed to update usages deleting backup"))
context = context.elevated()
self.db.backup_destroy(context, backup_id)
QUOTAS.commit(context, reservations,
project_id=backup['project_id'])
- LOG.info(_('Delete backup finished, backup %s deleted.'), backup_id)
+ LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)
def export_record(self, context, backup_id):
"""Export all volume backup metadata details to allow clean import.
:returns: 'backup_service' describing the needed driver.
:raises: InvalidBackup
"""
- LOG.info(_('Export record started, backup: %s.'), backup_id)
+ LOG.info(_LI('Export record started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
msg = unicode(err)
raise exception.InvalidBackup(reason=msg)
- LOG.info(_('Export record finished, backup %s exported.'), backup_id)
+ LOG.info(_LI('Export record finished, backup %s exported.'), backup_id)
return backup_record
def import_record(self,
:raises: InvalidBackup
:raises: ServiceNotFound
"""
- LOG.info(_('Import record started, backup_url: %s.'), backup_url)
+ LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)
# Can we import this backup?
if (backup_service != self.driver_name):
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup_id)
else:
- LOG.warn(_('Backup service %(service)s does not support '
- 'verify. Backup id %(id)s is not verified. '
- 'Skipping verify.') % {'service':
- self.driver_name,
- 'id': backup_id})
+ LOG.warn(_LW('Backup service %(service)s does not support '
+ 'verify. Backup id %(id)s is not verified. '
+ 'Skipping verify.') %
+ {'service': self.driver_name,
+ 'id': backup_id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
'fail_reason':
unicode(err)})
- LOG.info(_('Import record id %s metadata from driver '
- 'finished.') % backup_id)
+ LOG.info(_LI('Import record id %s metadata from driver '
+ 'finished.') % backup_id)
def reset_status(self, context, backup_id, status):
"""Reset volume backup status.
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
- LOG.info(_('Reset backup status started, backup_id: '
- '%(backup_id)s, status: %(status)s.'),
+ LOG.info(_LI('Reset backup status started, backup_id: '
+ '%(backup_id)s, status: %(status)s.'),
{'backup_id': backup_id,
'status': status})
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Backup driver has not been initialized"))
+ LOG.exception(_LE("Backup driver has not been initialized"))
backup = self.db.backup_get(context, backup_id)
backup_service = self._map_service_to_driver(backup['service'])
- LOG.info(_('Backup service: %s.'), backup_service)
+ LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
notifier_info = {'id': backup_id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups" + '.reset_status.end',
- notifier_info)
\ No newline at end of file
+ notifier_info)
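One further i18n point on the calls rewritten in this file: several still interpolate with % before handing the message to the logger. With lazy translation enabled the resulting Message object still defers rendering, but the comma form is preferable where practical, since the logger skips interpolation entirely when the level is disabled:

# Preferred: the logger interpolates only if INFO is emitted.
LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)

# Works, but interpolates eagerly even when INFO is disabled.
LOG.info(_LI('Delete backup started, backup: %s.') % backup_id)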
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
try:
quota_classes.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(quota_classes))
+ LOG.error(_LE("Table |%s| not created!"), repr(quota_classes))
raise
quota_usages = Table('quota_usages', meta,
try:
quota_usages.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(quota_usages))
+ LOG.error(_LE("Table |%s| not created!"), repr(quota_usages))
raise
reservations = Table('reservations', meta,
try:
reservations.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(reservations))
+ LOG.error(_LE("Table |%s| not created!"), repr(reservations))
raise
fkey = ForeignKeyConstraint(**params)
fkey.drop()
except Exception:
- LOG.error(_("Dropping foreign key reservations_ibfk_1 failed."))
+ LOG.error(_LE("Dropping foreign key reservations_ibfk_1 failed."))
quota_classes = Table('quota_classes', meta, autoload=True)
try:
quota_classes.drop()
except Exception:
- LOG.error(_("quota_classes table not dropped"))
+ LOG.error(_LE("quota_classes table not dropped"))
raise
quota_usages = Table('quota_usages', meta, autoload=True)
try:
quota_usages.drop()
except Exception:
- LOG.error(_("quota_usages table not dropped"))
+ LOG.error(_LE("quota_usages table not dropped"))
raise
reservations = Table('reservations', meta, autoload=True)
try:
reservations.drop()
except Exception:
- LOG.error(_("reservations table not dropped"))
+ LOG.error(_LE("reservations table not dropped"))
raise
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
-from cinder.i18n import _
+from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
try:
table.drop()
except Exception:
- LOG.error(_("migrations table not dropped"))
+ LOG.error(_LE("migrations table not dropped"))
raise
try:
table.create()
except Exception:
- LOG.error(_("Table |%s| not created"), repr(table))
+ LOG.error(_LE("Table |%s| not created"), repr(table))
raise
from oslo.config import cfg
from sqlalchemy import MetaData, Table
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging
# Get default values via config. The defaults will either
# Do not add entries if there are already 'default' entries. We don't
# want to write over something the user added.
if rows:
- LOG.info(_("Found existing 'default' entries in the quota_classes "
- "table. Skipping insertion of default values."))
+ LOG.info(_LI("Found existing 'default' entries in the quota_classes "
+ "table. Skipping insertion of default values."))
return
try:
'resource': 'gigabytes',
'hard_limit': CONF.quota_gigabytes,
'deleted': False, })
- LOG.info(_("Added default quota class data into the DB."))
+ LOG.info(_LI("Added default quota class data into the DB."))
except Exception:
- LOG.error(_("Default quota class data not inserted into the DB."))
+ LOG.error(_LE("Default quota class data not inserted into the DB."))
raise