try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
- LOG.warn(_LW("Deadlock detected when running "
- "'%(func_name)s': Retrying..."),
- dict(func_name=f.__name__))
+ LOG.warning(_LW("Deadlock detected when running "
+ "'%(func_name)s': Retrying..."),
+ dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
- "resources: %s") % unders)
+ "resources: %s"), unders)
if overs:
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
- msg = _LE("Unknown sort direction, must be 'desc' or 'asc'.")
+ msg = _("Unknown sort direction, must be 'desc' or 'asc'.")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs.append(default_dir_value)
# Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
- msg = _LE("Sort direction array size exceeds sort key array size.")
+ msg = _("Sort direction array size exceeds sort key array size.")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
results = model_query(context, models.Volume, session=session). \
filter_by(volume_type_id=id).all()
if results:
- msg = _('VolumeType %s deletion failed, VolumeType in use.') % id
- LOG.error(msg)
+ LOG.error(_LE('VolumeType %s deletion failed, '
+ 'VolumeType in use.'), id)
raise exception.VolumeTypeInUse(volume_type_id=id)
model_query(context, models.VolumeTypes, session=session).\
filter_by(id=id).\
value = dict(id=id, key=key, value=specs[key],
specs_id=qos_specs_id,
deleted=False)
- LOG.debug('qos_specs_update() value: %s' % value)
+ LOG.debug('qos_specs_update() value: %s', value)
spec_ref.update(value)
spec_ref.save(session=session)
# If the volume state is not 'awaiting-transfer' don't change it, but
# we can still mark the transfer record as deleted.
if volume_ref['status'] != 'awaiting-transfer':
- msg = _('Volume in unexpected state %s, '
- 'expected awaiting-transfer') % volume_ref['status']
- LOG.error(msg)
+ LOG.error(_LE('Volume in unexpected state %s, expected '
+ 'awaiting-transfer'), volume_ref['status'])
else:
volume_ref['status'] = 'available'
volume_ref.update(volume_ref)
try:
age_in_days = int(age_in_days)
except ValueError:
- msg = _LE('Invalid value for age, %(age)s')
- LOG.exception(msg, {'age': age_in_days})
- raise exception.InvalidParameterValue(msg % {'age': age_in_days})
- if age_in_days <= 0:
- msg = _LE('Must supply a positive value for age')
+ msg = _('Invalid value for age, %(age)s') % {'age': age_in_days}
LOG.exception(msg)
raise exception.InvalidParameterValue(msg)
+ if age_in_days <= 0:
+ msg = _('Must supply a positive value for age')
+ LOG.error(msg)
+ raise exception.InvalidParameterValue(msg)
engine = get_engine()
session = get_session()
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String, Table
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
LOG = logging.getLogger(__name__)
table.create()
except Exception:
LOG.info(repr(table))
- LOG.exception(_('Exception while creating table.'))
+ LOG.exception(_LE('Exception while creating table.'))
raise
if migrate_engine.name == "mysql":
tables = define_tables(meta)
tables.reverse()
for table in tables:
- LOG.info("dropping table %(table)s" % {'table': table})
+ LOG.info(_LI("dropping table %(table)s"), {'table': table})
table.drop()
fkey = ForeignKeyConstraint(**params)
fkey.drop()
except Exception:
- msg = _LE("Dropping foreign key %s failed.")
- LOG.error(msg, fk_name)
+ LOG.error(_LE("Dropping foreign key %s failed."), fk_name)
quota_classes = Table('quota_classes', meta, autoload=True)
try:
from sqlalchemy import Column, DateTime, Text, Boolean
from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
volume_glance_metadata.create()
except Exception:
- LOG.exception(_("Exception while creating table "
- "'volume_glance_metadata'"))
+ LOG.exception(_LE("Exception while creating table "
+ "'volume_glance_metadata'"))
meta.drop_all(tables=[volume_glance_metadata])
raise
try:
volume_glance_metadata.drop()
except Exception:
- LOG.error(_("volume_glance_metadata table not dropped"))
+ LOG.error(_LE("volume_glance_metadata table not dropped"))
raise
from oslo_log import log as logging
from sqlalchemy import Integer, MetaData, String, Table
+from cinder.i18n import _LI
+
LOG = logging.getLogger(__name__)
name=fkey_name)
try:
fkey.create()
- LOG.info('Created foreign key %s' % fkey_name)
+ LOG.info(_LI('Created foreign key %s'), fkey_name)
except Exception:
if migrate_engine.url.get_dialect().name.startswith('sqlite'):
pass
name=fkey_name)
try:
fkey.create()
- LOG.info('Created foreign key %s' % fkey_name)
+ LOG.info(_LI('Created foreign key %s'), fkey_name)
except Exception:
if migrate_engine.url.get_dialect().name.startswith('sqlite'):
pass
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, Integer, String, Table
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
backups.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(backups))
+ LOG.error(_LE("Table |%s| not created!"), repr(backups))
raise
try:
backups.drop()
except Exception:
- LOG.error(_("backups table not dropped"))
+ LOG.error(_LE("backups table not dropped"))
raise
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
snapshot_metadata.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(snapshot_metadata))
+ LOG.error(_LE("Table |%s| not created!"), repr(snapshot_metadata))
raise
try:
snapshot_metadata.drop()
except Exception:
- LOG.error(_("snapshot_metadata table not dropped"))
+ LOG.error(_LE("snapshot_metadata table not dropped"))
raise
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, String, Table, ForeignKey
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
transfers.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(transfers))
+ LOG.error(_LE("Table |%s| not created!"), repr(transfers))
raise
try:
transfers.drop()
except Exception:
- LOG.error(_("transfers table not dropped"))
+ LOG.error(_LE("transfers table not dropped"))
raise
from sqlalchemy import Boolean, Column, DateTime, ForeignKey
from sqlalchemy import Integer, MetaData, String, Table
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
table.drop()
except Exception:
- LOG.exception(_('Exception while dropping table %s.'),
+ LOG.exception(_LE('Exception while dropping table %s.'),
repr(table))
raise
try:
table.create()
except Exception:
- LOG.exception(_('Exception while creating table %s.'),
+ LOG.exception(_LE('Exception while creating table %s.'),
repr(table))
raise
from sqlalchemy import Column, ForeignKey, MetaData, Table
from sqlalchemy import Boolean, DateTime, Integer, String
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
volumes.create_column(encryption_key)
except Exception:
- LOG.error(_("Column |%s| not created!"), repr(encryption_key))
+ LOG.error(_LE("Column |%s| not created!"), repr(encryption_key))
raise
# encryption key UUID and volume type id -- must be stored per snapshot
try:
snapshots.create_column(encryption_key)
except Exception:
- LOG.error(_("Column |%s| not created!"), repr(encryption_key))
+ LOG.error(_LE("Column |%s| not created!"), repr(encryption_key))
raise
volume_type = Column('volume_type_id', String(36))
try:
snapshots.create_column(volume_type)
except Exception:
- LOG.error(_("Column |%s| not created!"), repr(volume_type))
+ LOG.error(_LE("Column |%s| not created!"), repr(volume_type))
raise
volume_types = Table('volume_types', meta, autoload=True)
try:
encryption.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(encryption))
+ LOG.error(_LE("Table |%s| not created!"), repr(encryption))
raise
try:
volumes.c.encryption_key_id.drop()
except Exception:
- LOG.error(_("encryption_key_id column not dropped from volumes"))
+ LOG.error(_LE("encryption_key_id column not dropped from volumes"))
raise
# drop encryption key UUID and volume type id for snapshots
try:
snapshots.c.encryption_key_id.drop()
except Exception:
- LOG.error(_("encryption_key_id column not dropped from snapshots"))
+ LOG.error(_LE("encryption_key_id column not dropped from snapshots"))
raise
try:
snapshots.c.volume_type_id.drop()
except Exception:
- LOG.error(_("volume_type_id column not dropped from snapshots"))
+ LOG.error(_LE("volume_type_id column not dropped from snapshots"))
raise
# drop encryption types table
try:
encryption.drop()
except Exception:
- LOG.error(_("encryption table not dropped"))
+ LOG.error(_LE("encryption table not dropped"))
raise
from sqlalchemy import ForeignKey, MetaData, String, Table
from migrate import ForeignKeyConstraint
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
quality_of_service_specs.create()
except Exception:
- LOG.error(_("Table quality_of_service_specs not created!"))
+ LOG.error(_LE("Table quality_of_service_specs not created!"))
raise
volume_types = Table('volume_types', meta, autoload=True)
volume_types.create_column(qos_specs_id)
volume_types.update().values(qos_specs_id=None).execute()
except Exception:
- LOG.error(_("Added qos_specs_id column to volume type table failed."))
+ LOG.error(_LE("Added qos_specs_id column to volume type table "
+ "failed."))
raise
fkey = ForeignKeyConstraint(**params)
fkey.drop()
except Exception:
- LOG.error(_("Dropping foreign key volume_types_ibfk_1 failed"))
+ LOG.error(_LE("Dropping foreign key volume_types_ibfk_1 failed"))
volume_types = Table('volume_types', meta, autoload=True)
qos_specs_id = Column('qos_specs_id', String(36))
try:
volume_types.drop_column(qos_specs_id)
except Exception:
- LOG.error(_("Dropping qos_specs_id column failed."))
+ LOG.error(_LE("Dropping qos_specs_id column failed."))
raise
try:
qos_specs.drop()
except Exception:
- LOG.error(_("Dropping quality_of_service_specs table failed."))
+ LOG.error(_LE("Dropping quality_of_service_specs table failed."))
raise
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
volume_admin_metadata.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(volume_admin_metadata))
+ LOG.error(_LE("Table |%s| not created!"), repr(volume_admin_metadata))
raise
try:
volume_admin_metadata.drop()
except Exception:
- LOG.error(_("volume_admin_metadata table not dropped"))
+ LOG.error(_LE("volume_admin_metadata table not dropped"))
raise
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
-from cinder.i18n import _
+from cinder.i18n import _LI
LOG = logging.getLogger(__name__)
reservations = Table('reservations', meta, autoload=True)
if _get_deleted_expire_index(reservations):
- LOG.info(_('Skipped adding reservations_deleted_expire_idx '
- 'because an equivalent index already exists.'))
+ LOG.info(_LI('Skipped adding reservations_deleted_expire_idx '
+ 'because an equivalent index already exists.'))
return
# Based on expire_reservations query
if index:
index.drop(migrate_engine)
else:
- LOG.info(_('Skipped removing reservations_deleted_expire_idx '
- 'because index does not exist.'))
+ LOG.info(_LI('Skipped removing reservations_deleted_expire_idx '
+ 'because the index does not exist.'))
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, MetaData, String, Table
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
try:
consistencygroups.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(consistencygroups))
+ LOG.error(_LE("Table |%s| not created!"), repr(consistencygroups))
raise
# New table
try:
cgsnapshots.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(cgsnapshots))
+ LOG.error(_LE("Table |%s| not created!"), repr(cgsnapshots))
raise
# Add column to volumes table
volumes.create_column(consistencygroup_id)
volumes.update().values(consistencygroup_id=None).execute()
except Exception:
- LOG.error(_("Adding consistencygroup_id column to volumes table"
- " failed."))
+ LOG.error(_LE("Adding consistencygroup_id column to volumes table"
+ " failed."))
raise
# Add column to snapshots table
snapshots.create_column(cgsnapshot_id)
snapshots.update().values(cgsnapshot_id=None).execute()
except Exception:
- LOG.error(_("Adding cgsnapshot_id column to snapshots table"
- " failed."))
+ LOG.error(_LE("Adding cgsnapshot_id column to snapshots table"
+ " failed."))
raise
fkey = ForeignKeyConstraint(**params)
fkey.drop()
except Exception:
- LOG.error(_("Dropping foreign key 'cgsnapshot_id' in "
- "the 'snapshots' table failed."))
+ LOG.error(_LE("Dropping foreign key 'cgsnapshot_id' in "
+ "the 'snapshots' table failed."))
snapshots = Table('snapshots', meta, autoload=True)
cgsnapshot_id = snapshots.columns.cgsnapshot_id
fkey = ForeignKeyConstraint(**params)
fkey.drop()
except Exception:
- LOG.error(_("Dropping foreign key 'consistencygroup_id' in "
- "the 'volumes' table failed."))
+ LOG.error(_LE("Dropping foreign key 'consistencygroup_id' in "
+ "the 'volumes' table failed."))
volumes = Table('volumes', meta, autoload=True)
consistencygroup_id = volumes.columns.consistencygroup_id
try:
cgsnapshots.drop()
except Exception:
- LOG.error(_("cgsnapshots table not dropped"))
+ LOG.error(_LE("cgsnapshots table not dropped"))
raise
# Drop table
try:
consistencygroups.drop()
except Exception:
- LOG.error(_("consistencygroups table not dropped"))
+ LOG.error(_LE("consistencygroups table not dropped"))
raise
from oslo_log import log as logging
from sqlalchemy import MetaData, Table
-from cinder.i18n import _
+from cinder.i18n import _LE, _LI
# Get default values via config. The defaults will either
# come from the default values set in the quota option
# Do not add entries if there are already 'consistencygroups' entries.
if rows:
- LOG.info(_("Found existing 'consistencygroups' entries in the"
- "quota_classes table. Skipping insertion."))
+ LOG.info(_LI("Found existing 'consistencygroups' entries in the "
+ "quota_classes table. Skipping insertion."))
return
try:
'resource': 'consistencygroups',
'hard_limit': CONF.quota_consistencygroups,
'deleted': False, })
- LOG.info(_("Added default consistencygroups quota class data into "
- "the DB."))
+ LOG.info(_LI("Added default consistencygroups quota class data into "
+ "the DB."))
except Exception:
- LOG.error(_("Default consistencygroups quota class data not inserted "
- "into the DB."))
+ LOG.error(_LE("Default consistencygroups quota class data not "
+ "inserted into the DB."))
raise
from sqlalchemy import Boolean, Column, DateTime, UniqueConstraint
from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
-from cinder.i18n import _
+from cinder.i18n import _LE
LOG = logging.getLogger(__name__)
# pylint: disable=E1120
volume_types.update().values(is_public=True).execute()
except Exception:
- LOG.error(_("Column |%s| not created!"), repr(is_public))
+ LOG.error(_LE("Column |%s| not created!"), repr(is_public))
raise
volume_type_projects = Table(
try:
volume_type_projects.create()
except Exception:
- LOG.error(_("Table |%s| not created!"), repr(volume_type_projects))
+ LOG.error(_LE("Table |%s| not created!"), repr(volume_type_projects))
raise
try:
volume_types.drop_column(is_public)
except Exception:
- LOG.error(_("volume_types.is_public column not dropped"))
+ LOG.error(_LE("volume_types.is_public column not dropped"))
raise
volume_type_projects = Table('volume_type_projects', meta, autoload=True)
try:
volume_type_projects.drop()
except Exception:
- LOG.error(_("volume_type_projects table not dropped"))
+ LOG.error(_LE("volume_type_projects table not dropped"))
raise
# of patches are done to address these issues. It should be
# removed completely when bug 1433216 is closed.
ignore_dirs = [
- "cinder/db",
"cinder/openstack",
"cinder/volume"]
for directory in ignore_dirs: