cinder.backup: Replace 'locals()' with explicit values
author     Andrew Forrest <forrest@research.att.com>
           Sat, 15 Jun 2013 00:09:53 +0000 (17:09 -0700)
committer  Andrew Forrest <forrest@research.att.com>
           Tue, 18 Jun 2013 15:19:07 +0000 (08:19 -0700)
Help bring source code into compliance with the Cinder Style Commandments:
https://github.com/openstack/cinder/blob/master/HACKING.rst

This change covers all affected source files in the cinder backup module,
i.e. cinder/backup/*.py and its subdirectories.

Partially fixes: bug #1190748

Change-Id: I3e0f3806030f1813d055135f15e64b5a719970ed
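
For reference, a minimal sketch (not taken from the patch itself) of the
pattern being removed and its replacement. 'locals()' silently captures every
local variable, hides which names the format string actually uses, and breaks
if a variable is later renamed; the explicit mapping names its inputs:

    size = 10                 # hypothetical values, for illustration only
    backup_id = 'abc-123'

    # Before: implicit -- renaming 'size' or 'backup_id' breaks this quietly
    msg = ('Creating volume of %(size)s GB for restore of '
           'backup %(backup_id)s' % locals())

    # After: explicit mapping, as done throughout this change
    msg = ('Creating volume of %(size)s GB for restore of '
           'backup %(backup_id)s' %
           {'size': size, 'backup_id': backup_id})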

cinder/backup/api.py
cinder/backup/manager.py
cinder/backup/services/swift.py
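
Aside (illustration only, not from the patch): the api.py hunks pass the
mapping to LOG.audit as a separate argument, deferring interpolation to the
logging framework, while the LOG.debug/LOG.info hunks apply '%' eagerly
before the call. A sketch of the two forms using a plain stdlib 'logging'
logger rather than Cinder's:

    import logging

    LOG = logging.getLogger(__name__)
    volume_id, backup_id = 'vol-1', 'bak-1'   # hypothetical values

    # Deferred: the dict is interpolated only if the record is emitted
    LOG.info('Overwriting volume %(volume_id)s with restore of backup '
             '%(backup_id)s',
             {'volume_id': volume_id, 'backup_id': backup_id})

    # Eager: the message string is built before the logging call
    LOG.info('Overwriting volume %(volume_id)s with restore of backup '
             '%(backup_id)s' %
             {'volume_id': volume_id, 'backup_id': backup_id})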

diff --git a/cinder/backup/api.py b/cinder/backup/api.py
index 188812056bb0feb5b3bb8221cc413f044735ec76..325f86231aaf85b3f950ab39b2b3444dcfdfc3c0 100644 (file)
--- a/cinder/backup/api.py
+++ b/cinder/backup/api.py
@@ -136,7 +136,9 @@ class API(base.Base):
             description = 'auto-created_from_restore_from_swift'
 
             LOG.audit(_("Creating volume of %(size)s GB for restore of "
-                        "backup %(backup_id)s"), locals(), context=context)
+                        "backup %(backup_id)s"),
+                      {'size': size, 'backup_id': backup_id},
+                      context=context)
             volume = self.volume_api.create(context, size, name, description)
             volume_id = volume['id']
 
@@ -149,8 +151,9 @@ class API(base.Base):
             volume = self.volume_api.get(context, volume_id)
             volume_size = volume['size']
             if volume_size < size:
-                err = _('volume size %(volume_size)d is too small to restore '
-                        'backup of size %(size)d.') % locals()
+                err = (_('volume size %(volume_size)d is too small to restore '
+                         'backup of size %(size)d.') %
+                       {'volume_size': volume_size, 'size': size})
                 raise exception.InvalidVolume(reason=err)
 
         if volume['status'] != "available":
@@ -165,7 +168,9 @@ class API(base.Base):
             raise exception.InvalidVolume(reason=msg)
 
         LOG.audit(_("Overwriting volume %(volume_id)s with restore of "
-                    "backup %(backup_id)s"), locals(), context=context)
+                    "backup %(backup_id)s"),
+                  {'volume_id': volume_id, 'backup_id': backup_id},
+                  context=context)
 
         # Setting the status here rather than setting at start and unrolling
         # for each error condition, it should be a very small window
diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index 4146694302491bd8b38750df67fe2c9deaba8cb7..4c89ca83eb441dd906030d1c3a4fd66ee86e0939 100755 (executable)
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -118,7 +118,8 @@ class BackupManager(manager.SchedulerDependentManager):
         volume_id = backup['volume_id']
         volume = self.db.volume_get(context, volume_id)
         LOG.info(_('create_backup started, backup: %(backup_id)s for '
-                   'volume: %(volume_id)s') % locals())
+                   'volume: %(volume_id)s') %
+                 {'backup_id': backup_id, 'volume_id': volume_id})
         self.db.backup_update(context, backup_id, {'host': self.host,
                                                    'service':
                                                    CONF.backup_service})
@@ -127,7 +128,10 @@ class BackupManager(manager.SchedulerDependentManager):
         actual_status = volume['status']
         if actual_status != expected_status:
             err = _('create_backup aborted, expected volume status '
-                    '%(expected_status)s but got %(actual_status)s') % locals()
+                    '%(expected_status)s but got %(actual_status)s') % {
+                        'expected_status': expected_status,
+                        'actual_status': actual_status,
+                    }
             self.db.backup_update(context, backup_id, {'status': 'error',
                                                        'fail_reason': err})
             raise exception.InvalidVolume(reason=err)
@@ -136,7 +140,10 @@ class BackupManager(manager.SchedulerDependentManager):
         actual_status = backup['status']
         if actual_status != expected_status:
             err = _('create_backup aborted, expected backup status '
-                    '%(expected_status)s but got %(actual_status)s') % locals()
+                    '%(expected_status)s but got %(actual_status)s') % {
+                        'expected_status': expected_status,
+                        'actual_status': actual_status,
+                    }
             self.db.volume_update(context, volume_id, {'status': 'available'})
             self.db.backup_update(context, backup_id, {'status': 'error',
                                                        'fail_reason': err})
@@ -165,7 +172,8 @@ class BackupManager(manager.SchedulerDependentManager):
         Restore volume backups from configured backup service.
         """
         LOG.info(_('restore_backup started, restoring backup: %(backup_id)s'
-                   ' to volume: %(volume_id)s') % locals())
+                   ' to volume: %(volume_id)s') %
+                 {'backup_id': backup_id, 'volume_id': volume_id})
         backup = self.db.backup_get(context, backup_id)
         volume = self.db.volume_get(context, volume_id)
         self.db.backup_update(context, backup_id, {'host': self.host})
@@ -174,7 +182,10 @@ class BackupManager(manager.SchedulerDependentManager):
         actual_status = volume['status']
         if actual_status != expected_status:
             err = _('restore_backup aborted, expected volume status '
-                    '%(expected_status)s but got %(actual_status)s') % locals()
+                    '%(expected_status)s but got %(actual_status)s') % {
+                        'expected_status': expected_status,
+                        'actual_status': actual_status
+                    }
             self.db.backup_update(context, backup_id, {'status': 'available'})
             raise exception.InvalidVolume(reason=err)
 
@@ -182,7 +193,10 @@ class BackupManager(manager.SchedulerDependentManager):
         actual_status = backup['status']
         if actual_status != expected_status:
             err = _('restore_backup aborted, expected backup status '
-                    '%(expected_status)s but got %(actual_status)s') % locals()
+                    '%(expected_status)s but got %(actual_status)s') % {
+                        'expected_status': expected_status,
+                        'actual_status': actual_status
+                    }
             self.db.backup_update(context, backup_id, {'status': 'error',
                                                        'fail_reason': err})
             self.db.volume_update(context, volume_id, {'status': 'error'})
@@ -200,7 +214,10 @@ class BackupManager(manager.SchedulerDependentManager):
             err = _('restore_backup aborted, the backup service currently'
                     ' configured [%(configured_service)s] is not the'
                     ' backup service that was used to create this'
-                    ' backup [%(backup_service)s]') % locals()
+                    ' backup [%(backup_service)s]') % {
+                        'configured_service': configured_service,
+                        'backup_service': backup_service,
+                    }
             self.db.backup_update(context, backup_id, {'status': 'available'})
             self.db.volume_update(context, volume_id, {'status': 'error'})
             raise exception.InvalidBackup(reason=err)
@@ -219,7 +236,8 @@ class BackupManager(manager.SchedulerDependentManager):
         self.db.volume_update(context, volume_id, {'status': 'available'})
         self.db.backup_update(context, backup_id, {'status': 'available'})
         LOG.info(_('restore_backup finished, backup: %(backup_id)s restored'
-                   ' to volume: %(volume_id)s') % locals())
+                   ' to volume: %(volume_id)s') %
+                 {'backup_id': backup_id, 'volume_id': volume_id})
 
     def delete_backup(self, context, backup_id):
         """
@@ -233,7 +251,10 @@ class BackupManager(manager.SchedulerDependentManager):
         actual_status = backup['status']
         if actual_status != expected_status:
             err = _('delete_backup aborted, expected backup status '
-                    '%(expected_status)s but got %(actual_status)s') % locals()
+                    '%(expected_status)s but got %(actual_status)s') % {
+                        'expected_status': expected_status,
+                        'actual_status': actual_status,
+                    }
             self.db.backup_update(context, backup_id, {'status': 'error',
                                                        'fail_reason': err})
             raise exception.InvalidBackup(reason=err)
@@ -245,7 +266,10 @@ class BackupManager(manager.SchedulerDependentManager):
                 err = _('delete_backup aborted, the backup service currently'
                         ' configured [%(configured_service)s] is not the'
                         ' backup service that was used to create this'
-                        ' backup [%(backup_service)s]') % locals()
+                        ' backup [%(backup_service)s]') % {
+                            'configured_service': configured_service,
+                            'backup_service': backup_service,
+                        }
                 self.db.backup_update(context, backup_id,
                                       {'status': 'error'})
                 raise exception.InvalidBackup(reason=err)
diff --git a/cinder/backup/services/swift.py b/cinder/backup/services/swift.py
index 24a989e9aafc4c9ea235b9adb93e579d3bee3a1e..7e2aa287abdfd187221b645da2a956e4ba623d89 100644 (file)
--- a/cinder/backup/services/swift.py
+++ b/cinder/backup/services/swift.py
@@ -131,7 +131,8 @@ class SwiftBackupService(base.Base):
         backup_id = backup['id']
         container = backup['container']
         LOG.debug(_('_create_container started, container: %(container)s,'
-                    'backup: %(backup_id)s') % locals())
+                    'backup: %(backup_id)s') %
+                  {'container': container, 'backup_id': backup_id})
         if container is None:
             container = CONF.backup_swift_container
             self.db.backup_update(context, backup_id, {'container': container})
@@ -167,7 +168,8 @@ class SwiftBackupService(base.Base):
     def _write_metadata(self, backup, volume_id, container, object_list):
         filename = self._metadata_filename(backup)
         LOG.debug(_('_write_metadata started, container name: %(container)s,'
-                    ' metadata filename: %(filename)s') % locals())
+                    ' metadata filename: %(filename)s') %
+                  {'container': container, 'filename': filename})
         metadata = {}
         metadata['version'] = self.SERVICE_VERSION
         metadata['backup_id'] = backup['id']
@@ -183,7 +185,8 @@ class SwiftBackupService(base.Base):
         if etag != md5:
             err = _('error writing metadata file to swift, MD5 of metadata'
                     ' file in swift [%(etag)s] is not the same as MD5 of '
-                    'metadata file sent to swift [%(md5)s]') % locals()
+                    'metadata file sent to swift [%(md5)s]') % {'etag': etag,
+                                                                'md5': md5}
             raise exception.InvalidBackup(reason=err)
         LOG.debug(_('_write_metadata finished'))
 
@@ -191,7 +194,8 @@ class SwiftBackupService(base.Base):
         container = backup['container']
         filename = self._metadata_filename(backup)
         LOG.debug(_('_read_metadata started, container name: %(container)s, '
-                    'metadata filename: %(filename)s') % locals())
+                    'metadata filename: %(filename)s') %
+                  {'container': container, 'filename': filename})
         (resp, body) = self.conn.get_object(container, filename)
         metadata = json.loads(body)
         LOG.debug(_('_read_metadata finished (%s)') % metadata)
@@ -221,7 +225,13 @@ class SwiftBackupService(base.Base):
         LOG.debug(_('starting backup of volume: %(volume_id)s to swift,'
                     ' volume size: %(volume_size_bytes)d, swift object names'
                     ' prefix %(object_prefix)s, availability zone:'
-                    ' %(availability_zone)s') % locals())
+                    ' %(availability_zone)s') %
+                  {
+                      'volume_id': volume_id,
+                      'volume_size_bytes': volume_size_bytes,
+                      'object_prefix': object_prefix,
+                      'availability_zone': availability_zone,
+                  })
         object_id = 1
         object_list = []
         while True:
@@ -243,7 +253,12 @@ class SwiftBackupService(base.Base):
                 comp_size_bytes = len(data)
                 LOG.debug(_('compressed %(data_size_bytes)d bytes of data'
                             ' to %(comp_size_bytes)d bytes using '
-                            '%(algorithm)s') % locals())
+                            '%(algorithm)s') %
+                          {
+                              'data_size_bytes': data_size_bytes,
+                              'comp_size_bytes': comp_size_bytes,
+                              'algorithm': algorithm,
+                          })
             else:
                 LOG.debug(_('not compressing data'))
                 obj[object_name]['compression'] = 'none'
@@ -254,14 +269,16 @@ class SwiftBackupService(base.Base):
                 etag = self.conn.put_object(container, object_name, reader)
             except socket.error as err:
                 raise exception.SwiftConnectionFailed(reason=str(err))
-            LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') % locals())
+            LOG.debug(_('swift MD5 for %(object_name)s: %(etag)s') %
+                      {'object_name': object_name, 'etag': etag, })
             md5 = hashlib.md5(data).hexdigest()
             obj[object_name]['md5'] = md5
-            LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') % locals())
+            LOG.debug(_('backup MD5 for %(object_name)s: %(md5)s') %
+                      {'object_name': object_name, 'md5': md5})
             if etag != md5:
                 err = _('error writing object to swift, MD5 of object in '
                         'swift %(etag)s is not the same as MD5 of object sent '
-                        'to swift %(md5)s') % locals()
+                        'to swift %(md5)s') % {'etag': etag, 'md5': md5}
                 raise exception.InvalidBackup(reason=err)
             object_list.append(obj)
             object_id += 1
@@ -298,7 +315,13 @@ class SwiftBackupService(base.Base):
             object_name = metadata_object.keys()[0]
             LOG.debug(_('restoring object from swift. backup: %(backup_id)s, '
                         'container: %(container)s, swift object name: '
-                        '%(object_name)s, volume: %(volume_id)s') % locals())
+                        '%(object_name)s, volume: %(volume_id)s') %
+                      {
+                          'backup_id': backup_id,
+                          'container': container,
+                          'object_name': object_name,
+                          'volume_id': volume_id,
+                      })
             try:
                 (resp, body) = self.conn.get_object(container, object_name)
             except socket.error as err:
@@ -330,7 +353,13 @@ class SwiftBackupService(base.Base):
         object_prefix = backup['service_metadata']
         LOG.debug(_('starting restore of backup %(object_prefix)s from swift'
                     ' container: %(container)s, to volume %(volume_id)s, '
-                    'backup: %(backup_id)s') % locals())
+                    'backup: %(backup_id)s') %
+                  {
+                      'object_prefix': object_prefix,
+                      'container': container,
+                      'volume_id': volume_id,
+                      'backup_id': backup_id,
+                  })
         try:
             metadata = self._read_metadata(backup)
         except socket.error as err:
@@ -346,7 +375,7 @@ class SwiftBackupService(base.Base):
             raise exception.InvalidBackup(reason=err)
         restore_func(backup, volume_id, metadata, volume_file)
         LOG.debug(_('restore %(backup_id)s to %(volume_id)s finished.') %
-                  locals())
+                  {'backup_id': backup_id, 'volume_id': volume_id})
 
     def delete(self, backup):
         """Delete the given backup from swift."""
@@ -372,7 +401,11 @@ class SwiftBackupService(base.Base):
                                'continuing with delete') % swift_object_name)
                 else:
                     LOG.debug(_('deleted swift object: %(swift_object_name)s'
-                                ' in container: %(container)s') % locals())
+                                ' in container: %(container)s') %
+                              {
+                                  'swift_object_name': swift_object_name,
+                                  'container': container
+                              })
                 # Deleting a backup's objects from swift can take some time.
                 # Yield so other threads can run
                 eventlet.sleep(0)