Efficient image transfer for Glance cinder store
author Tomoki Sekiyama <tomoki.sekiyama@hds.com>
Fri, 8 May 2015 23:52:19 +0000 (19:52 -0400)
committer Tomoki Sekiyama <tomoki.sekiyama@hds.com>
Tue, 25 Aug 2015 14:40:25 +0000 (10:40 -0400)
This adds an implementation of clone_image which offloads
the image copy when the image is stored in the Glance Cinder store.
It uses the "create_cloned_volume" method to create a new volume
from an image that is backed by a Cinder volume.

To enable this feature, glance_api_version in cinder.conf must be
set to 2 and allowed_direct_url_schemes must contain 'cinder'.
In glance-api.conf, show_multiple_locations must be set to True,
and the Cinder store must be enabled in Glance.
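For reference, a minimal sketch of the configuration described above
(section placement follows the usual defaults and is an assumption here;
the exact way to enable the Cinder store depends on the Glance release):

    # cinder.conf
    [DEFAULT]
    glance_api_version = 2
    allowed_direct_url_schemes = cinder

    # glance-api.conf
    [DEFAULT]
    show_multiple_locations = True
    # plus the cinder store enabled in the glance_store configuration,
    # e.g. stores = file,http,cinder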

In addition, if image_upload_use_cinder_backend is set to True,
upload-to-image in raw format will create a cloned volume and
register its location with the image service instead of uploading
the volume contents.
If image_upload_use_internal_tenant is set to True, the image
volume is stored in the internal tenant; otherwise it is placed
in the current context's tenant.
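A sketch of the upload-side options; they are defined as per-backend
volume options, so they would typically live in the backend section of
cinder.conf (the [lvm] section name below is only an example):

    [lvm]
    image_upload_use_cinder_backend = True
    image_upload_use_internal_tenant = True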

This also changes the LVM driver implementation to enable
image upload. For thin LVM in particular, it modifies the
create_cloned_volume method to create a thin snapshot LV.
Note that a thin snapshot LV can be treated like a normal
LV: it can be read and written, and it survives deletion of
the parent volume.
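In outline, the new thin-LVM clone path boils down to the following
(a condensed sketch of the driver change in the diff below; variable
names are illustrative and error handling is omitted):

    # create the new volume as a thin snapshot LV of the source volume
    vg.create_lv_snapshot(volume['name'], src_vref['name'], 'thin')
    # grow it if the requested size is larger than the source
    if volume['size'] > src_vref['size']:
        driver.extend_volume(volume, volume['size'])
    # activate it and drop the skip-activation flag so it remains a
    # normal, usable LV after the next reboot
    vg.activate_lv(volume['name'], is_snapshot=True, permanent=True)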

Only raw format images are supported; for other formats, the
normal image upload/download path is used.

Currently the Glance Cinder store does not support upload and
download, so images created by this feature cannot be used by
projects other than Cinder. Patches to provide access to images
stored on Cinder are proposed here:
  glance-specs: https://review.openstack.org/183363
  glance patch(adding rootwrap): https://review.openstack.org/186201
  glance_store patch: https://review.openstack.org/166414

Note that this change works even without these Glance patches.

Change-Id: I2cb68749f194d0cd597b7258a317afb982236aea
Implements: blueprint clone-image-in-glance-cinder-backend
DocImpact

cinder/brick/local_dev/lvm.py
cinder/image/glance.py
cinder/tests/unit/brick/fake_lvm.py
cinder/tests/unit/image/fake.py
cinder/tests/unit/test_volume.py
cinder/tests/unit/volume/flows/test_create_volume_flow.py
cinder/volume/driver.py
cinder/volume/drivers/lvm.py
cinder/volume/flows/manager/create_volume.py
cinder/volume/manager.py

cinder/brick/local_dev/lvm.py
index b830dd70280a028dc3c2cc35f4018e0aa0fdf812..67f8e19907f9376f5ca12245656a358a04ef897e 100644 (file)
@@ -604,10 +604,12 @@ class LVM(executor.Executor):
             LOG.error(_LE('StdErr  :%s'), err.stderr)
             raise
 
-    def activate_lv(self, name, is_snapshot=False):
+    def activate_lv(self, name, is_snapshot=False, permanent=False):
         """Ensure that logical volume/snapshot logical volume is activated.
 
         :param name: Name of LV to activate
+        :param is_snapshot: whether LV is a snapshot
+        :param permanent: whether we should drop skipactivation flag
         :raises: putils.ProcessExecutionError
         """
 
@@ -626,6 +628,10 @@ class LVM(executor.Executor):
 
         if self.supports_lvchange_ignoreskipactivation:
             cmd.append('-K')
+            # If permanent=True is specified, drop the skipactivation flag in
+            # order to make this LV automatically activated after next reboot.
+            if permanent:
+                cmd += ['-k', 'n']
 
         cmd.append(lv_path)
 
cinder/image/glance.py
index f013efe227158a5d2cc81e790d0ed3d9934ba4d9..0a2250d293483cdf978a5f2b8bb3ca3eada47f3e 100644 (file)
@@ -270,6 +270,30 @@ class GlanceImageService(object):
         return (getattr(image_meta, 'direct_url', None),
                 getattr(image_meta, 'locations', None))
 
+    def add_location(self, context, image_id, url, metadata):
+        """Add a backend location url to an image.
+
+        Returns a dict containing image metadata on success.
+        """
+        if CONF.glance_api_version != 2:
+            raise exception.Invalid("Image API version 2 is disabled.")
+        client = GlanceClientWrapper(version=2)
+        try:
+            return client.call(context, 'add_location',
+                               image_id, url, metadata)
+        except Exception:
+            _reraise_translated_image_exception(image_id)
+
+    def delete_locations(self, context, image_id, url_set):
+        """Delete backend location urls from an image."""
+        if CONF.glance_api_version != 2:
+            raise exception.Invalid("Image API version 2 is disabled.")
+        client = GlanceClientWrapper(version=2)
+        try:
+            return client.call(context, 'delete_locations', image_id, url_set)
+        except Exception:
+            _reraise_translated_image_exception(image_id)
+
     def download(self, context, image_id, data=None):
         """Calls out to Glance for data and writes data."""
         if data and 'file' in CONF.allowed_direct_url_schemes:
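A hypothetical usage sketch of the two new helpers (image_service, ctx,
image_id and volume_id are placeholder names; the VolumeManager change
further below calls add_location exactly like this, with empty location
metadata):

    uri = 'cinder://%s' % volume_id
    image_service.add_location(ctx, image_id, uri, {})
    # and, to unregister the location again:
    image_service.delete_locations(ctx, image_id, set([uri]))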
cinder/tests/unit/brick/fake_lvm.py
index 61f28ec01d809f191792fbbfc3326b892b61b946..8474b9c93a9367b75478b29f31ef2c8f9b4bc066 100644 (file)
@@ -55,7 +55,7 @@ class FakeBrickLVM(object):
     def lv_has_snapshot(self, name):
         return False
 
-    def activate_lv(self, lv, is_snapshot=False):
+    def activate_lv(self, lv, is_snapshot=False, permanent=False):
         pass
 
     def rename_volume(self, lv_name, new_name):
cinder/tests/unit/image/fake.py
index 5214e09e83809f75f5ced290e130f7129093125c..cb38e4709311f911167c6585221252950dff1b8f 100644 (file)
@@ -210,6 +210,12 @@ class _FakeImageService(object):
             return 'fake_location'
         return None
 
+    def add_location(self, context, image_id, url, metadata):
+        self.update(context, image_id, {'locations': [{'url': url,
+                                                       'metadata': metadata}]})
+        return True
+
+
 _fakeImageService = _FakeImageService()
 
 
cinder/tests/unit/test_volume.py
index 256c272090bebfa52b67161bf5f00792154d7dc1..45f2bb8205e7a89e5ed9c24c47b2b94eb9f58d53 100644 (file)
@@ -3381,8 +3381,12 @@ class VolumeTestCase(BaseVolumeTestCase):
                           self.context,
                           volume_id)
 
-    def _create_volume_from_image(self, fakeout_copy_image_to_volume=False,
-                                  fakeout_clone_image=False):
+    @mock.patch('cinder.volume.flows.manager.create_volume.'
+                'CreateVolumeFromSpecTask._clone_image_volume')
+    def _create_volume_from_image(self, mock_clone_image_volume,
+                                  fakeout_copy_image_to_volume=False,
+                                  fakeout_clone_image=False,
+                                  clone_image_volume=False):
         """Test function of create_volume_from_image.
 
         Test cases call this function to create a volume from image, caller
@@ -3414,6 +3418,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         if fakeout_copy_image_to_volume:
             self.stubs.Set(self.volume, '_copy_image_to_volume',
                            fake_copy_image_to_volume)
+        mock_clone_image_volume.return_value = ({}, clone_image_volume)
 
         image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
         volume_id = tests_utils.create_volume(self.context,
@@ -3508,6 +3513,17 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.assertDictEqual(self.volume.stats['pools'],
                              {'_pool0': {'allocated_capacity_gb': 1}})
 
+    def test_create_volume_from_image_clone_image_volume(self):
+        """Test create volume from image via image volume.
+
+        Verify that after cloning image to volume, it is in available
+        state and is bootable.
+        """
+        volume = self._create_volume_from_image(clone_image_volume=True)
+        self.assertEqual('available', volume['status'])
+        self.assertTrue(volume['bootable'])
+        self.volume.delete_volume(self.context, volume['id'])
+
     def test_create_volume_from_exact_sized_image(self):
         """Test create volume from an image of the same size.
 
@@ -5491,6 +5507,61 @@ class CopyVolumeToImageTestCase(BaseVolumeTestCase):
                               self.context,
                               saving_image_id)
 
+    @mock.patch.object(QUOTAS, 'reserve')
+    @mock.patch.object(QUOTAS, 'commit')
+    @mock.patch.object(vol_manager.VolumeManager, 'create_volume')
+    @mock.patch.object(fake_driver.FakeISCSIDriver, 'copy_volume_to_image')
+    def _test_copy_volume_to_image_with_image_volume(
+            self, mock_copy, mock_create, mock_quota_commit,
+            mock_quota_reserve):
+        self.flags(glance_api_version=2)
+        self.volume.driver.configuration.image_upload_use_cinder_backend = True
+        image_service = fake_image.FakeImageService()
+        image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
+        self.image_meta['id'] = image_id
+        self.image_meta['status'] = 'queued'
+        image_service.create(self.context, self.image_meta)
+
+        # creating volume testdata
+        self.volume_attrs['instance_uuid'] = None
+        db.volume_create(self.context, self.volume_attrs)
+
+        def fake_create(context, volume_id, **kwargs):
+            db.volume_update(context, volume_id, {'status': 'available'})
+
+        mock_create.side_effect = fake_create
+
+        # start test
+        self.volume.copy_volume_to_image(self.context,
+                                         self.volume_id,
+                                         self.image_meta)
+
+        volume = db.volume_get(self.context, self.volume_id)
+        self.assertEqual('available', volume['status'])
+
+        # return create image
+        image = image_service.show(self.context, image_id)
+        image_service.delete(self.context, image_id)
+        return image
+
+    def test_copy_volume_to_image_with_image_volume(self):
+        image = self._test_copy_volume_to_image_with_image_volume()
+        self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
+
+    def test_copy_volume_to_image_with_image_volume_qcow2(self):
+        self.image_meta['disk_format'] = 'qcow2'
+        image = self._test_copy_volume_to_image_with_image_volume()
+        self.assertIsNone(image.get('locations'))
+
+    @mock.patch.object(vol_manager.VolumeManager, 'delete_volume')
+    @mock.patch.object(fake_image._FakeImageService, 'add_location',
+                       side_effect=exception.Invalid)
+    def test_copy_volume_to_image_with_image_volume_failure(
+            self, mock_add_location, mock_delete):
+        image = self._test_copy_volume_to_image_with_image_volume()
+        self.assertIsNone(image.get('locations'))
+        self.assertTrue(mock_delete.called)
+
 
 class GetActiveByWindowTestCase(BaseVolumeTestCase):
     def setUp(self):
@@ -6574,6 +6645,24 @@ class LVMVolumeDriverTestCase(DriverTestCase):
 
         self.assertEqual('default', lvm_driver.configuration.lvm_type)
 
+    @mock.patch.object(lvm.LVMISCSIDriver, 'extend_volume')
+    def test_create_cloned_volume_by_thin_snapshot(self, mock_extend):
+        self.configuration.lvm_type = 'thin'
+        fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False,
+                                                  None, 'default'))
+        lvm_driver = lvm.LVMISCSIDriver(configuration=self.configuration,
+                                        vg_obj=fake_vg,
+                                        db=db)
+        fake_volume = tests_utils.create_volume(self.context, size=1)
+        fake_new_volume = tests_utils.create_volume(self.context, size=2)
+
+        lvm_driver.create_cloned_volume(fake_new_volume, fake_volume)
+        fake_vg.create_lv_snapshot.assert_called_once_with(
+            fake_new_volume['name'], fake_volume['name'], 'thin')
+        mock_extend.assert_called_once_with(fake_new_volume, 2)
+        fake_vg.activate_lv.assert_called_once_with(
+            fake_new_volume['name'], is_snapshot=True, permanent=True)
+
 
 class ISCSITestCase(DriverTestCase):
     """Test Case for ISCSIDriver"""
cinder/tests/unit/volume/flows/test_create_volume_flow.py
index ada3891368ddfb30842593ade83b562644486f97..d5c447b90d53d395016801b58991e1d0e1d50b4b 100644 (file)
@@ -223,3 +223,60 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
                           volume, snapshot_obj.id)
         fake_driver.create_volume_from_snapshot.assert_called_once_with(
             volume, snapshot_obj)
+
+
+class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+
+    def setUp(self):
+        super(CreateVolumeFlowManagerGlanceCinderBackendCase, self).setUp()
+        self.ctxt = context.get_admin_context()
+
+    @mock.patch('cinder.volume.flows.manager.create_volume.'
+                'CreateVolumeFromSpecTask.'
+                '_handle_bootable_volume_glance_meta')
+    def test_create_from_image_volume(self, handle_bootable, format='raw',
+                                      owner=None, location=True):
+        self.flags(allowed_direct_url_schemes=['cinder'])
+        fake_db = mock.MagicMock()
+        fake_driver = mock.MagicMock()
+        fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+            fake_db, fake_driver)
+        fake_image_service = mock.MagicMock()
+        volume = fake_volume.fake_volume_obj(self.ctxt)
+        image_volume = fake_volume.fake_volume_obj(self.ctxt,
+                                                   volume_metadata={})
+        image_id = '34e54c31-3bc8-5c1d-9fff-2225bcce4b59'
+        url = 'cinder://%s' % image_volume['id']
+        image_location = None
+        if location:
+            image_location = (url, [{'url': url, 'metadata': {}}])
+        image_meta = {'id': image_id,
+                      'container_format': 'bare',
+                      'disk_format': format,
+                      'owner': owner or self.ctxt.project_id}
+        fake_driver.clone_image.return_value = (None, False)
+        fake_db.volume_get_all_by_host.return_value = [image_volume]
+
+        fake_manager._create_from_image(self.ctxt,
+                                        volume,
+                                        image_location,
+                                        image_id,
+                                        image_meta,
+                                        fake_image_service)
+        if format == 'raw' and not owner and location:
+            fake_driver.create_cloned_volume.assert_called_once_with(
+                volume, image_volume)
+            handle_bootable.assert_called_once_with(self.ctxt, volume['id'],
+                                                    image_id=image_id,
+                                                    image_meta=image_meta)
+        else:
+            self.assertFalse(fake_driver.create_cloned_volume.called)
+
+    def test_create_from_image_volume_in_qcow2_format(self):
+        self.test_create_from_image_volume(format='qcow2')
+
+    def test_create_from_image_volume_of_other_owner(self):
+        self.test_create_from_image_volume(owner='fake-owner')
+
+    def test_create_from_image_volume_without_location(self):
+        self.test_create_from_image_volume(location=False)
cinder/volume/driver.py
index 7f6c8646e2a26a09cf8a7a8c83b5b01ed2337375..9dd83fb4b639695fd28122a606bf67a3fa3eabbe 100644 (file)
@@ -234,7 +234,21 @@ volume_opts = [
                      "is: {'key-1'='val1' 'key-2'='val2'...},{...} "
                      "and for managed devices its simply a list of valid "
                      "configured backend_names that the driver supports "
-                     "replicating to: backend-a,bakcend-b...")
+                     "replicating to: backend-a,bakcend-b..."),
+    cfg.BoolOpt('image_upload_use_cinder_backend',
+                default=False,
+                help='If set to True, upload-to-image in raw format will '
+                     'create a cloned volume and register its location to '
+                     'the image service, instead of uploading the volume '
+                     'content. The cinder backend and locations support '
+                     'must be enabled in the image service, and '
+                     'glance_api_version must be set to 2.'),
+    cfg.BoolOpt('image_upload_use_internal_tenant',
+                default=False,
+                help='If set to True, the image volume created by '
+                     'upload-to-image will be placed in the internal tenant. '
+                     'Otherwise, the image volume is created in the current '
+                     'context\'s tenant.'),
 ]
 
 # for backward compatibility
cinder/volume/drivers/lvm.py
index f0bededc4550cfe8a8b95df06cd68609064744c9..d1e2f50589f5c8698999ad5708691257706e6d4f 100644 (file)
@@ -449,6 +449,16 @@ class LVMVolumeDriver(driver.VolumeDriver):
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
+        if self.configuration.lvm_type == 'thin':
+            self.vg.create_lv_snapshot(volume['name'],
+                                       src_vref['name'],
+                                       self.configuration.lvm_type)
+            if volume['size'] > src_vref['size']:
+                LOG.debug("Resize the new volume to %s.", volume['size'])
+                self.extend_volume(volume, volume['size'])
+            self.vg.activate_lv(volume['name'], is_snapshot=True,
+                                permanent=True)
+            return
 
         mirror_count = 0
         if self.configuration.lvm_mirrors:
cinder/volume/flows/manager/create_volume.py
index 6f9346a5b9882470d4e3d630b417a895c5383216..9abef9f43d1846029089bb152d87a3392de17bab 100644 (file)
@@ -570,6 +570,59 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         self.db.volume_glance_metadata_bulk_create(context, volume_id,
                                                    volume_metadata)
 
+    def _clone_image_volume(self, context, volume, image_location, image_meta):
+        """Create a volume efficiently from an existing image.
+
+        Returns a dict of volume properties eg. provider_location,
+        boolean indicating whether cloning occurred
+        """
+        if not image_location:
+            return None, False
+
+        if (image_meta.get('container_format') != 'bare' or
+                image_meta.get('disk_format') != 'raw'):
+            LOG.info(_LI("Requested image %(id)s is not in raw format."),
+                     {'id': image_meta.get('id')})
+            return None, False
+
+        image_volume = None
+        direct_url, locations = image_location
+        urls = set([direct_url] + [loc.get('url') for loc in locations or []])
+        image_volume_ids = [url[9:] for url in urls
+                            if url and url.startswith('cinder://')]
+        image_volumes = self.db.volume_get_all_by_host(
+            context, volume['host'], filters={'id': image_volume_ids})
+
+        for image_volume in image_volumes:
+            # For the case image volume is stored in the service tenant,
+            # image_owner volume metadata should also be checked.
+            image_owner = None
+            volume_metadata = image_volume.get('volume_metadata') or {}
+            for m in volume_metadata:
+                if m['key'] == 'image_owner':
+                    image_owner = m['value']
+            if (image_meta['owner'] != volume['project_id'] and
+                    image_meta['owner'] != image_owner):
+                LOG.info(_LI("Skipping image volume %(id)s because "
+                             "it is not accessible by current Tenant."),
+                         {'id': image_volume.id})
+                continue
+
+            LOG.info(_LI("Will clone a volume from the image volume "
+                         "%(id)s."), {'id': image_volume.id})
+            break
+        else:
+            LOG.debug("No accessible image volume for image %(id)s found.",
+                      {'id': image_meta['id']})
+            return None, False
+
+        try:
+            return self.driver.create_cloned_volume(volume, image_volume), True
+        except (NotImplementedError, exception.CinderException):
+            LOG.exception(_LE('Failed to clone image volume %(id)s.'),
+                          {'id': image_volume['id']})
+            return None, False
+
     def _create_from_image(self, context, volume_ref,
                            image_location, image_id, image_meta,
                            image_service, **kwargs):
@@ -587,6 +640,11 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                                                        image_location,
                                                        image_meta,
                                                        image_service)
+        if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
+            model_update, cloned = self._clone_image_volume(context,
+                                                            volume_ref,
+                                                            image_location,
+                                                            image_meta)
         if not cloned:
             # TODO(harlowja): what needs to be rolled back in the clone if this
             # volume create fails?? Likely this should be a subflow or broken
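For clarity, the image_location tuple consumed by _clone_image_volume is
expected to look roughly like this for an image backed by a Cinder volume
(see the unit test above; the volume id is a placeholder):

    image_location = ('cinder://<volume-id>',
                      [{'url': 'cinder://<volume-id>', 'metadata': {}}])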
cinder/volume/manager.py
index fa5a2d1eecd3103204f089bde91ef7cce3e5fc37..cbbe984e3b517245c7031ab677c73a49aaefd303 100644 (file)
@@ -966,6 +966,107 @@ class VolumeManager(manager.SchedulerDependentManager):
         self._notify_about_volume_usage(context, volume, "detach.end")
         LOG.info(_LI("Detach volume completed successfully."), resource=volume)
 
+    def _clone_image_volume(self, ctx, volume, image_meta):
+        volume_type_id = volume.get('volume_type_id')
+        reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
+        QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
+        reservations = QUOTAS.reserve(ctx, **reserve_opts)
+
+        try:
+            new_vol_values = {}
+            for k, v in volume.items():
+                new_vol_values[k] = v
+            del new_vol_values['id']
+            del new_vol_values['_name_id']
+            del new_vol_values['volume_type']
+            new_vol_values['volume_type_id'] = volume_type_id
+            new_vol_values['attach_status'] = 'detached'
+            new_vol_values['volume_attachment'] = []
+            new_vol_values['status'] = 'creating'
+            new_vol_values['project_id'] = ctx.project_id
+            new_vol_values['display_name'] = 'image-%s' % image_meta['id']
+            new_vol_values['source_volid'] = volume.id
+            LOG.debug('Creating image volume entry: %s.', new_vol_values)
+            image_volume = self.db.volume_create(ctx, new_vol_values)
+        except Exception:
+            QUOTAS.rollback(ctx, reservations)
+            return False
+
+        QUOTAS.commit(ctx, reservations,
+                      project_id=new_vol_values['project_id'])
+
+        try:
+            self.create_volume(ctx, image_volume.id,
+                               allow_reschedule=False)
+            image_volume = self.db.volume_get(ctx, image_volume.id)
+            if image_volume.status != 'available':
+                raise exception.InvalidVolume(_('Volume is not available.'))
+
+            self.db.volume_admin_metadata_update(ctx.elevated(),
+                                                 image_volume.id,
+                                                 {'readonly': 'True'},
+                                                 False)
+            return image_volume
+        except exception.CinderException:
+            LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
+                              'image %(image_id)s.'),
+                          {'volume_id': volume.id,
+                           'image_id': image_meta['id']})
+            try:
+                self.delete_volume(ctx, image_volume)
+            except exception.CinderException:
+                LOG.exception(_LE('Could not delete the image volume %(id)s.'),
+                              {'id': image_volume.id})
+            return False
+
+    def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
+                                             image_meta):
+        """Create a cloned volume and register its location to the image."""
+        if (image_meta['disk_format'] != 'raw' or
+                image_meta['container_format'] != 'bare'):
+            return False
+
+        image_volume_context = ctx
+        if self.driver.configuration.image_upload_use_internal_tenant:
+            internal_ctx = context.get_internal_tenant_context()
+            if internal_ctx:
+                image_volume_context = internal_ctx
+
+        image_volume = self._clone_image_volume(image_volume_context,
+                                                volume,
+                                                image_meta)
+        if not image_volume:
+            return False
+
+        uri = 'cinder://%s' % image_volume.id
+        image_registered = None
+        try:
+            image_registered = image_service.add_location(
+                ctx, image_meta['id'], uri, {})
+        except (exception.NotAuthorized, exception.Invalid,
+                exception.NotFound):
+            LOG.exception(_LE('Failed to register image volume location '
+                              '%(uri)s.'), {'uri': uri})
+
+        if not image_registered:
+            LOG.warning(_LW('Registration of image volume URI %(uri)s '
+                            'to image %(image_id)s failed.'),
+                        {'uri': uri, 'image_id': image_meta['id']})
+            try:
+                self.delete_volume(image_volume_context, image_volume)
+            except exception.CinderException:
+                LOG.exception(_LE('Could not delete failed image volume '
+                                  '%(id)s.'), {'id': image_volume.id})
+            return False
+
+        image_volume_meta = {'glance_image_id': image_meta['id'],
+                             'image_owner': ctx.project_id}
+        self.db.volume_metadata_update(image_volume_context,
+                                       image_volume.id,
+                                       image_volume_meta,
+                                       False)
+        return True
+
     def copy_volume_to_image(self, context, volume_id, image_meta):
         """Uploads the specified volume to Glance.
 
@@ -985,10 +1086,19 @@ class VolumeManager(manager.SchedulerDependentManager):
 
             image_service, image_id = \
                 glance.get_remote_image_service(context, image_meta['id'])
-            self.driver.copy_volume_to_image(context, volume, image_service,
-                                             image_meta)
-            LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
-                      resource=volume)
+            if (self.driver.configuration.image_upload_use_cinder_backend
+                    and self._clone_image_volume_and_add_location(
+                        context, volume, image_service, image_meta)):
+                LOG.debug("Registered image volume location to glance "
+                          "image-id: %(image_id)s.",
+                          {'image_id': image_meta['id']},
+                          resource=volume)
+            else:
+                self.driver.copy_volume_to_image(context, volume,
+                                                 image_service, image_meta)
+                LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
+                          {'image_id': image_meta['id']},
+                          resource=volume)
         except Exception as error:
             LOG.error(_LE("Upload volume to image encountered an error "
                           "(image-id: %(image_id)s)."),