review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fixes ceph backup import errors
author     Edward Hope-Morley <edward.hope-morley@canonical.com>
           Thu, 26 Sep 2013 13:05:24 +0000 (14:05 +0100)
committer  Edward Hope-Morley <edward.hope-morley@canonical.com>
           Mon, 30 Sep 2013 22:16:14 +0000 (23:16 +0100)
For some reason, using a non-rbd volume driver whilst using
the Ceph backup driver causes what looks to be a cyclic import
error. Ensuring that the backup.drivers.ceph module is imported
into volume.drivers.rbd ONLY if it is needed, i.e. if both
drivers are in use, seems to remedy this.

Change-Id: Ieae0c3451cfb1750eca8cfcde5e0be9e7eb028d0
Fixes: bug 1231412
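
The "cyclic import error" referred to above is the standard Python
circular-import failure: two modules import each other at module level, and
whichever module is imported second observes a half-initialised copy of the
first. A minimal sketch of the failure, using two toy modules (a.py and b.py
are illustrative stand-ins, not the real cinder modules):

    # a.py
    import b        # importing a runs b, which re-enters a below

    X = 1           # b reads this attribute before this line has run

    # b.py
    import a        # returns the partially initialised module object

    Y = a.X         # AttributeError: 'module' object has no attribute 'X'

Running `python -c "import a"` fails: `import a` inside b.py succeeds (the
module object is already in sys.modules), but a.py has not yet reached
`X = 1`, so the attribute lookup blows up. The remedy this patch applies is
to defer one of the two imports until call time, as shown in the rbd.py
hunk below.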
cinder/backup/drivers/ceph.py
cinder/volume/drivers/rbd.py

cinder/backup/drivers/ceph.py
index 8b8552d6c8f9e53d871f6c4d827a9a1f93e69c3c..16ec821b9554c650e73519e381ecab26f9dd5b8f 100644
--- a/cinder/backup/drivers/ceph.py
+++ b/cinder/backup/drivers/ceph.py
@@ -54,7 +54,7 @@ from cinder.openstack.common import log as logging
 from cinder.openstack.common import processutils
 from cinder import units
 from cinder import utils
-import cinder.volume.drivers as drivers
+import cinder.volume.drivers.rbd as rbd_driver
 
 try:
     import rados
@@ -360,7 +360,7 @@ class CephBackupDriver(BackupDriver):
             LOG.debug(_("trying diff format name format basename='%s'") %
                       (base_name))
 
-        with drivers.rbd.RADOSClient(self) as client:
+        with rbd_driver.RADOSClient(self) as client:
             rbd_exists, base_name = \
                 self._rbd_image_exists(base_name, volume_id, client,
                                        try_diff_format=try_diff_format)
@@ -505,7 +505,7 @@ class CephBackupDriver(BackupDriver):
 
         base_name = self._get_backup_base_name(volume_id, diff_format=True)
         image_created = False
-        with drivers.rbd.RADOSClient(self, self._ceph_backup_pool) as client:
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             # If from_snap does not exist at the destination (and the
             # destination exists), this implies a previous backup has failed.
             # In this case we will force a full backup.
@@ -595,7 +595,7 @@ class CephBackupDriver(BackupDriver):
         """
         backup_name = self._get_backup_base_name(volume_id, backup_id)
 
-        with drivers.rbd.RADOSClient(self, self._ceph_backup_pool) as client:
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             # First create base backup image
             old_format, features = self._get_rbd_support()
             LOG.debug(_("creating base image='%s'") % (backup_name))
@@ -610,11 +610,11 @@ class CephBackupDriver(BackupDriver):
             LOG.debug(_("copying data"))
             dest_rbd = self.rbd.Image(client.ioctx, backup_name)
             try:
-                rbd_meta = drivers.rbd.RBDImageMetadata(dest_rbd,
-                                                        self._ceph_backup_pool,
-                                                        self._ceph_backup_user,
-                                                        self._ceph_backup_conf)
-                rbd_fd = drivers.rbd.RBDImageIOWrapper(rbd_meta)
+                rbd_meta = rbd_driver.RBDImageMetadata(dest_rbd,
+                                                       self._ceph_backup_pool,
+                                                       self._ceph_backup_user,
+                                                       self._ceph_backup_conf)
+                rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta)
                 self._transfer_data(src_volume, src_name, rbd_fd, backup_name,
                                     length)
             finally:
@@ -758,7 +758,7 @@ class CephBackupDriver(BackupDriver):
         This will result in all extents being copied from source to
         destination.
         """
-        with drivers.rbd.RADOSClient(self, self._ceph_backup_pool) as client:
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             # If a source snapshot is provided we assume the base is diff
             # format.
             if src_snap:
@@ -774,11 +774,11 @@ class CephBackupDriver(BackupDriver):
             src_rbd = self.rbd.Image(client.ioctx, backup_name,
                                      snapshot=src_snap, read_only=True)
             try:
-                rbd_meta = drivers.rbd.RBDImageMetadata(src_rbd,
-                                                        self._ceph_backup_pool,
-                                                        self._ceph_backup_user,
-                                                        self._ceph_backup_conf)
-                rbd_fd = drivers.rbd.RBDImageIOWrapper(rbd_meta)
+                rbd_meta = rbd_driver.RBDImageMetadata(src_rbd,
+                                                       self._ceph_backup_pool,
+                                                       self._ceph_backup_user,
+                                                       self._ceph_backup_conf)
+                rbd_fd = rbd_driver.RBDImageIOWrapper(rbd_meta)
                 self._transfer_data(rbd_fd, backup_name, dest_file, dest_name,
                                     length)
             finally:
@@ -792,7 +792,7 @@ class CephBackupDriver(BackupDriver):
         shrink it to the size of the original backup so we need to
         post-process and resize it back to its expected size.
         """
-        with drivers.rbd.RADOSClient(self, self._ceph_backup_pool) as client:
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             adjust_size = 0
             base_image = self.rbd.Image(client.ioctx, self._utf8(backup_base),
                                         read_only=True)
@@ -803,7 +803,7 @@ class CephBackupDriver(BackupDriver):
                 base_image.close()
 
         if adjust_size:
-            with drivers.rbd.RADOSClient(self, src_pool) as client:
+            with rbd_driver.RADOSClient(self, src_pool) as client:
                 dest_image = self.rbd.Image(client.ioctx,
                                             self._utf8(restore_vol))
                 try:
@@ -847,7 +847,7 @@ class CephBackupDriver(BackupDriver):
 
     def _num_backup_snaps(self, backup_base_name):
         """Return the number of snapshots that exist on the base image."""
-        with drivers.rbd.RADOSClient(self, self._ceph_backup_pool) as client:
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             base_rbd = self.rbd.Image(client.ioctx, backup_base_name,
                                       read_only=True)
             try:
@@ -865,7 +865,7 @@ class CephBackupDriver(BackupDriver):
 
         If the backup was not incremental None is returned.
         """
-        with drivers.rbd.RADOSClient(self, self._ceph_backup_pool) as client:
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             base_rbd = self.rbd.Image(client.ioctx, base_name, read_only=True)
             try:
                 restore_point = self._get_backup_snap_name(base_rbd, base_name,
@@ -955,7 +955,7 @@ class CephBackupDriver(BackupDriver):
         base_name = self._get_backup_base_name(backup['volume_id'],
                                                diff_format=True)
 
-        with drivers.rbd.RADOSClient(self, self._ceph_backup_pool) as client:
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
             diff_allowed, restore_point = \
                 self._diff_restore_allowed(base_name, backup, volume,
                                            volume_file, client)
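
The import change at the top of ceph.py above is more than an alias rename:
`import cinder.volume.drivers as drivers` binds only the package, and
`drivers.rbd` exists as an attribute only if some other code has already
imported the rbd submodule. That is precisely what does not happen when a
non-rbd volume driver is configured, which is why the failure only appeared
in that configuration. Importing the submodule explicitly removes the
load-order dependency. A minimal sketch with a hypothetical package layout
(pkg/ and pkg/mod.py are illustrative names):

    # Layout: pkg/__init__.py (empty), pkg/mod.py (defines Client)

    import pkg
    pkg.mod.Client()        # AttributeError unless something else already
                            # ran `import pkg.mod` and bound the attribute

    import pkg.mod as mod   # forces the submodule import itself, so this
    mod.Client()            # works regardless of what else is loaded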
cinder/volume/drivers/rbd.py
index 192fd47ca939d7187dc942b681d595f79de8bda4..0a417b47c69fcac98cbb1c0b76aa7932e193ca66 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -22,7 +22,6 @@ import urllib
 
 from oslo.config import cfg
 
-from cinder.backup.drivers import ceph as ceph_backup
 from cinder import exception
 from cinder.image import image_utils
 from cinder.openstack.common import fileutils
@@ -300,7 +299,12 @@ class RBDDriver(driver.VolumeDriver):
         There should only ever be one but accept all since they need to be
         deleted before the volume can be.
         """
-        return ceph_backup.CephBackupDriver.get_backup_snaps(rbd_image)
+        # NOTE(dosaboy): we do the import here otherwise we get import conflict
+        # issues between the rbd driver and the ceph backup driver. These
+        # issues only seem to occur when NOT using them together and are
+        # triggered when the ceph backup driver imports the rbd volume driver.
+        from cinder.backup.drivers import ceph
+        return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
 
     def _get_mon_addrs(self):
         args = ['ceph', 'mon', 'dump', '--format=json']
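
The function-local import added in `_get_backup_snaps` is the other half of
breaking the cycle: the rbd volume driver no longer pulls in the ceph backup
driver at module load, only at call time, when both modules are guaranteed
to be fully initialised. A stripped-down sketch of the pattern (toy module
names, not the real drivers):

    # volume_driver.py -- note: no module-level import of backup_driver

    class RBDDriver(object):
        def _get_backup_snaps(self, rbd_image):
            # Deferred import: resolved on first call, long after both
            # modules have finished executing their top-level code.
            from backup_driver import CephBackupDriver
            return CephBackupDriver.get_backup_snaps(rbd_image)

The runtime cost is negligible: after the first call the import statement is
a sys.modules dictionary lookup, and deployments that never combine the two
drivers never pay for the backup driver's import at all.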