RemoteFS: Move Nova snapshot code into RemoteFSSnapDriver
author    Eric Harney <eharney@redhat.com>
          Mon, 8 Dec 2014 17:50:33 +0000 (12:50 -0500)
committer Eric Harney <eharney@redhat.com>
          Tue, 9 Dec 2014 16:17:12 +0000 (11:17 -0500)
Implements bp: remotefs-snaps

Change-Id: I2e92322ff9c80de123dc281f790a7c2a89c4d62e

cinder/tests/test_glusterfs.py
cinder/volume/drivers/glusterfs.py
cinder/volume/drivers/remotefs.py

cinder/tests/test_glusterfs.py
index d06cf69e8804c7fb56fa85e2e4fb6186359280d8..2079c0da6bac49416f6107d61b2b2622133254c8 100644
@@ -1465,7 +1465,7 @@ class GlusterFsDriverTestCase(test.TestCase):
 
         mox.ReplayAll()
 
-        self.assertRaisesAndMessageMatches(exception.GlusterfsException,
+        self.assertRaisesAndMessageMatches(exception.RemoteFSException,
                                            'Unable to delete snapshot',
                                            drv.delete_snapshot,
                                            snap_ref)
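
Why this assertion changes: the snapshot-deletion path now runs in the shared RemoteFSSnapDriver, which raises exception.RemoteFSException rather than the driver-specific exception.GlusterfsException. Any external caller that caught the old class needs the same update; a minimal sketch of such a call site (delete_snapshot_checked is a hypothetical helper, not part of this change):

    import logging

    from cinder import exception

    LOG = logging.getLogger(__name__)

    def delete_snapshot_checked(drv, snap_ref):
        # Failures from the shared base-class code now surface as
        # RemoteFSException; GlusterfsException is no longer raised here.
        try:
            drv.delete_snapshot(snap_ref)
        except exception.RemoteFSException:
            LOG.exception('Unable to delete snapshot %s', snap_ref['id'])
            raise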
cinder/volume/drivers/glusterfs.py
index cc7e71c313193e50a815e110e17e571f88fbb62b..c60e15d59bc0738392a660d1596f038709edf18e 100644
 import errno
 import os
 import stat
-import time
 
 from oslo.concurrency import processutils
 from oslo.config import cfg
 from oslo.utils import units
 
 from cinder.brick.remotefs import remotefs as remotefs_brick
-from cinder import compute
-from cinder import db
 from cinder import exception
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
@@ -116,7 +113,6 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
         self._remotefsclient = None
         super(GlusterfsDriver, self).__init__(*args, **kwargs)
         self.configuration.append_config_values(volume_opts)
-        self._nova = None
         self.base = getattr(self.configuration,
                             'glusterfs_mount_point_base',
                             CONF.glusterfs_mount_point_base)
@@ -134,8 +130,6 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
         """Any initialization the volume driver does while starting."""
         super(GlusterfsDriver, self).do_setup(context)
 
-        self._nova = compute.API()
-
         config = self.configuration.glusterfs_shares_config
         if not config:
             msg = (_("There's no Gluster config file configured (%s)") %
@@ -312,100 +306,6 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
         """Apply locking to the delete snapshot operation."""
         self._delete_snapshot(snapshot)
 
-    def _delete_snapshot_online(self, context, snapshot, info):
-        # Update info over the course of this method
-        # active file never changes
-        info_path = self._local_path_volume(snapshot['volume']) + '.info'
-        snap_info = self._read_info_file(info_path)
-
-        if info['active_file'] == info['snapshot_file']:
-            # blockRebase/Pull base into active
-            # info['base'] => snapshot_file
-
-            file_to_delete = info['base_file']
-            if info['base_id'] is None:
-                # Passing base=none to blockRebase ensures that
-                # libvirt blanks out the qcow2 backing file pointer
-                new_base = None
-            else:
-                new_base = info['new_base_file']
-                snap_info[info['base_id']] = info['snapshot_file']
-
-            delete_info = {'file_to_merge': new_base,
-                           'merge_target_file': None,  # current
-                           'type': 'qcow2',
-                           'volume_id': snapshot['volume']['id']}
-
-            del(snap_info[snapshot['id']])
-        else:
-            # blockCommit snapshot into base
-            # info['base'] <= snapshot_file
-            # delete record of snapshot
-            file_to_delete = info['snapshot_file']
-
-            delete_info = {'file_to_merge': info['snapshot_file'],
-                           'merge_target_file': info['base_file'],
-                           'type': 'qcow2',
-                           'volume_id': snapshot['volume']['id']}
-
-            del(snap_info[snapshot['id']])
-
-        try:
-            self._nova.delete_volume_snapshot(
-                context,
-                snapshot['id'],
-                delete_info)
-        except Exception as e:
-            LOG.error(_LE('Call to Nova delete snapshot failed'))
-            LOG.exception(e)
-            raise e
-
-        # Loop and wait for result
-        # Nova will call Cinderclient to update the status in the database
-        # An update of progress = '90%' means that Nova is done
-        seconds_elapsed = 0
-        increment = 1
-        timeout = 7200
-        while True:
-            s = db.snapshot_get(context, snapshot['id'])
-
-            if s['status'] == 'deleting':
-                if s['progress'] == '90%':
-                    # Nova tasks completed successfully
-                    break
-                else:
-                    msg = ('status of snapshot %s is '
-                           'still "deleting"... waiting') % snapshot['id']
-                    LOG.debug(msg)
-                    time.sleep(increment)
-                    seconds_elapsed += increment
-            else:
-                msg = _('Unable to delete snapshot %(id)s, '
-                        'status: %(status)s.') % {'id': snapshot['id'],
-                                                  'status': s['status']}
-                raise exception.GlusterfsException(msg)
-
-            if 10 < seconds_elapsed <= 20:
-                increment = 2
-            elif 20 < seconds_elapsed <= 60:
-                increment = 5
-            elif 60 < seconds_elapsed:
-                increment = 10
-
-            if seconds_elapsed > timeout:
-                msg = _('Timed out while waiting for Nova update '
-                        'for deletion of snapshot %(id)s.') %\
-                    {'id': snapshot['id']}
-                raise exception.GlusterfsException(msg)
-
-        # Write info file updated above
-        self._write_info_file(info_path, snap_info)
-
-        # Delete stale file
-        path_to_delete = os.path.join(
-            self._local_volume_dir(snapshot['volume']), file_to_delete)
-        self._execute('rm', '-f', path_to_delete, run_as_root=True)
-
     def ensure_export(self, ctx, volume):
         """Synchronously recreates an export for a logical volume."""
 
@@ -638,68 +538,3 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver):
 
         return super(GlusterfsDriver, self).backup_volume(
             context, backup, backup_service)
-
-    def _create_snapshot_online(self, snapshot, backing_filename,
-                                new_snap_path):
-        # Perform online snapshot via Nova
-        context = snapshot['context']
-
-        self._do_create_snapshot(snapshot,
-                                 backing_filename,
-                                 new_snap_path)
-
-        connection_info = {
-            'type': 'qcow2',
-            'new_file': os.path.basename(new_snap_path),
-            'snapshot_id': snapshot['id']
-        }
-
-        try:
-            result = self._nova.create_volume_snapshot(
-                context,
-                snapshot['volume_id'],
-                connection_info)
-            LOG.debug('nova call result: %s' % result)
-        except Exception as e:
-            LOG.error(_LE('Call to Nova to create snapshot failed'))
-            LOG.exception(e)
-            raise e
-
-        # Loop and wait for result
-        # Nova will call Cinderclient to update the status in the database
-        # An update of progress = '90%' means that Nova is done
-        seconds_elapsed = 0
-        increment = 1
-        timeout = 600
-        while True:
-            s = db.snapshot_get(context, snapshot['id'])
-
-            if s['status'] == 'creating':
-                if s['progress'] == '90%':
-                    # Nova tasks completed successfully
-                    break
-
-                time.sleep(increment)
-                seconds_elapsed += increment
-            elif s['status'] == 'error':
-
-                msg = _('Nova returned "error" status '
-                        'while creating snapshot.')
-                raise exception.RemoteFSException(msg)
-
-            LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
-                'id': snapshot['id'],
-                'status': s['status']
-            })
-
-            if 10 < seconds_elapsed <= 20:
-                increment = 2
-            elif 20 < seconds_elapsed <= 60:
-                increment = 5
-            elif 60 < seconds_elapsed:
-                increment = 10
-
-            if seconds_elapsed > timeout:
-                msg = _('Timed out while waiting for Nova update '
-                        'for creation of snapshot %s.') % snapshot['id']
-                raise exception.RemoteFSException(msg)
cinder/volume/drivers/remotefs.py
index c0a3520542e9af24ef5a00d280fcdb0efda13e29..2e20e9cb530d3bee465be654604f9cdad191b9fb 100644
@@ -19,11 +19,14 @@ import json
 import os
 import re
 import tempfile
+import time
 
 from oslo.concurrency import processutils as putils
 from oslo.config import cfg
 from oslo.utils import units
 
+from cinder import compute
+from cinder import db
 from cinder import exception
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
@@ -546,8 +549,14 @@ class RemoteFSSnapDriver(RemoteFSDriver):
     def __init__(self, *args, **kwargs):
         self._remotefsclient = None
         self.base = None
+        self._nova = None
         super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
 
+    def do_setup(self, context):
+        super(RemoteFSSnapDriver, self).do_setup(context)
+
+        self._nova = compute.API()
+
     def _local_volume_dir(self, volume):
         share = volume['provider_location']
         local_dir = self._get_mount_point_for_share(share)
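
With the compute API handle created in the base class's do_setup(), a RemoteFS driver only needs to chain up to get Nova-assisted snapshots; a minimal sketch of a subclass (MyFSDriver is hypothetical, mirroring the GlusterfsDriver changes above):

    from cinder.volume.drivers import remotefs as remotefs_drv

    class MyFSDriver(remotefs_drv.RemoteFSSnapDriver):
        """Hypothetical driver built on RemoteFSSnapDriver."""

        def do_setup(self, context):
            # The base class now sets self._nova = compute.API(),
            # so no per-driver Nova wiring is required here.
            super(MyFSDriver, self).do_setup(context)
            # Driver-specific share/config validation would follow.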
@@ -834,7 +843,7 @@ class RemoteFSSnapDriver(RemoteFSDriver):
         merge, and call to Nova to perform this operation.
 
         :raises: InvalidVolume if status not acceptable
-        :raises: RemotefsException(msg) if operation fails
+        :raises: RemoteFSException(msg) if operation fails
         :returns: None
 
         """
@@ -1168,7 +1177,159 @@ class RemoteFSSnapDriver(RemoteFSDriver):
 
     def _create_snapshot_online(self, snapshot, backing_filename,
                                 new_snap_path):
-        raise NotImplementedError()
+        # Perform online snapshot via Nova
+        context = snapshot['context']
+
+        self._do_create_snapshot(snapshot,
+                                 backing_filename,
+                                 new_snap_path)
+
+        connection_info = {
+            'type': 'qcow2',
+            'new_file': os.path.basename(new_snap_path),
+            'snapshot_id': snapshot['id']
+        }
+
+        try:
+            result = self._nova.create_volume_snapshot(
+                context,
+                snapshot['volume_id'],
+                connection_info)
+            LOG.debug('nova call result: %s' % result)
+        except Exception as e:
+            LOG.error(_LE('Call to Nova to create snapshot failed'))
+            LOG.exception(e)
+            raise e
+
+        # Loop and wait for result
+        # Nova will call Cinderclient to update the status in the database
+        # An update of progress = '90%' means that Nova is done
+        seconds_elapsed = 0
+        increment = 1
+        timeout = 600
+        while True:
+            s = db.snapshot_get(context, snapshot['id'])
+
+            if s['status'] == 'creating':
+                if s['progress'] == '90%':
+                    # Nova tasks completed successfully
+                    break
+
+                time.sleep(increment)
+                seconds_elapsed += increment
+            elif s['status'] == 'error':
+
+                msg = _('Nova returned "error" status '
+                        'while creating snapshot.')
+                raise exception.RemoteFSException(msg)
+
+            LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
+                'id': snapshot['id'],
+                'status': s['status']
+            })
+
+            if 10 < seconds_elapsed <= 20:
+                increment = 2
+            elif 20 < seconds_elapsed <= 60:
+                increment = 5
+            elif 60 < seconds_elapsed:
+                increment = 10
+
+            if seconds_elapsed > timeout:
+                msg = _('Timed out while waiting for Nova update '
+                        'for creation of snapshot %s.') % snapshot['id']
+                raise exception.RemoteFSException(msg)
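
Both wait loops in this class poll db.snapshot_get() until Nova reports progress '90%' (its "done" sentinel), backing the sleep interval off from 1s to 2s, 5s and 10s; the create path above times out after 600s, the delete path below after 7200s. A standalone sketch of the pattern (poll_nova_task and get_status are hypothetical names):

    import time

    def poll_nova_task(get_status, busy_status, timeout):
        # get_status() returns (status, progress) for the snapshot row.
        seconds_elapsed = 0
        increment = 1
        while True:
            status, progress = get_status()
            if status == busy_status and progress == '90%':
                return  # Nova finished its side of the operation
            if status != busy_status:
                raise RuntimeError('unexpected status: %s' % status)
            time.sleep(increment)
            seconds_elapsed += increment
            # Back off: 1s for the first 10s, then 2s, 5s, 10s.
            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif seconds_elapsed > 60:
                increment = 10
            if seconds_elapsed > timeout:
                raise RuntimeError('timed out after %ss' % seconds_elapsed)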
 
     def _delete_snapshot_online(self, context, snapshot, info):
-        raise NotImplementedError()
+        # Update info over the course of this method
+        # active file never changes
+        info_path = self._local_path_volume(snapshot['volume']) + '.info'
+        snap_info = self._read_info_file(info_path)
+
+        if info['active_file'] == info['snapshot_file']:
+            # blockRebase/Pull base into active
+            # info['base'] => snapshot_file
+
+            file_to_delete = info['base_file']
+            if info['base_id'] is None:
+                # Passing base=none to blockRebase ensures that
+                # libvirt blanks out the qcow2 backing file pointer
+                new_base = None
+            else:
+                new_base = info['new_base_file']
+                snap_info[info['base_id']] = info['snapshot_file']
+
+            delete_info = {'file_to_merge': new_base,
+                           'merge_target_file': None,  # current
+                           'type': 'qcow2',
+                           'volume_id': snapshot['volume']['id']}
+
+            del(snap_info[snapshot['id']])
+        else:
+            # blockCommit snapshot into base
+            # info['base'] <= snapshot_file
+            # delete record of snapshot
+            file_to_delete = info['snapshot_file']
+
+            delete_info = {'file_to_merge': info['snapshot_file'],
+                           'merge_target_file': info['base_file'],
+                           'type': 'qcow2',
+                           'volume_id': snapshot['volume']['id']}
+
+            del(snap_info[snapshot['id']])
+
+        try:
+            self._nova.delete_volume_snapshot(
+                context,
+                snapshot['id'],
+                delete_info)
+        except Exception as e:
+            LOG.error(_LE('Call to Nova delete snapshot failed'))
+            LOG.exception(e)
+            raise e
+
+        # Loop and wait for result
+        # Nova will call Cinderclient to update the status in the database
+        # An update of progress = '90%' means that Nova is done
+        seconds_elapsed = 0
+        increment = 1
+        timeout = 7200
+        while True:
+            s = db.snapshot_get(context, snapshot['id'])
+
+            if s['status'] == 'deleting':
+                if s['progress'] == '90%':
+                    # Nova tasks completed successfully
+                    break
+                else:
+                    msg = ('status of snapshot %s is '
+                           'still "deleting"... waiting') % snapshot['id']
+                    LOG.debug(msg)
+                    time.sleep(increment)
+                    seconds_elapsed += increment
+            else:
+                msg = _('Unable to delete snapshot %(id)s, '
+                        'status: %(status)s.') % {'id': snapshot['id'],
+                                                  'status': s['status']}
+                raise exception.RemoteFSException(msg)
+
+            if 10 < seconds_elapsed <= 20:
+                increment = 2
+            elif 20 < seconds_elapsed <= 60:
+                increment = 5
+            elif 60 < seconds_elapsed:
+                increment = 10
+
+            if seconds_elapsed > timeout:
+                msg = _('Timed out while waiting for Nova update '
+                        'for deletion of snapshot %(id)s.') %\
+                    {'id': snapshot['id']}
+                raise exception.RemoteFSException(msg)
+
+        # Write info file updated above
+        self._write_info_file(info_path, snap_info)
+
+        # Delete stale file
+        path_to_delete = os.path.join(
+            self._local_volume_dir(snapshot['volume']), file_to_delete)
+        self._execute('rm', '-f', path_to_delete, run_as_root=True)
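
The branch at the top of _delete_snapshot_online picks the qcow2 merge direction for Nova: if the snapshot file is the active file, Nova blockRebases (pulls) the base into it and the old base file becomes stale; otherwise Nova blockCommits the snapshot down into its base and the snapshot file becomes stale. A standalone sketch of that decision (merge_direction is a hypothetical name; keys mirror the info dict above):

    def merge_direction(info):
        if info['active_file'] == info['snapshot_file']:
            # base_id None means libvirt should blank out the qcow2
            # backing-file pointer, so there is nothing to merge in.
            new_base = (None if info['base_id'] is None
                        else info['new_base_file'])
            return {'file_to_merge': new_base,
                    'merge_target_file': None,  # merge into current/active
                    'stale_file': info['base_file']}
        return {'file_to_merge': info['snapshot_file'],
                'merge_target_file': info['base_file'],
                'stale_file': info['snapshot_file']}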