]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Change volume and snapshot stuck creating to error
authorYasuaki Nagata <yasuaki.nagata@intellilink.co.jp>
Thu, 22 Jan 2015 08:45:08 +0000 (17:45 +0900)
committerIvan Kolodyazhny <e0ne@e0ne.info>
Thu, 26 Mar 2015 08:11:45 +0000 (10:11 +0200)
If the volume service goes down after the status of a
volume has been set to 'creating', the volume remains
stuck in the 'creating' status.

The status of the volume is still 'creating' even after the
volume service is restarted, so it is impossible for the user
to remove it without force-delete.

With this patch, in order to keep consistency with Nova,
a volume stuck in the 'creating' state is properly shifted to 'error'.

Closes-Bug: #1310526
Co-Authored-By: Ivan Kolodyazhny <e0ne@e0ne.info>
Change-Id: I89ef201f25b739cc443cb2920de2bd6ce89d97e1

cinder/db/api.py
cinder/db/sqlalchemy/api.py
cinder/tests/test_db_api.py
cinder/tests/test_volume.py
cinder/volume/manager.py

index 2f27d25e920623e50cd7af419a561b3d80fdbd72..1717631a425f72385fca1aa7113ddfd324c7a67f 100644 (file)
@@ -290,6 +290,15 @@ def snapshot_get_all_by_project(context, project_id):
     return IMPL.snapshot_get_all_by_project(context, project_id)
 
 
+def snapshot_get_by_host(context, host, filters=None):
+    """Get all snapshots belonging to a host.
+
+    :param host: Include snapshots only for the specified host.
+    :param filters: Filters for the query in the form of key/value.
+    """
+    return IMPL.snapshot_get_by_host(context, host, filters)
+
+
 def snapshot_get_all_for_cgsnapshot(context, project_id):
     """Get all snapshots belonging to a cgsnapshot."""
     return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
index 5a88fccad1139cadd81035ca646f862dbdda112d..5c68e4714b51ad8d53b0020c09d3ac66e9b6953d 100644 (file)
@@ -1936,6 +1936,18 @@ def snapshot_get_all_for_volume(context, volume_id):
         all()
 
 
+@require_context
+def snapshot_get_by_host(context, host, filters=None):
+    query = model_query(context, models.Snapshot, read_deleted='no',
+                        project_only=True)
+    if filters:
+        query = query.filter_by(**filters)
+
+    return query.join(models.Snapshot.volume).filter(
+        models.Volume.host == host).options(
+            joinedload('snapshot_metadata')).all()
+
+
 @require_context
 def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
     return model_query(context, models.Snapshot, read_deleted='no',
index eb3e2adc7eb8ea523eea948f88cee78867d5a4f7..8e3ba83d63789fa5e7f0e7aa4b9355ca754b4a4f 100644 (file)
@@ -934,6 +934,34 @@ class DBAPISnapshotTestCase(BaseTest):
                                         db.snapshot_get_all(self.ctxt),
                                         ignored_keys=['metadata', 'volume'])
 
+    def test_snapshot_get_by_host(self):
+        db.volume_create(self.ctxt, {'id': 1, 'host': 'host1'})
+        db.volume_create(self.ctxt, {'id': 2, 'host': 'host2'})
+        snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1})
+        snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2,
+                                                   'status': 'error'})
+
+        self._assertEqualListsOfObjects([snapshot1],
+                                        db.snapshot_get_by_host(
+                                            self.ctxt,
+                                            'host1'),
+                                        ignored_keys='volume')
+        self._assertEqualListsOfObjects([snapshot2],
+                                        db.snapshot_get_by_host(
+                                            self.ctxt,
+                                            'host2'),
+                                        ignored_keys='volume')
+        self._assertEqualListsOfObjects([],
+                                        db.snapshot_get_by_host(
+                                            self.ctxt,
+                                            'host2', {'status': 'available'}),
+                                        ignored_keys='volume')
+        self._assertEqualListsOfObjects([snapshot2],
+                                        db.snapshot_get_by_host(
+                                            self.ctxt,
+                                            'host2', {'status': 'error'}),
+                                        ignored_keys='volume')
+
     def test_snapshot_metadata_get(self):
         metadata = {'a': 'b', 'c': 'd'}
         db.volume_create(self.ctxt, {'id': 1})
index 7a0e5e90e14c86ca40c9559ceb46e59e8b2b1a66..68ca32ca1b59e546296ba78016b6c64870ef4293 100644 (file)
@@ -326,6 +326,45 @@ class VolumeTestCase(BaseVolumeTestCase):
             manager.init_host()
         self.assertEqual(0, mock_add_p_task.call_count)
 
+    def test_create_volume_fails_with_creating_and_downloading_status(self):
+        """Test init_host in case of volume.
+
+        While the status of volume is 'creating' or 'downloading',
+        volume process down.
+        After process restarting this 'creating' status is changed to 'error'.
+        """
+        for status in ['creating', 'downloading']:
+            volume = tests_utils.create_volume(self.context, status=status,
+                                               size=0, host=CONF.host)
+
+            volume_id = volume['id']
+            self.volume.init_host()
+            volume = db.volume_get(context.get_admin_context(), volume_id)
+            self.assertEqual('error', volume['status'])
+            self.volume.delete_volume(self.context, volume_id)
+
+    def test_create_snapshot_fails_with_creating_status(self):
+        """Test init_host in case of snapshot.
+
+        While the status of snapshot is 'creating', volume process
+        down. After process restarting this 'creating' status is
+        changed to 'error'.
+        """
+        volume = tests_utils.create_volume(self.context,
+                                           **self.volume_params)
+        snapshot = tests_utils.create_snapshot(self.context,
+                                               volume['id'],
+                                               status='creating')
+        snap_id = snapshot['id']
+        self.volume.init_host()
+
+        snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
+
+        self.assertEqual('error', snapshot_obj.status)
+
+        self.volume.delete_snapshot(self.context, snapshot_obj)
+        self.volume.delete_volume(self.context, volume['id'])
+
     @mock.patch.object(QUOTAS, 'reserve')
     @mock.patch.object(QUOTAS, 'commit')
     @mock.patch.object(QUOTAS, 'rollback')
index 3d605ac038a3a6067016178a78d72b86708e811a..bd728d1e5aaff2626fe7576c5ee398320a405111 100644 (file)
@@ -331,15 +331,33 @@ class VolumeManager(manager.SchedulerDependentManager):
                         self.db.volume_update(ctxt,
                                               volume['id'],
                                               {'status': 'error'})
-                elif volume['status'] == 'downloading':
-                    LOG.info(_LI("volume %s stuck in a downloading state"),
-                             volume['id'])
-                    self.driver.clear_download(ctxt, volume)
+                elif volume['status'] in ('downloading', 'creating'):
+                    LOG.info(_LI("volume %(volume_id)s stuck in "
+                                 "%(volume_stat)s state. "
+                                 "Changing to error state."),
+                             {'volume_id': volume['id'],
+                              'volume_stat': volume['status']})
+
+                    if volume['status'] == 'downloading':
+                        self.driver.clear_download(ctxt, volume)
                     self.db.volume_update(ctxt,
                                           volume['id'],
                                           {'status': 'error'})
                 else:
                     LOG.info(_LI("volume %s: skipping export"), volume['id'])
+            snapshots = self.db.snapshot_get_by_host(ctxt,
+                                                     self.host,
+                                                     {'status': 'creating'})
+            for snapshot in snapshots:
+                LOG.info(_LI("snapshot %(snap_id)s stuck in "
+                             "%(snap_stat)s state. "
+                             "Changing to error state."),
+                         {'snap_id': snapshot['id'],
+                          'snap_stat': snapshot['status']})
+
+                self.db.snapshot_update(ctxt,
+                                        snapshot['id'],
+                                        {'status': 'error'})
         except Exception as ex:
             LOG.error(_LE("Error encountered during "
                           "re-exporting phase of driver initialization: "