From: Yasuaki Nagata
Date: Thu, 22 Jan 2015 08:45:08 +0000 (+0900)
Subject: Change volume and snapshot stuck creating to error
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=e71006c49a280d53146fdbdfef9ce307d0a1d0de;p=openstack-build%2Fcinder-build.git

Change volume and snapshot stuck creating to error

If the volume service goes down after the status of a volume has been
set to 'creating', the volume remains stuck in the 'creating' status.
The status of the volume is still 'creating' even if the volume service
is restarted, and it is impossible for the user to remove the volume
without force-delete.

With this patch, in order to keep consistency with Nova, the volume
state 'creating' is properly shifted to 'error'.

Closes-Bug: #1310526
Co-Authored-By: Ivan Kolodyazhny
Change-Id: I89ef201f25b739cc443cb2920de2bd6ce89d97e1
---

diff --git a/cinder/db/api.py b/cinder/db/api.py
index 2f27d25e9..1717631a4 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -290,6 +290,15 @@ def snapshot_get_all_by_project(context, project_id):
     return IMPL.snapshot_get_all_by_project(context, project_id)
 
 
+def snapshot_get_by_host(context, host, filters=None):
+    """Get all snapshots belonging to a host.
+
+    :param host: Include snapshots only for the specified host.
+    :param filters: Filters for the query in the form of key/value.
+ """ + return IMPL.snapshot_get_by_host(context, host, filters) + + def snapshot_get_all_for_cgsnapshot(context, project_id): """Get all snapshots belonging to a cgsnapshot.""" return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id) diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index 5a88fccad..5c68e4714 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -1936,6 +1936,18 @@ def snapshot_get_all_for_volume(context, volume_id): all() +@require_context +def snapshot_get_by_host(context, host, filters=None): + query = model_query(context, models.Snapshot, read_deleted='no', + project_only=True) + if filters: + query = query.filter_by(**filters) + + return query.join(models.Snapshot.volume).filter( + models.Volume.host == host).options( + joinedload('snapshot_metadata')).all() + + @require_context def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): return model_query(context, models.Snapshot, read_deleted='no', diff --git a/cinder/tests/test_db_api.py b/cinder/tests/test_db_api.py index eb3e2adc7..8e3ba83d6 100644 --- a/cinder/tests/test_db_api.py +++ b/cinder/tests/test_db_api.py @@ -934,6 +934,34 @@ class DBAPISnapshotTestCase(BaseTest): db.snapshot_get_all(self.ctxt), ignored_keys=['metadata', 'volume']) + def test_snapshot_get_by_host(self): + db.volume_create(self.ctxt, {'id': 1, 'host': 'host1'}) + db.volume_create(self.ctxt, {'id': 2, 'host': 'host2'}) + snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1}) + snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2, + 'status': 'error'}) + + self._assertEqualListsOfObjects([snapshot1], + db.snapshot_get_by_host( + self.ctxt, + 'host1'), + ignored_keys='volume') + self._assertEqualListsOfObjects([snapshot2], + db.snapshot_get_by_host( + self.ctxt, + 'host2'), + ignored_keys='volume') + self._assertEqualListsOfObjects([], + db.snapshot_get_by_host( + self.ctxt, + 'host2', {'status': 'available'}), + 
ignored_keys='volume')
+        self._assertEqualListsOfObjects([snapshot2],
+                                        db.snapshot_get_by_host(
+                                            self.ctxt,
+                                            'host2', {'status': 'error'}),
+                                        ignored_keys='volume')
+
     def test_snapshot_metadata_get(self):
         metadata = {'a': 'b', 'c': 'd'}
         db.volume_create(self.ctxt, {'id': 1})
diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py
index 7a0e5e90e..68ca32ca1 100644
--- a/cinder/tests/test_volume.py
+++ b/cinder/tests/test_volume.py
@@ -326,6 +326,45 @@ class VolumeTestCase(BaseVolumeTestCase):
             manager.init_host()
         self.assertEqual(0, mock_add_p_task.call_count)
 
+    def test_create_volume_fails_with_creating_and_downloading_status(self):
+        """Test init_host in case of volume.
+
+        While the status of a volume is 'creating' or 'downloading',
+        the volume process goes down.
+        After the process restarts, 'creating' is changed to 'error'.
+        """
+        for status in ['creating', 'downloading']:
+            volume = tests_utils.create_volume(self.context, status=status,
+                                               size=0, host=CONF.host)
+
+            volume_id = volume['id']
+            self.volume.init_host()
+            volume = db.volume_get(context.get_admin_context(), volume_id)
+            self.assertEqual('error', volume['status'])
+            self.volume.delete_volume(self.context, volume_id)
+
+    def test_create_snapshot_fails_with_creating_status(self):
+        """Test init_host in case of snapshot.
+
+        While the status of a snapshot is 'creating', the volume
+        process goes down. After the process restarts, the 'creating'
+        status is changed to 'error'.
+ """ + volume = tests_utils.create_volume(self.context, + **self.volume_params) + snapshot = tests_utils.create_snapshot(self.context, + volume['id'], + status='creating') + snap_id = snapshot['id'] + self.volume.init_host() + + snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) + + self.assertEqual('error', snapshot_obj.status) + + self.volume.delete_snapshot(self.context, snapshot_obj) + self.volume.delete_volume(self.context, volume['id']) + @mock.patch.object(QUOTAS, 'reserve') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(QUOTAS, 'rollback') diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 3d605ac03..bd728d1e5 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -331,15 +331,33 @@ class VolumeManager(manager.SchedulerDependentManager): self.db.volume_update(ctxt, volume['id'], {'status': 'error'}) - elif volume['status'] == 'downloading': - LOG.info(_LI("volume %s stuck in a downloading state"), - volume['id']) - self.driver.clear_download(ctxt, volume) + elif volume['status'] in ('downloading', 'creating'): + LOG.info(_LI("volume %(volume_id)s stuck in " + "%(volume_stat)s state. " + "Changing to error state."), + {'volume_id': volume['id'], + 'volume_stat': volume['status']}) + + if volume['status'] == 'downloading': + self.driver.clear_download(ctxt, volume) self.db.volume_update(ctxt, volume['id'], {'status': 'error'}) else: LOG.info(_LI("volume %s: skipping export"), volume['id']) + snapshots = self.db.snapshot_get_by_host(ctxt, + self.host, + {'status': 'creating'}) + for snapshot in snapshots: + LOG.info(_LI("snapshot %(snap_id)s stuck in " + "%(snap_stat)s state. " + "Changing to error state."), + {'snap_id': snapshot['id'], + 'snap_stat': snapshot['status']}) + + self.db.snapshot_update(ctxt, + snapshot['id'], + {'status': 'error'}) except Exception as ex: LOG.error(_LE("Error encountered during " "re-exporting phase of driver initialization: "