From: John Griffith
Date: Wed, 2 Mar 2016 04:03:33 +0000 (+0000)
Subject: Use get_by_args instead of host_and_topic
X-Git-Url: https://review.fuel-infra.org/gitweb?a=commitdiff_plain;h=cd8e3c397877f25b3628b501744acd124843518e;p=openstack-build%2Fcinder-build.git

Use get_by_args instead of host_and_topic

The replication v2.1 code (cheesecake) requires a number of service
queries.  The default get_by_host_and_topic, however, doesn't include
an interface to get disabled services, which is required at times.

Rather than adding a new version of get_by_host_and_topic and
introducing the arg, we probably shouldn't have been using this
particular query at all.  The only service we're interested in is the
cinder-volume service; granted, in most cases volume_topic is set to
this as a default, but we shouldn't be relying on it.

This patch changes the get_by_host_and_topic calls to use the more
appropriate (in this case) get_by_args and specifically requests the
cinder-volume binary.

Note that for manage_existing, we check the returned service and
ensure that disabled == False; otherwise we raise an exception.

Change-Id: I2e8c5142c589af6c1ddadeb661f86e7194faac7a
Closes-Bug: #1552006
---

diff --git a/cinder/tests/unit/api/contrib/test_snapshot_manage.py b/cinder/tests/unit/api/contrib/test_snapshot_manage.py
index a6ff23998..3c2787ed9 100644
--- a/cinder/tests/unit/api/contrib/test_snapshot_manage.py
+++ b/cinder/tests/unit/api/contrib/test_snapshot_manage.py
@@ -68,7 +68,7 @@ class SnapshotManageTest(test.TestCase):
 
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot')
     @mock.patch('cinder.volume.api.API.create_snapshot_in_db')
-    @mock.patch('cinder.db.service_get_by_host_and_topic')
+    @mock.patch('cinder.db.service_get_by_args')
     def test_manage_snapshot_ok(self, mock_db,
                                 mock_create_snapshot, mock_rpcapi):
         """Test successful manage volume execution.
@@ -79,7 +79,9 @@ class SnapshotManageTest(test.TestCase):
         code to the caller.
         """
         ctxt = context.RequestContext('admin', 'fake', True)
-        mock_db.return_value = fake_service.fake_service_obj(ctxt)
+        mock_db.return_value = fake_service.fake_service_obj(
+            ctxt,
+            binary='cinder-volume')
         body = {'snapshot': {'volume_id': 'fake_volume_id',
                              'ref': 'fake_ref'}}
         res = self._get_resp(body)
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index a0b6107f2..c2c50ca1e 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -1524,13 +1524,18 @@ class API(base.Base):
         elevated = context.elevated()
         try:
             svc_host = volume_utils.extract_host(host, 'backend')
-            service = objects.Service.get_by_host_and_topic(
-                elevated, svc_host, CONF.volume_topic)
+            service = objects.Service.get_by_args(
+                elevated, svc_host, 'cinder-volume')
         except exception.ServiceNotFound:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE('Unable to find service: %(service)s for '
                               'given host: %(host)s.'),
                           {'service': CONF.volume_topic, 'host': host})
+        if service.disabled:
+            LOG.error(_LE('Unable to manage_existing volume on a disabled '
+                          'service.'))
+            raise exception.ServiceUnavailable()
+
         availability_zone = service.get('availability_zone')
 
         manage_what = {
@@ -1569,13 +1574,19 @@ class API(base.Base):
                                  metadata=None):
         host = volume_utils.extract_host(volume['host'])
         try:
-            objects.Service.get_by_host_and_topic(context.elevated(), host,
-                                                  CONF.volume_topic)
+            # NOTE(jdg): We don't use this, we just make sure it's valid
+            # and exists before sending off the call
+            service = objects.Service.get_by_args(
+                context.elevated(), host, 'cinder-volume')
         except exception.ServiceNotFound:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE('Unable to find service: %(service)s for '
                               'given host: %(host)s.'),
                           {'service': CONF.volume_topic, 'host': host})
+        if service.disabled:
+            LOG.error(_LE('Unable to manage_existing snapshot on a disabled '
+                          'service.'))
+            raise exception.ServiceUnavailable()
 
         snapshot_object = self.create_snapshot_in_db(context, volume, name,
                                                      description, False,
@@ -1595,7 +1606,7 @@
         svc_host = volume_utils.extract_host(host, 'backend')
 
         service = objects.Service.get_by_args(
-            ctxt, svc_host, CONF.volume_topic)
+            ctxt, svc_host, 'cinder-volume')
         expected = {'replication_status': [fields.ReplicationStatus.ENABLED,
                     fields.ReplicationStatus.FAILED_OVER]}
         result = service.conditional_update(
@@ -1618,9 +1629,8 @@
         ctxt = context.get_admin_context()
         svc_host = volume_utils.extract_host(host, 'backend')
 
-        # NOTE(jdg): get_by_host_and_topic filters out disabled
         service = objects.Service.get_by_args(
-            ctxt, svc_host, CONF.volume_topic)
+            ctxt, svc_host, 'cinder-volume')
         expected = {'frozen': False}
         result = service.conditional_update(
             {'frozen': True}, expected)
@@ -1639,9 +1649,8 @@
         ctxt = context.get_admin_context()
         svc_host = volume_utils.extract_host(host, 'backend')
 
-        # NOTE(jdg): get_by_host_and_topic filters out disabled
         service = objects.Service.get_by_args(
-            ctxt, svc_host, CONF.volume_topic)
+            ctxt, svc_host, 'cinder-volume')
         expected = {'frozen': True}
         result = service.conditional_update(
             {'frozen': False}, expected)
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 1c318ff12..de9a11c4c 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -245,9 +245,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         curr_active_backend_id = None
         svc_host = vol_utils.extract_host(self.host, 'backend')
         try:
-            service = objects.Service.get_by_host_and_topic(
-                context.get_admin_context(), svc_host,
-                CONF.volume_topic)
+            service = objects.Service.get_by_args(
+                context.get_admin_context(),
+                svc_host,
+                'cinder-volume')
         except exception.ServiceNotFound:
             # NOTE(jdg): This is to solve problems with unit tests
             LOG.info(_LI("Service not found for updating "
@@ -503,9 +504,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         try:
             # NOTE(jdg): may be some things to think about here in failover
             # scenarios
-            service = objects.Service.get_by_host_and_topic(
-                context.get_admin_context(), svc_host,
-                CONF.volume_topic)
+            service = objects.Service.get_by_args(
+                context.get_admin_context(),
+                svc_host,
+                'cinder-volume')
         except exception.ServiceNotFound:
             # FIXME(jdg): no idea what we'd do if we hit this case
             LOG.info(_LI("Service not found for updating "
@@ -3258,10 +3260,10 @@ class VolumeManager(manager.SchedulerDependentManager):
         """
         svc_host = vol_utils.extract_host(self.host, 'backend')
 
-        # NOTE(jdg): get_by_host_and_topic filters out disabled
         service = objects.Service.get_by_args(
-            context, svc_host,
-            CONF.volume_topic)
+            context,
+            svc_host,
+            'cinder-volume')
         volumes = objects.VolumeList.get_all_by_host(context, self.host)
 
         exception_encountered = False
@@ -3357,10 +3359,10 @@ class VolumeManager(manager.SchedulerDependentManager):
                           'notification to driver has failed.'))
         svc_host = vol_utils.extract_host(self.host, 'backend')
 
-        # NOTE(jdg): get_by_host_and_topic filters out disabled
         service = objects.Service.get_by_args(
-            context, svc_host,
-            CONF.volume_topic)
+            context,
+            svc_host,
+            'cinder-volume')
         service.disabled = True
         service.disabled_reason = "frozen"
         service.save()
@@ -3390,10 +3392,10 @@ class VolumeManager(manager.SchedulerDependentManager):
             return False
         svc_host = vol_utils.extract_host(self.host, 'backend')
 
-        # NOTE(jdg): get_by_host_and_topic filters out disabled
         service = objects.Service.get_by_args(
-            context, svc_host,
-            CONF.volume_topic)
+            context,
+            svc_host,
+            'cinder-volume')
         service.disabled = False
         service.disabled_reason = ""
         service.save()
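
For reference only (not part of the patch): a minimal sketch that pulls the new
lookup pattern from the hunks above into a single helper. The helper name
_get_volume_service and the allow_disabled flag are hypothetical, and the
import spelling of the volume utils module is an assumption; the
objects.Service.get_by_args call, extract_host(host, 'backend'), the
'cinder-volume' binary name, and the disabled check come straight from the
diff.

    # Illustrative sketch, not part of the patch; helper name is hypothetical.
    from cinder import exception
    from cinder import objects
    from cinder.volume import utils as volume_utils  # assumed import path


    def _get_volume_service(context, host, allow_disabled=False):
        """Look up the cinder-volume service for a host by binary.

        get_by_args(host, binary) returns the service row even when it is
        disabled, unlike get_by_host_and_topic, which filters disabled out.
        """
        svc_host = volume_utils.extract_host(host, 'backend')
        service = objects.Service.get_by_args(
            context.elevated(), svc_host, 'cinder-volume')
        if service.disabled and not allow_disabled:
            # Callers like manage_existing must not target a disabled backend.
            raise exception.ServiceUnavailable()
        return service

Usage would mirror manage_existing in cinder/volume/api.py: call the helper
with the request context and the volume host, and let ServiceNotFound or
ServiceUnavailable propagate to the caller as the patched code does.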