review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Port to oslo.messaging
author    Flavio Percoco <flaper87@gmail.com>  Fri, 7 Feb 2014 11:20:44 +0000 (12:20 +0100)
committer Flavio Percoco <flaper87@gmail.com>  Mon, 10 Mar 2014 16:03:03 +0000 (17:03 +0100)
The oslo.messaging library takes the existing RPC code from oslo and
wraps it in a sane API with well-defined semantics, around which we can
make a commitment to retain compatibility in the future.

The patch is large, but the changes can be summarized as:

    * oslo.messaging>=1.3.0a4 is required; a proper 1.3.0 release will be
    pushed before the icehouse release candidates.

    * The new rpc module has init() and cleanup() methods which manage the
    global oslo.messaging transport state. The TRANSPORT and NOTIFIER
    globals are conceptually similar to the current RPCIMPL global,
    except we're free to create and use alternate Transport objects
    in e.g. the cells code.

    * The rpc.get_{client,server,notifier}() methods are just helpers
    which wrap the global messaging state, specify serializers and
    specify the use of the eventlet executor (see the sketch after
    this list).

    * In oslo.messaging, a request context is expected to be a dict so
    we add a RequestContextSerializer which can serialize to and from
    dicts using RequestContext.{to,from}_dict()

    * The allowed_rpc_exception_modules configuration option is replaced
    by an allowed_remote_exmods get_transport() parameter. This is not
    something that users ever need to configure, but it is something
    each project using oslo.messaging needs to be able to customize.

    * We maintain a global NOTIFIER object and create specializations of
    it with specific publisher IDs in order to avoid notification driver
    loading overhead.

    * rpc.py contains transport aliases for backwards compatibility
    purposes. setup.cfg also contains notification driver aliases for
    backwards compat.

    * The messaging options are moved about in cinder.conf.sample because
    the options are advertised via an oslo.config.opts entry point and
    picked up by the generator.

    * We use messaging.ConfFixture in tests to override oslo.messaging
    config options, rather than making assumptions about the options
    registered by the library (a short test-case sketch follows the
    Change-Id below).
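
Taken together, these bullets describe a small facade module. Below is a condensed, hedged sketch of the shape of the new cinder/rpc.py against the oslo.messaging 1.3 API (messaging.get_transport, RPCClient, get_rpc_server, Notifier). It illustrates the design summarized above rather than reproducing the file verbatim; the exception and alias lists are abbreviated, and the JSON payload serialization used for notifications is omitted for brevity.

    from oslo.config import cfg
    from oslo import messaging

    import cinder.context
    import cinder.exception

    CONF = cfg.CONF
    TRANSPORT = None
    NOTIFIER = None

    # Remote exceptions raised from these modules may be re-raised locally;
    # this replaces the old allowed_rpc_exception_modules config option.
    ALLOWED_EXMODS = [cinder.exception.__name__]

    # Backwards-compat aliases so existing rpc_backend values keep working
    # (abbreviated here).
    TRANSPORT_ALIASES = {
        'cinder.openstack.common.rpc.impl_kombu': 'rabbit',
        'cinder.openstack.common.rpc.impl_qpid': 'qpid',
        'cinder.openstack.common.rpc.impl_zmq': 'zmq',
    }


    class RequestContextSerializer(messaging.Serializer):
        """Bridge RequestContext to/from the dict oslo.messaging expects."""

        def __init__(self, base):
            self._base = base

        def serialize_entity(self, context, entity):
            if not self._base:
                return entity
            return self._base.serialize_entity(context, entity)

        def deserialize_entity(self, context, entity):
            if not self._base:
                return entity
            return self._base.deserialize_entity(context, entity)

        def serialize_context(self, context):
            return context.to_dict()

        def deserialize_context(self, context):
            return cinder.context.RequestContext.from_dict(context)


    def init(conf):
        """Create the global transport and notifier."""
        global TRANSPORT, NOTIFIER
        TRANSPORT = messaging.get_transport(
            conf, allowed_remote_exmods=ALLOWED_EXMODS,
            aliases=TRANSPORT_ALIASES)
        NOTIFIER = messaging.Notifier(
            TRANSPORT, serializer=RequestContextSerializer(None))


    def cleanup():
        """Tear the global transport and notifier down again."""
        global TRANSPORT, NOTIFIER
        TRANSPORT.cleanup()
        TRANSPORT = NOTIFIER = None


    def get_client(target, version_cap=None, serializer=None):
        """An RPC client bound to the global transport."""
        return messaging.RPCClient(
            TRANSPORT, target, version_cap=version_cap,
            serializer=RequestContextSerializer(serializer))


    def get_server(target, endpoints, serializer=None):
        """An RPC server on the global transport, with the eventlet executor."""
        return messaging.get_rpc_server(
            TRANSPORT, target, endpoints, executor='eventlet',
            serializer=RequestContextSerializer(serializer))


    def get_notifier(service=None, host=None, publisher_id=None):
        """A specialization of the global notifier for one publisher id."""
        if not publisher_id:
            publisher_id = "%s.%s" % (service, host or CONF.host)
        return NOTIFIER.prepare(publisher_id=publisher_id)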

Implements blueprint: oslo-messaging

Change-Id: Ib912809428d92e788558439e2d85b51272ebefdd
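
For the ConfFixture bullet above, here is a minimal sketch of how a test case might apply it, assuming oslo.messaging 1.3's conffixture module; the test-case name and the timeout value are illustrative, not taken from the commit.

    from oslo.config import cfg
    from oslo.messaging import conffixture as messaging_conffixture
    import testtools

    CONF = cfg.CONF


    class RpcTestCase(testtools.TestCase):

        def setUp(self):
            super(RpcTestCase, self).setUp()
            # Route RPC over the in-memory 'fake' driver for this test and
            # shorten the response timeout, without assuming which options
            # the library registers under the hood.
            self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
            self.messaging_conf.transport_driver = 'fake'
            self.messaging_conf.response_timeout = 15
            self.useFixture(self.messaging_conf)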

68 files changed:
bin/cinder-api
bin/cinder-manage
cinder/api/contrib/admin_actions.py
cinder/api/contrib/qos_specs_manage.py
cinder/api/contrib/types_extra_specs.py
cinder/api/contrib/types_manage.py
cinder/api/contrib/volume_actions.py
cinder/api/contrib/volume_type_encryption.py
cinder/backup/manager.py
cinder/backup/rpcapi.py
cinder/manager.py
cinder/openstack/common/log_handler.py
cinder/openstack/common/notifier/__init__.py [deleted file]
cinder/openstack/common/notifier/api.py [deleted file]
cinder/openstack/common/notifier/log_notifier.py [deleted file]
cinder/openstack/common/notifier/no_op_notifier.py [deleted file]
cinder/openstack/common/notifier/proxy.py [deleted file]
cinder/openstack/common/notifier/rpc_notifier.py [deleted file]
cinder/openstack/common/notifier/rpc_notifier2.py [deleted file]
cinder/openstack/common/notifier/test_notifier.py [deleted file]
cinder/openstack/common/rpc/__init__.py [deleted file]
cinder/openstack/common/rpc/amqp.py [deleted file]
cinder/openstack/common/rpc/common.py [deleted file]
cinder/openstack/common/rpc/dispatcher.py [deleted file]
cinder/openstack/common/rpc/impl_fake.py [deleted file]
cinder/openstack/common/rpc/impl_kombu.py [deleted file]
cinder/openstack/common/rpc/impl_qpid.py [deleted file]
cinder/openstack/common/rpc/impl_zmq.py [deleted file]
cinder/openstack/common/rpc/matchmaker.py [deleted file]
cinder/openstack/common/rpc/matchmaker_redis.py [deleted file]
cinder/openstack/common/rpc/matchmaker_ring.py [deleted file]
cinder/openstack/common/rpc/proxy.py [deleted file]
cinder/openstack/common/rpc/serializer.py [deleted file]
cinder/openstack/common/rpc/service.py [deleted file]
cinder/openstack/common/rpc/zmq_receiver.py [deleted file]
cinder/rpc.py [new file with mode: 0644]
cinder/scheduler/flows/create_volume.py
cinder/scheduler/manager.py
cinder/scheduler/rpcapi.py
cinder/service.py
cinder/test.py
cinder/tests/api/contrib/test_admin_actions.py
cinder/tests/api/contrib/test_qos_specs_manage.py
cinder/tests/api/contrib/test_types_extra_specs.py
cinder/tests/api/contrib/test_types_manage.py
cinder/tests/api/contrib/test_volume_actions.py
cinder/tests/api/contrib/test_volume_transfer.py
cinder/tests/api/contrib/test_volume_type_encryption.py
cinder/tests/api/v1/test_volumes.py
cinder/tests/api/v2/stubs.py
cinder/tests/api/v2/test_volumes.py
cinder/tests/cast_as_call.py [new file with mode: 0644]
cinder/tests/fake_notifier.py [new file with mode: 0644]
cinder/tests/integrated/integrated_helpers.py
cinder/tests/scheduler/test_rpcapi.py
cinder/tests/test_huawei_hvs.py
cinder/tests/test_quota.py
cinder/tests/test_test.py
cinder/tests/test_volume.py
cinder/tests/test_volume_rpcapi.py
cinder/tests/test_volume_utils.py
cinder/volume/manager.py
cinder/volume/rpcapi.py
cinder/volume/utils.py
etc/cinder/cinder.conf.sample
openstack-common.conf
requirements.txt
setup.cfg
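
Before the per-file hunks, a short, self-contained illustration of the calling pattern the diffs below apply throughout the tree: rpc.init() once at startup, a messaging.Target plus rpc.get_client(), then prepare()/cast() with keyword arguments in place of the old rpc.cast() with a hand-built message dict. The delete_volume_on_host wrapper is made up for illustration and is not code from the commit.

    from oslo.config import cfg
    from oslo import messaging

    from cinder import context
    from cinder import rpc

    CONF = cfg.CONF


    def delete_volume_on_host(volume_id, host):
        """Cast 'delete_volume' to the cinder-volume service on a given host."""
        rpc.init(CONF)  # normally called once at service startup
        target = messaging.Target(topic=CONF.volume_topic)
        client = rpc.get_client(target)

        ctxt = context.get_admin_context()
        cctxt = client.prepare(server=host)
        cctxt.cast(ctxt, 'delete_volume', volume_id=volume_id)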

index 0c6529b0f284dd78cee7150fb6a51a31586e153e..7bbded26f17a772e76034eb91642b64e5949163b 100755 (executable)
@@ -38,6 +38,7 @@ gettextutils.install('cinder', lazy=True)
 # Need to register global_opts
 from cinder.common import config  # noqa
 from cinder.openstack.common import log as logging
+from cinder import rpc
 from cinder import service
 from cinder import utils
 from cinder import version
@@ -52,6 +53,7 @@ if __name__ == '__main__':
     logging.setup("cinder")
     utils.monkey_patch()
 
+    rpc.init(CONF)
     launcher = service.process_launcher()
     server = service.WSGIService('osapi_volume')
     launcher.launch_service(server, workers=server.workers or 1)
index 5a47bdf891bc0891177f3a2d1d6942d05915b06c..e4572114f6f0a9bb3e056450d77621ff7ba46e3b 100755 (executable)
@@ -59,7 +59,7 @@ import os
 import sys
 
 from oslo.config import cfg
-
+from oslo import messaging
 
 # If ../cinder/__init__.py exists, add ../ to Python search path, so that
 # it will override what happens to be installed in /usr/(local/)lib/python...
@@ -79,8 +79,8 @@ from cinder import context
 from cinder import db
 from cinder.db import migration
 from cinder.openstack.common import log as logging
-from cinder.openstack.common import rpc
 from cinder.openstack.common import uuidutils
+from cinder import rpc
 from cinder import utils
 from cinder import version
 
@@ -246,6 +246,12 @@ class VersionCommands(object):
 class VolumeCommands(object):
     """Methods for dealing with a cloud in an odd state."""
 
+    def __init__(self, *args, **kwargs):
+        super(VolumeCommands, self).__init__(*args, **kwargs)
+        rpc.init(CONF)
+        target = messaging.Target(topic=CONF.volume_topic)
+        self.client = rpc.get_client(target)
+
     @args('volume_id',
           help='Volume ID to be deleted')
     def delete(self, volume_id):
@@ -267,10 +273,8 @@ class VolumeCommands(object):
             print(_("Detach volume from instance and then try again."))
             return
 
-        rpc.cast(ctxt,
-                 rpc.queue_get_for(ctxt, CONF.volume_topic, host),
-                 {"method": "delete_volume",
-                  "args": {"volume_id": volume['id']}})
+        cctxt = self.client.prepare(server=host)
+        cctxt.cast(ctxt, "delete_volume", volume_id=volume['id'])
 
     @args('volume_id',
           help='Volume ID to be reattached')
@@ -286,12 +290,10 @@ class VolumeCommands(object):
             return
         instance = db.instance_get(ctxt, volume['instance_id'])
         host = instance['host']
-        rpc.cast(ctxt,
-                 rpc.queue_get_for(ctxt, CONF.compute_topic, host),
-                 {"method": "attach_volume",
-                  "args": {"instance_id": instance['id'],
-                           "volume_id": volume['id'],
-                           "mountpoint": volume['mountpoint']}})
+
+        cctxt = self.client.prepare(topic=CONF.compute_topic, server=host)
+        cctxt.cast(ctxt, "attach_volume", instance_id=instance['id'],
+                   volume_id=volume['id'], mountpoint=volume['mountpoint'])
 
 
 class ConfigCommands(object):
@@ -537,7 +539,6 @@ def main():
         sys.exit(2)
 
     fn = CONF.category.action_fn
-
     fn_args = fetch_func_args(fn)
     fn(*fn_args)
 
index f407998153a74881c67cdf9189a3d7a598b46481..4dbf736d867764489239cfbd969f3d6fc7a5e78c 100644 (file)
@@ -20,8 +20,8 @@ from cinder.api.openstack import wsgi
 from cinder import db
 from cinder import exception
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.notifier import api as notifier_api
 from cinder.openstack.common import strutils
+from cinder import rpc
 from cinder import volume
 
 
@@ -84,18 +84,17 @@ class AdminController(wsgi.Controller):
                         'update': update})
 
         notifier_info = dict(id=id, update=update)
-        notifier_api.notify(context, 'volumeStatusUpdate',
-                            self.collection + '.reset_status.start',
-                            notifier_api.INFO, notifier_info)
+        notifier = rpc.get_notifier('volumeStatusUpdate')
+        notifier.info(context, self.collection + '.reset_status.start',
+                      notifier_info)
 
         try:
             self._update(context, id, update)
         except exception.NotFound as e:
             raise exc.HTTPNotFound(e)
 
-        notifier_api.notify(context, 'volumeStatusUpdate',
-                            self.collection + '.reset_status.end',
-                            notifier_api.INFO, notifier_info)
+        notifier.info(context, self.collection + '.reset_status.end',
+                      notifier_info)
 
         return webob.Response(status_int=202)
 
index 4c5a3fc3ce2f049ab33433fa674390c983eeda94..10a951f0ad6653a83262782444370d612ef8da23 100644 (file)
@@ -23,8 +23,8 @@ from cinder.api.views import qos_specs as view_qos_specs
 from cinder.api import xmlutil
 from cinder import exception
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.notifier import api as notifier_api
 from cinder.openstack.common import strutils
+from cinder import rpc
 from cinder.volume import qos_specs
 
 
@@ -83,11 +83,9 @@ class QoSSpecsController(wsgi.Controller):
 
     @staticmethod
     def _notify_qos_specs_error(context, method, payload):
-        notifier_api.notify(context,
-                            'QoSSpecs',
-                            method,
-                            notifier_api.ERROR,
-                            payload)
+        rpc.get_notifier('QoSSpecs').error(context,
+                                           method,
+                                           payload)
 
     @wsgi.serializers(xml=QoSSpecsTemplate)
     def index(self, req):
@@ -115,9 +113,9 @@ class QoSSpecsController(wsgi.Controller):
             qos_specs.create(context, name, specs)
             spec = qos_specs.get_qos_specs_by_name(context, name)
             notifier_info = dict(name=name, specs=specs)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'QoSSpecs.create',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('QoSSpecs').info(context,
+                                              'QoSSpecs.create',
+                                              notifier_info)
         except exception.InvalidInput as err:
             notifier_err = dict(name=name, error_message=err)
             self._notify_qos_specs_error(context,
@@ -150,9 +148,9 @@ class QoSSpecsController(wsgi.Controller):
         try:
             qos_specs.update(context, id, specs)
             notifier_info = dict(id=id, specs=specs)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'qos_specs.update',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('QoSSpecs').info(context,
+                                              'qos_specs.update',
+                                              notifier_info)
         except exception.QoSSpecsNotFound as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_qos_specs_error(context,
@@ -202,9 +200,9 @@ class QoSSpecsController(wsgi.Controller):
         try:
             qos_specs.delete(context, id, force)
             notifier_info = dict(id=id)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'qos_specs.delete',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('QoSSpecs').info(context,
+                                              'qos_specs.delete',
+                                              notifier_info)
         except exception.QoSSpecsNotFound as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_qos_specs_error(context,
@@ -240,9 +238,8 @@ class QoSSpecsController(wsgi.Controller):
         try:
             qos_specs.delete_keys(context, id, keys)
             notifier_info = dict(id=id)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'qos_specs.delete_keys',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier().info(context, 'qos_specs.delete_keys',
+                                    notifier_info)
         except exception.QoSSpecsNotFound as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_qos_specs_error(context,
@@ -269,9 +266,9 @@ class QoSSpecsController(wsgi.Controller):
         try:
             associates = qos_specs.get_associations(context, id)
             notifier_info = dict(id=id)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'qos_specs.associations',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('QoSSpecs').info(context,
+                                              'qos_specs.associations',
+                                              notifier_info)
         except exception.QoSSpecsNotFound as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_qos_specs_error(context,
@@ -307,9 +304,9 @@ class QoSSpecsController(wsgi.Controller):
         try:
             qos_specs.associate_qos_with_type(context, id, type_id)
             notifier_info = dict(id=id, type_id=type_id)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'qos_specs.associate',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('QoSSpecs').info(context,
+                                              'qos_specs.associate',
+                                              notifier_info)
         except exception.VolumeTypeNotFound as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_qos_specs_error(context,
@@ -360,9 +357,9 @@ class QoSSpecsController(wsgi.Controller):
         try:
             qos_specs.disassociate_qos_specs(context, id, type_id)
             notifier_info = dict(id=id, type_id=type_id)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'qos_specs.disassociate',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('QoSSpecs').info(context,
+                                              'qos_specs.disassociate',
+                                              notifier_info)
         except exception.VolumeTypeNotFound as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_qos_specs_error(context,
@@ -394,9 +391,9 @@ class QoSSpecsController(wsgi.Controller):
         try:
             qos_specs.disassociate_all(context, id)
             notifier_info = dict(id=id)
-            notifier_api.notify(context, 'QoSSpecs',
-                                'qos_specs.disassociate_all',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('QoSSpecs').info(context,
+                                              'qos_specs.disassociate_all',
+                                              notifier_info)
         except exception.QoSSpecsNotFound as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_qos_specs_error(context,
index f2131d58ba28b9c627b0a1b873e85684ac321785..85d5a34d051e0ad56c68e9796a675ccd0b2f754a 100644 (file)
@@ -23,7 +23,7 @@ from cinder.api.openstack import wsgi
 from cinder.api import xmlutil
 from cinder import db
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
+from cinder import rpc
 from cinder.volume import volume_types
 
 authorize = extensions.extension_authorizer('volume', 'types_extra_specs')
@@ -88,9 +88,9 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
                                                     type_id,
                                                     specs)
         notifier_info = dict(type_id=type_id, specs=specs)
-        notifier_api.notify(context, 'volumeTypeExtraSpecs',
-                            'volume_type_extra_specs.create',
-                            notifier_api.INFO, notifier_info)
+        notifier = rpc.get_notifier('volumeTypeExtraSpecs')
+        notifier.info(context, 'volume_type_extra_specs.create',
+                      notifier_info)
         return body
 
     @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
@@ -111,9 +111,10 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
                                                     type_id,
                                                     body)
         notifier_info = dict(type_id=type_id, id=id)
-        notifier_api.notify(context, 'volumeTypeExtraSpecs',
-                            'volume_type_extra_specs.update',
-                            notifier_api.INFO, notifier_info)
+        notifier = rpc.get_notifier('volumeTypeExtraSpecs')
+        notifier.info(context,
+                      'volume_type_extra_specs.update',
+                      notifier_info)
         return body
 
     @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate)
@@ -140,9 +141,10 @@ class VolumeTypeExtraSpecsController(wsgi.Controller):
             raise webob.exc.HTTPNotFound(explanation=error.msg)
 
         notifier_info = dict(type_id=type_id, id=id)
-        notifier_api.notify(context, 'volumeTypeExtraSpecs',
-                            'volume_type_extra_specs.delete',
-                            notifier_api.INFO, notifier_info)
+        notifier = rpc.get_notifier('volumeTypeExtraSpecs')
+        notifier.info(context,
+                      'volume_type_extra_specs.delete',
+                      notifier_info)
         return webob.Response(status_int=202)
 
     def _check_key_names(self, keys):
index 9b346b049742779e8cd8754fc2656d7e5ae5cbc9..0efa744ecd5affea92d1922d9e05b110919f517f 100644 (file)
@@ -22,7 +22,7 @@ from cinder.api.openstack import wsgi
 from cinder.api.v1 import types
 from cinder.api.views import types as views_types
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
+from cinder import rpc
 from cinder.volume import volume_types
 
 
@@ -35,11 +35,7 @@ class VolumeTypesManageController(wsgi.Controller):
     _view_builder_class = views_types.ViewBuilder
 
     def _notify_volume_type_error(self, context, method, payload):
-        notifier_api.notify(context,
-                            'volumeType',
-                            method,
-                            notifier_api.ERROR,
-                            payload)
+        rpc.get_notifier('volumeType').error(context, method, payload)
 
     @wsgi.action("create")
     @wsgi.serializers(xml=types.VolumeTypeTemplate)
@@ -62,9 +58,8 @@ class VolumeTypesManageController(wsgi.Controller):
             volume_types.create(context, name, specs)
             vol_type = volume_types.get_volume_type_by_name(context, name)
             notifier_info = dict(volume_types=vol_type)
-            notifier_api.notify(context, 'volumeType',
-                                'volume_type.create',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('volumeType').info(context, 'volume_type.create',
+                                                notifier_info)
 
         except exception.VolumeTypeExists as err:
             notifier_err = dict(volume_types=vol_type, error_message=err)
@@ -92,9 +87,9 @@ class VolumeTypesManageController(wsgi.Controller):
             vol_type = volume_types.get_volume_type(context, id)
             volume_types.destroy(context, vol_type['id'])
             notifier_info = dict(volume_types=vol_type)
-            notifier_api.notify(context, 'volumeType',
-                                'volume_type.delete',
-                                notifier_api.INFO, notifier_info)
+            rpc.get_notifier('volumeType').info(context,
+                                                'volume_type.delete',
+                                                notifier_info)
         except exception.VolumeTypeInUse as err:
             notifier_err = dict(id=id, error_message=err)
             self._notify_volume_type_error(context,
index ebe24485d16dda51d597d564826fad34b98d1448..2961112684f8fe2d22a62c0f7695b569922d41d7 100644 (file)
 
 import webob
 
+from oslo import messaging
+
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder.api import xmlutil
 from cinder import exception
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.rpc import common as rpc_common
 from cinder.openstack.common import strutils
 from cinder import utils
 from cinder import volume
@@ -259,7 +260,7 @@ class VolumeActionsController(wsgi.Controller):
             raise webob.exc.HTTPBadRequest(explanation=error.msg)
         except ValueError as error:
             raise webob.exc.HTTPBadRequest(explanation=unicode(error))
-        except rpc_common.RemoteError as error:
+        except messaging.RemoteError as error:
             msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type,
                                                  'err_msg': error.value}
             raise webob.exc.HTTPBadRequest(explanation=msg)
index 8b59e30e4f46b95369a599db03673be25be8aa83..3368a5b81e4e06d7a194d125b4c6263775ca2e8f 100644 (file)
@@ -22,7 +22,7 @@ from cinder.api.openstack import wsgi
 from cinder.api import xmlutil
 from cinder import db
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
+from cinder import rpc
 from cinder.volume import volume_types
 
 authorize = extensions.extension_authorizer('volume',
@@ -127,9 +127,8 @@ class VolumeTypeEncryptionController(wsgi.Controller):
 
         db.volume_type_encryption_create(context, type_id, encryption_specs)
         notifier_info = dict(type_id=type_id, specs=encryption_specs)
-        notifier_api.notify(context, 'volumeTypeEncryption',
-                            'volume_type_encryption.create',
-                            notifier_api.INFO, notifier_info)
+        notifier = rpc.get_notifier('volumeTypeEncryption')
+        notifier.info(context, 'volume_type_encryption.create', notifier_info)
         return body
 
     @wsgi.serializers(xml=VolumeTypeEncryptionTemplate)
@@ -159,9 +158,8 @@ class VolumeTypeEncryptionController(wsgi.Controller):
 
         db.volume_type_encryption_update(context, type_id, encryption_specs)
         notifier_info = dict(type_id=type_id, id=id)
-        notifier_api.notify(context, 'volumeTypeEncryption',
-                            'volume_type_encryption.update',
-                            notifier_api.INFO, notifier_info)
+        notifier = rpc.get_notifier('volumeTypeEncryption')
+        notifier.info(context, 'volume_type_encryption.update', notifier_info)
 
         return body
 
index feab8b48227a10eaaf9a621ff8e3dd1a92ef8d57..c1e91ca7468b82489215ea4edf817d1b8c54772c 100644 (file)
@@ -34,6 +34,7 @@ Volume backups can be created, restored, deleted and listed.
 """
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
@@ -67,6 +68,8 @@ class BackupManager(manager.SchedulerDependentManager):
 
     RPC_API_VERSION = '1.0'
 
+    target = messaging.Target(version=RPC_API_VERSION)
+
     def __init__(self, service_name=None, *args, **kwargs):
         self.service = importutils.import_module(self.driver_name)
         self.az = CONF.storage_availability_zone
index ee421eb1933f64df03825695f3ab29727194f341..5767adadc2ac8cc57cc0170db114868a55db2549 100644 (file)
@@ -19,10 +19,10 @@ Client side of the volume backup RPC API.
 
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder.openstack.common import log as logging
-from cinder.openstack.common import rpc
-import cinder.openstack.common.rpc.proxy
+from cinder import rpc
 
 
 CONF = cfg.CONF
@@ -30,7 +30,7 @@ CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
-class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
+class BackupAPI(object):
     """Client side of the volume rpc API.
 
     API version history:
@@ -41,48 +41,34 @@ class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
     BASE_RPC_API_VERSION = '1.0'
 
     def __init__(self):
-        super(BackupAPI, self).__init__(
-            topic=CONF.backup_topic,
-            default_version=self.BASE_RPC_API_VERSION)
+        super(BackupAPI, self).__init__()
+        target = messaging.Target(topic=CONF.backup_topic,
+                                  version=self.BASE_RPC_API_VERSION)
+        self.client = rpc.get_client(target, '1.0')
 
     def create_backup(self, ctxt, host, backup_id, volume_id):
         LOG.debug("create_backup in rpcapi backup_id %s", backup_id)
-        topic = rpc.queue_get_for(ctxt, self.topic, host)
-        LOG.debug("create queue topic=%s", topic)
-        self.cast(ctxt,
-                  self.make_msg('create_backup',
-                                backup_id=backup_id),
-                  topic=topic)
+        cctxt = self.client.prepare(server=host)
+        cctxt.cast(ctxt, 'create_backup', backup_id=backup_id)
 
     def restore_backup(self, ctxt, host, backup_id, volume_id):
         LOG.debug("restore_backup in rpcapi backup_id %s", backup_id)
-        topic = rpc.queue_get_for(ctxt, self.topic, host)
-        LOG.debug("restore queue topic=%s", topic)
-        self.cast(ctxt,
-                  self.make_msg('restore_backup',
-                                backup_id=backup_id,
-                                volume_id=volume_id),
-                  topic=topic)
+        cctxt = self.client.prepare(server=host)
+        cctxt.cast(ctxt, 'restore_backup', backup_id=backup_id,
+                   volume_id=volume_id)
 
     def delete_backup(self, ctxt, host, backup_id):
-        LOG.debug("delete_backup rpcapi backup_id %s", backup_id)
-        topic = rpc.queue_get_for(ctxt, self.topic, host)
-        self.cast(ctxt,
-                  self.make_msg('delete_backup',
-                                backup_id=backup_id),
-                  topic=topic)
+        LOG.debug("delete_backup  rpcapi backup_id %s", backup_id)
+        cctxt = self.client.prepare(server=host)
+        cctxt.cast(ctxt, 'delete_backup', backup_id=backup_id)
 
     def export_record(self, ctxt, host, backup_id):
         LOG.debug("export_record in rpcapi backup_id %(id)s "
                   "on host %(host)s.",
                   {'id': backup_id,
                    'host': host})
-        topic = rpc.queue_get_for(ctxt, self.topic, host)
-        LOG.debug("export queue topic=%s" % topic)
-        return self.call(ctxt,
-                         self.make_msg('export_record',
-                                       backup_id=backup_id),
-                         topic=topic)
+        cctxt = self.client.prepare(server=host)
+        return cctxt.call(ctxt, 'export_record', backup_id=backup_id)
 
     def import_record(self,
                       ctxt,
@@ -91,16 +77,14 @@ class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
                       backup_service,
                       backup_url,
                       backup_hosts):
-        LOG.debug("import_record rpcapi backup id $(id)s "
+        LOG.debug("import_record rpcapi backup id %(id)s "
                   "on host %(host)s "
                   "for backup_url %(url)s." % {'id': backup_id,
                                                'host': host,
                                                'url': backup_url})
-        topic = rpc.queue_get_for(ctxt, self.topic, host)
-        self.cast(ctxt,
-                  self.make_msg('import_record',
-                                backup_id=backup_id,
-                                backup_service=backup_service,
-                                backup_url=backup_url,
-                                backup_hosts=backup_hosts),
-                  topic=topic)
+        cctxt = self.client.prepare(server=host)
+        cctxt.cast(ctxt, 'import_record',
+                   backup_id=backup_id,
+                   backup_service=backup_service,
+                   backup_url=backup_url,
+                   backup_hosts=backup_hosts)
index 0ef53c01945f6dac8f36ee8d6092469dc2b49e97..03944420c27232ea34dfa7c82753a8e79ae1ad66 100644 (file)
@@ -53,11 +53,11 @@ This module provides Manager, a base class for managers.
 
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder.db import base
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import periodic_task
-from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher
 from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
 
@@ -70,20 +70,15 @@ class Manager(base.Base, periodic_task.PeriodicTasks):
     # Set RPC API version to 1.0 by default.
     RPC_API_VERSION = '1.0'
 
+    target = messaging.Target(version=RPC_API_VERSION)
+
     def __init__(self, host=None, db_driver=None):
         if not host:
             host = CONF.host
         self.host = host
+        self.additional_endpoints = []
         super(Manager, self).__init__(db_driver)
 
-    def create_rpc_dispatcher(self):
-        '''Get the rpc dispatcher for this manager.
-
-        If a manager would like to set an rpc API version, or support more than
-        one class as the target of rpc messages, override this method.
-        '''
-        return rpc_dispatcher.RpcDispatcher([self])
-
     def periodic_tasks(self, context, raise_on_error=False):
         """Tasks to be run at a periodic interval."""
         return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
index ebeb9ab737e24e5cdf42971424d86b21caeed28b..37a7195dd4f3027e5cf2bdefca138a05052eaf3b 100644 (file)
@@ -15,15 +15,15 @@ import logging
 
 from oslo.config import cfg
 
-from cinder.openstack.common import notifier
+from cinder import rpc
 
 
 class PublishErrorsHandler(logging.Handler):
     def emit(self, record):
+        # NOTE(flaper87): This will have to be changed in the
+        # future. Leaving for backwar compatibility
         if ('cinder.openstack.common.notifier.log_notifier' in
                 cfg.CONF.notification_driver):
             return
-        notifier.api.notify(None, 'error.publisher',
-                            'error_notification',
-                            notifier.api.ERROR,
-                            dict(error=record.msg))
+        rpc.get_notifier('error.publisher').info('error_notification',
+                                                 dict(error=record.msg))
diff --git a/cinder/openstack/common/notifier/__init__.py b/cinder/openstack/common/notifier/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/cinder/openstack/common/notifier/api.py b/cinder/openstack/common/notifier/api.py
deleted file mode 100644 (file)
index d93530d..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import socket
-import uuid
-
-from oslo.config import cfg
-
-from cinder.openstack.common import context
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import importutils
-from cinder.openstack.common import jsonutils
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import timeutils
-
-
-LOG = logging.getLogger(__name__)
-
-notifier_opts = [
-    cfg.MultiStrOpt('notification_driver',
-                    default=[],
-                    help='Driver or drivers to handle sending notifications'),
-    cfg.StrOpt('default_notification_level',
-               default='INFO',
-               help='Default notification level for outgoing notifications'),
-    cfg.StrOpt('default_publisher_id',
-               default=None,
-               help='Default publisher_id for outgoing notifications'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(notifier_opts)
-
-WARN = 'WARN'
-INFO = 'INFO'
-ERROR = 'ERROR'
-CRITICAL = 'CRITICAL'
-DEBUG = 'DEBUG'
-
-log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
-
-
-class BadPriorityException(Exception):
-    pass
-
-
-def notify_decorator(name, fn):
-    """Decorator for notify which is used from utils.monkey_patch().
-
-        :param name: name of the function
-        :param function: - object of the function
-        :returns: function -- decorated function
-
-    """
-    def wrapped_func(*args, **kwarg):
-        body = {}
-        body['args'] = []
-        body['kwarg'] = {}
-        for arg in args:
-            body['args'].append(arg)
-        for key in kwarg:
-            body['kwarg'][key] = kwarg[key]
-
-        ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
-        notify(ctxt,
-               CONF.default_publisher_id or socket.gethostname(),
-               name,
-               CONF.default_notification_level,
-               body)
-        return fn(*args, **kwarg)
-    return wrapped_func
-
-
-def publisher_id(service, host=None):
-    if not host:
-        try:
-            host = CONF.host
-        except AttributeError:
-            host = CONF.default_publisher_id or socket.gethostname()
-    return "%s.%s" % (service, host)
-
-
-def notify(context, publisher_id, event_type, priority, payload):
-    """Sends a notification using the specified driver
-
-    :param publisher_id: the source worker_type.host of the message
-    :param event_type:   the literal type of event (ex. Instance Creation)
-    :param priority:     patterned after the enumeration of Python logging
-                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
-    :param payload:       A python dictionary of attributes
-
-    Outgoing message format includes the above parameters, and appends the
-    following:
-
-    message_id
-      a UUID representing the id for this notification
-
-    timestamp
-      the GMT timestamp the notification was sent at
-
-    The composite message will be constructed as a dictionary of the above
-    attributes, which will then be sent via the transport mechanism defined
-    by the driver.
-
-    Message example::
-
-        {'message_id': str(uuid.uuid4()),
-         'publisher_id': 'compute.host1',
-         'timestamp': timeutils.utcnow(),
-         'priority': 'WARN',
-         'event_type': 'compute.create_instance',
-         'payload': {'instance_id': 12, ... }}
-
-    """
-    if priority not in log_levels:
-        raise BadPriorityException(
-            _('%s not in valid priorities') % priority)
-
-    # Ensure everything is JSON serializable.
-    payload = jsonutils.to_primitive(payload, convert_instances=True)
-
-    msg = dict(message_id=str(uuid.uuid4()),
-               publisher_id=publisher_id,
-               event_type=event_type,
-               priority=priority,
-               payload=payload,
-               timestamp=str(timeutils.utcnow()))
-
-    for driver in _get_drivers():
-        try:
-            driver.notify(context, msg)
-        except Exception as e:
-            LOG.exception(_("Problem '%(e)s' attempting to "
-                            "send to notification system. "
-                            "Payload=%(payload)s")
-                          % dict(e=e, payload=payload))
-
-
-_drivers = None
-
-
-def _get_drivers():
-    """Instantiate, cache, and return drivers based on the CONF."""
-    global _drivers
-    if _drivers is None:
-        _drivers = {}
-        for notification_driver in CONF.notification_driver:
-            try:
-                driver = importutils.import_module(notification_driver)
-                _drivers[notification_driver] = driver
-            except ImportError:
-                LOG.exception(_("Failed to load notifier %s. "
-                                "These notifications will not be sent.") %
-                              notification_driver)
-    return _drivers.values()
-
-
-def _reset_drivers():
-    """Used by unit tests to reset the drivers."""
-    global _drivers
-    _drivers = None
diff --git a/cinder/openstack/common/notifier/log_notifier.py b/cinder/openstack/common/notifier/log_notifier.py
deleted file mode 100644 (file)
index 11a453d..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-
-from cinder.openstack.common import jsonutils
-from cinder.openstack.common import log as logging
-
-
-CONF = cfg.CONF
-
-
-def notify(_context, message):
-    """Notifies the recipient of the desired event given the model.
-
-    Log notifications using OpenStack's default logging system.
-    """
-
-    priority = message.get('priority',
-                           CONF.default_notification_level)
-    priority = priority.lower()
-    logger = logging.getLogger(
-        'cinder.openstack.common.notification.%s' %
-        message['event_type'])
-    getattr(logger, priority)(jsonutils.dumps(message))
diff --git a/cinder/openstack/common/notifier/no_op_notifier.py b/cinder/openstack/common/notifier/no_op_notifier.py
deleted file mode 100644 (file)
index 13d946e..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-def notify(_context, message):
-    """Notifies the recipient of the desired event given the model."""
-    pass
diff --git a/cinder/openstack/common/notifier/proxy.py b/cinder/openstack/common/notifier/proxy.py
deleted file mode 100644 (file)
index 1ab4047..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-A temporary helper which emulates oslo.messaging.Notifier.
-
-This helper method allows us to do the tedious porting to the new Notifier API
-as a standalone commit so that the commit which switches us to oslo.messaging
-is smaller and easier to review. This file will be removed as part of that
-commit.
-"""
-
-from oslo.config import cfg
-
-from cinder.openstack.common.notifier import api as notifier_api
-
-CONF = cfg.CONF
-
-
-class Notifier(object):
-
-    def __init__(self, publisher_id):
-        super(Notifier, self).__init__()
-        self.publisher_id = publisher_id
-
-    _marker = object()
-
-    def prepare(self, publisher_id=_marker):
-        ret = self.__class__(self.publisher_id)
-        if publisher_id is not self._marker:
-            ret.publisher_id = publisher_id
-        return ret
-
-    def _notify(self, ctxt, event_type, payload, priority):
-        notifier_api.notify(ctxt,
-                            self.publisher_id,
-                            event_type,
-                            priority,
-                            payload)
-
-    def audit(self, ctxt, event_type, payload):
-        # No audit in old notifier.
-        self._notify(ctxt, event_type, payload, 'INFO')
-
-    def debug(self, ctxt, event_type, payload):
-        self._notify(ctxt, event_type, payload, 'DEBUG')
-
-    def info(self, ctxt, event_type, payload):
-        self._notify(ctxt, event_type, payload, 'INFO')
-
-    def warn(self, ctxt, event_type, payload):
-        self._notify(ctxt, event_type, payload, 'WARN')
-
-    warning = warn
-
-    def error(self, ctxt, event_type, payload):
-        self._notify(ctxt, event_type, payload, 'ERROR')
-
-    def critical(self, ctxt, event_type, payload):
-        self._notify(ctxt, event_type, payload, 'CRITICAL')
-
-
-def get_notifier(service=None, host=None, publisher_id=None):
-    if not publisher_id:
-        publisher_id = "%s.%s" % (service, host or CONF.host)
-    return Notifier(publisher_id)
diff --git a/cinder/openstack/common/notifier/rpc_notifier.py b/cinder/openstack/common/notifier/rpc_notifier.py
deleted file mode 100644 (file)
index 6412342..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo.config import cfg
-
-from cinder.openstack.common import context as req_context
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import rpc
-
-LOG = logging.getLogger(__name__)
-
-notification_topic_opt = cfg.ListOpt(
-    'notification_topics', default=['notifications', ],
-    help='AMQP topic used for OpenStack notifications')
-
-CONF = cfg.CONF
-CONF.register_opt(notification_topic_opt)
-
-
-def notify(context, message):
-    """Sends a notification via RPC."""
-    if not context:
-        context = req_context.get_admin_context()
-    priority = message.get('priority',
-                           CONF.default_notification_level)
-    priority = priority.lower()
-    for topic in CONF.notification_topics:
-        topic = '%s.%s' % (topic, priority)
-        try:
-            rpc.notify(context, topic, message)
-        except Exception:
-            LOG.exception(_("Could not send notification to %(topic)s. "
-                            "Payload=%(message)s"),
-                          {"topic": topic, "message": message})
diff --git a/cinder/openstack/common/notifier/rpc_notifier2.py b/cinder/openstack/common/notifier/rpc_notifier2.py
deleted file mode 100644 (file)
index 524ca92..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-'''messaging based notification driver, with message envelopes'''
-
-from oslo.config import cfg
-
-from cinder.openstack.common import context as req_context
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import rpc
-
-LOG = logging.getLogger(__name__)
-
-notification_topic_opt = cfg.ListOpt(
-    'topics', default=['notifications', ],
-    help='AMQP topic(s) used for OpenStack notifications')
-
-opt_group = cfg.OptGroup(name='rpc_notifier2',
-                         title='Options for rpc_notifier2')
-
-CONF = cfg.CONF
-CONF.register_group(opt_group)
-CONF.register_opt(notification_topic_opt, opt_group)
-
-
-def notify(context, message):
-    """Sends a notification via RPC."""
-    if not context:
-        context = req_context.get_admin_context()
-    priority = message.get('priority',
-                           CONF.default_notification_level)
-    priority = priority.lower()
-    for topic in CONF.rpc_notifier2.topics:
-        topic = '%s.%s' % (topic, priority)
-        try:
-            rpc.notify(context, topic, message, envelope=True)
-        except Exception:
-            LOG.exception(_("Could not send notification to %(topic)s. "
-                            "Payload=%(message)s"),
-                          {"topic": topic, "message": message})
diff --git a/cinder/openstack/common/notifier/test_notifier.py b/cinder/openstack/common/notifier/test_notifier.py
deleted file mode 100644 (file)
index 11fc21f..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-NOTIFICATIONS = []
-
-
-def notify(_context, message):
-    """Test notifier, stores notifications in memory for unittests."""
-    NOTIFICATIONS.append(message)
diff --git a/cinder/openstack/common/rpc/__init__.py b/cinder/openstack/common/rpc/__init__.py
deleted file mode 100644 (file)
index 235cd96..0000000
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-A remote procedure call (rpc) abstraction.
-
-For some wrappers that add message versioning to rpc, see:
-    rpc.dispatcher
-    rpc.proxy
-"""
-
-import inspect
-
-from oslo.config import cfg
-
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import importutils
-from cinder.openstack.common import local
-from cinder.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-rpc_opts = [
-    cfg.StrOpt('rpc_backend',
-               default='%s.impl_kombu' % __package__,
-               help="The messaging module to use, defaults to kombu."),
-    cfg.IntOpt('rpc_thread_pool_size',
-               default=64,
-               help='Size of RPC thread pool'),
-    cfg.IntOpt('rpc_conn_pool_size',
-               default=30,
-               help='Size of RPC connection pool'),
-    cfg.IntOpt('rpc_response_timeout',
-               default=60,
-               help='Seconds to wait for a response from call or multicall'),
-    cfg.IntOpt('rpc_cast_timeout',
-               default=30,
-               help='Seconds to wait before a cast expires (TTL). '
-                    'Only supported by impl_zmq.'),
-    cfg.ListOpt('allowed_rpc_exception_modules',
-                default=['nova.exception',
-                         'cinder.exception',
-                         'exceptions',
-                         ],
-                help='Modules of exceptions that are permitted to be recreated'
-                     ' upon receiving exception data from an rpc call.'),
-    cfg.BoolOpt('fake_rabbit',
-                default=False,
-                help='If passed, use a fake RabbitMQ provider'),
-    cfg.StrOpt('control_exchange',
-               default='openstack',
-               help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(rpc_opts)
-
-
-def set_defaults(control_exchange):
-    cfg.set_defaults(rpc_opts,
-                     control_exchange=control_exchange)
-
-
-def create_connection(new=True):
-    """Create a connection to the message bus used for rpc.
-
-    For some example usage of creating a connection and some consumers on that
-    connection, see nova.service.
-
-    :param new: Whether or not to create a new connection.  A new connection
-                will be created by default.  If new is False, the
-                implementation is free to return an existing connection from a
-                pool.
-
-    :returns: An instance of openstack.common.rpc.common.Connection
-    """
-    return _get_impl().create_connection(CONF, new=new)
-
-
-def _check_for_lock():
-    if not CONF.debug:
-        return None
-
-    if ((hasattr(local.strong_store, 'locks_held')
-         and local.strong_store.locks_held)):
-        stack = ' :: '.join([frame[3] for frame in inspect.stack()])
-        LOG.warn(_('A RPC is being made while holding a lock. The locks '
-                   'currently held are %(locks)s. This is probably a bug. '
-                   'Please report it. Include the following: [%(stack)s].'),
-                 {'locks': local.strong_store.locks_held,
-                  'stack': stack})
-        return True
-
-    return False
-
-
-def call(context, topic, msg, timeout=None, check_for_lock=False):
-    """Invoke a remote method that returns something.
-
-    :param context: Information that identifies the user that has made this
-                    request.
-    :param topic: The topic to send the rpc message to.  This correlates to the
-                  topic argument of
-                  openstack.common.rpc.common.Connection.create_consumer()
-                  and only applies when the consumer was created with
-                  fanout=False.
-    :param msg: This is a dict in the form { "method" : "method_to_invoke",
-                                             "args" : dict_of_kwargs }
-    :param timeout: int, number of seconds to use for a response timeout.
-                    If set, this overrides the rpc_response_timeout option.
-    :param check_for_lock: if True, a warning is emitted if a RPC call is made
-                    with a lock held.
-
-    :returns: A dict from the remote method.
-
-    :raises: openstack.common.rpc.common.Timeout if a complete response
-             is not received before the timeout is reached.
-    """
-    if check_for_lock:
-        _check_for_lock()
-    return _get_impl().call(CONF, context, topic, msg, timeout)
-
-
-def cast(context, topic, msg):
-    """Invoke a remote method that does not return anything.
-
-    :param context: Information that identifies the user that has made this
-                    request.
-    :param topic: The topic to send the rpc message to.  This correlates to the
-                  topic argument of
-                  openstack.common.rpc.common.Connection.create_consumer()
-                  and only applies when the consumer was created with
-                  fanout=False.
-    :param msg: This is a dict in the form { "method" : "method_to_invoke",
-                                             "args" : dict_of_kwargs }
-
-    :returns: None
-    """
-    return _get_impl().cast(CONF, context, topic, msg)
-
-
-def fanout_cast(context, topic, msg):
-    """Broadcast a remote method invocation with no return.
-
-    This method will get invoked on all consumers that were set up with this
-    topic name and fanout=True.
-
-    :param context: Information that identifies the user that has made this
-                    request.
-    :param topic: The topic to send the rpc message to.  This correlates to the
-                  topic argument of
-                  openstack.common.rpc.common.Connection.create_consumer()
-                  and only applies when the consumer was created with
-                  fanout=True.
-    :param msg: This is a dict in the form { "method" : "method_to_invoke",
-                                             "args" : dict_of_kwargs }
-
-    :returns: None
-    """
-    return _get_impl().fanout_cast(CONF, context, topic, msg)
-
-
-def multicall(context, topic, msg, timeout=None, check_for_lock=False):
-    """Invoke a remote method and get back an iterator.
-
-    In this case, the remote method will be returning multiple values in
-    separate messages, so the return values can be processed as the come in via
-    an iterator.
-
-    :param context: Information that identifies the user that has made this
-                    request.
-    :param topic: The topic to send the rpc message to.  This correlates to the
-                  topic argument of
-                  openstack.common.rpc.common.Connection.create_consumer()
-                  and only applies when the consumer was created with
-                  fanout=False.
-    :param msg: This is a dict in the form { "method" : "method_to_invoke",
-                                             "args" : dict_of_kwargs }
-    :param timeout: int, number of seconds to use for a response timeout.
-                    If set, this overrides the rpc_response_timeout option.
-    :param check_for_lock: if True, a warning is emitted if a RPC call is made
-                    with a lock held.
-
-    :returns: An iterator.  The iterator will yield a tuple (N, X) where N is
-              an index that starts at 0 and increases by one for each value
-              returned and X is the Nth value that was returned by the remote
-              method.
-
-    :raises: openstack.common.rpc.common.Timeout if a complete response
-             is not received before the timeout is reached.
-    """
-    if check_for_lock:
-        _check_for_lock()
-    return _get_impl().multicall(CONF, context, topic, msg, timeout)
-
-
-def notify(context, topic, msg, envelope=False):
-    """Send notification event.
-
-    :param context: Information that identifies the user that has made this
-                    request.
-    :param topic: The topic to send the notification to.
-    :param msg: This is a dict of content of event.
-    :param envelope: Set to True to enable message envelope for notifications.
-
-    :returns: None
-    """
-    return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
-
-
-def cleanup():
-    """Clean up resources in use by implementation.
-
-    Clean up any resources that have been allocated by the RPC implementation.
-    This is typically open connections to a messaging service.  This function
-    would get called before an application using this API exits to allow
-    connections to get torn down cleanly.
-
-    :returns: None
-    """
-    return _get_impl().cleanup()
-
-
-def cast_to_server(context, server_params, topic, msg):
-    """Invoke a remote method that does not return anything.
-
-    :param context: Information that identifies the user that has made this
-                    request.
-    :param server_params: Connection information
-    :param topic: The topic to send the rpc message to.
-    :param msg: This is a dict in the form { "method" : "method_to_invoke",
-                                             "args" : dict_of_kwargs }
-
-    :returns: None
-    """
-    return _get_impl().cast_to_server(CONF, context, server_params, topic,
-                                      msg)
-
-
-def fanout_cast_to_server(context, server_params, topic, msg):
-    """Broadcast to a remote method invocation with no return.
-
-    :param context: Information that identifies the user that has made this
-                    request.
-    :param server_params: Connection information
-    :param topic: The topic to send the rpc message to.
-    :param msg: This is a dict in the form { "method" : "method_to_invoke",
-                                             "args" : dict_of_kwargs }
-
-    :returns: None
-    """
-    return _get_impl().fanout_cast_to_server(CONF, context, server_params,
-                                             topic, msg)
-
-
-def queue_get_for(context, topic, host):
-    """Get a queue name for a given topic + host.
-
-    This function only works if this naming convention is followed on the
-    consumer side, as well.  For example, in nova, every instance of the
-    nova-foo service calls create_consumer() for two topics:
-
-        foo
-        foo.<host>
-
-    Messages sent to the 'foo' topic are distributed to exactly one instance of
-    the nova-foo service.  The services are chosen in a round-robin fashion.
-    Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
-    <host>.
-    """
-    return '%s.%s' % (topic, host) if host else topic
-
-
-_RPCIMPL = None
-
-
-def _get_impl():
-    """Delay import of rpc_backend until configuration is loaded."""
-    global _RPCIMPL
-    if _RPCIMPL is None:
-        try:
-            _RPCIMPL = importutils.import_module(CONF.rpc_backend)
-        except ImportError:
-            # For backwards compatibility with older nova config.
-            impl = CONF.rpc_backend.replace('nova.rpc',
-                                            'nova.openstack.common.rpc')
-            _RPCIMPL = importutils.import_module(impl)
-    return _RPCIMPL
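The module removed above was the generic entry point for the old RPC stack: call(), cast(), fanout_cast() and friends looked up a backend via CONF.rpc_backend and delegated to it. Under oslo.messaging the same operations go through an RPCClient built on a Transport and a Target. The following is a rough sketch of that correspondence, assuming the oslo.messaging 1.3-era API; the names and values are illustrative and are not the exact wiring this change adds to Cinder.

# Illustrative mapping only; names below are examples, not Cinder's real ones.
from oslo.config import cfg
from oslo import messaging

transport = messaging.get_transport(cfg.CONF)
target = messaging.Target(topic='volume', version='1.0')
client = messaging.RPCClient(transport, target)

ctxt = {}  # oslo.messaging expects the context to be (serializable to) a dict
client.call(ctxt, 'ping', value=42)                       # was rpc.call()
client.cast(ctxt, 'ping', value=42)                       # was rpc.cast()
client.prepare(fanout=True).cast(ctxt, 'ping', value=42)  # was rpc.fanout_cast()
client.prepare(server='host1').cast(ctxt, 'ping', value=42)
# the server= form replaces the old 'topic.host' convention of queue_get_for()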
diff --git a/cinder/openstack/common/rpc/amqp.py b/cinder/openstack/common/rpc/amqp.py
deleted file mode 100644 (file)
index 8fcaa59..0000000
+++ /dev/null
@@ -1,637 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 - 2012, Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Shared code between AMQP based openstack.common.rpc implementations.
-
-The code in this module is shared between the rpc implementations based on
-AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
-uses AMQP, but is deprecated and predates this code.
-"""
-
-import collections
-import inspect
-import sys
-import uuid
-
-from eventlet import greenpool
-from eventlet import pools
-from eventlet import queue
-from eventlet import semaphore
-from oslo.config import cfg
-import six
-
-
-from cinder.openstack.common import excutils
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import local
-from cinder.openstack.common import log as logging
-from cinder.openstack.common.rpc import common as rpc_common
-
-
-amqp_opts = [
-    cfg.BoolOpt('amqp_durable_queues',
-                default=False,
-                deprecated_name='rabbit_durable_queues',
-                deprecated_group='DEFAULT',
-                help='Use durable queues in amqp.'),
-    cfg.BoolOpt('amqp_auto_delete',
-                default=False,
-                help='Auto-delete queues in amqp.'),
-]
-
-cfg.CONF.register_opts(amqp_opts)
-
-UNIQUE_ID = '_unique_id'
-LOG = logging.getLogger(__name__)
-
-
-class Pool(pools.Pool):
-    """Class that implements a Pool of Connections."""
-    def __init__(self, conf, connection_cls, *args, **kwargs):
-        self.connection_cls = connection_cls
-        self.conf = conf
-        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
-        kwargs.setdefault("order_as_stack", True)
-        super(Pool, self).__init__(*args, **kwargs)
-        self.reply_proxy = None
-
-    # TODO(comstud): Timeout connections not used in a while
-    def create(self):
-        LOG.debug(_('Pool creating new connection'))
-        return self.connection_cls(self.conf)
-
-    def empty(self):
-        while self.free_items:
-            self.get().close()
-        # Force a new connection pool to be created.
-        # Note that this was added due to failing unit test cases. The issue
-        # is the above "while loop" gets all the cached connections from the
-        # pool and closes them, but never returns them to the pool, a pool
-        # leak. The unit tests hang waiting for an item to be returned to the
-        # pool. The unit tests get here via the tearDown() method. In the run
-        # time code, it gets here via cleanup() and only appears in service.py
-        # just before doing a sys.exit(), so cleanup() only happens once and
-        # the leakage is not a problem.
-        self.connection_cls.pool = None
-
-
-_pool_create_sem = semaphore.Semaphore()
-
-
-def get_connection_pool(conf, connection_cls):
-    with _pool_create_sem:
-        # Make sure only one thread tries to create the connection pool.
-        if not connection_cls.pool:
-            connection_cls.pool = Pool(conf, connection_cls)
-    return connection_cls.pool
-
-
-class ConnectionContext(rpc_common.Connection):
-    """The class that is actually returned to the create_connection() caller.
-
-    This is essentially a wrapper around Connection that supports 'with'.
-    It can also return a new Connection, or one from a pool.
-
-    The class also catches the case where an instance is about to be
-    deleted, so Connections are returned to the pool on exceptions and so
-    forth without making the caller responsible for catching them.  Whenever
-    possible, the connection is returned to the pool.
-    """
-
-    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
-        """Create a new connection, or get one from the pool."""
-        self.connection = None
-        self.conf = conf
-        self.connection_pool = connection_pool
-        if pooled:
-            self.connection = connection_pool.get()
-        else:
-            self.connection = connection_pool.connection_cls(
-                conf,
-                server_params=server_params)
-        self.pooled = pooled
-
-    def __enter__(self):
-        """When with ConnectionContext() is used, return self."""
-        return self
-
-    def _done(self):
-        """If the connection came from a pool, clean it up and put it back.
-        If it did not come from a pool, close it.
-        """
-        if self.connection:
-            if self.pooled:
-                # Reset the connection so it's ready for the next caller
-                # to grab from the pool
-                self.connection.reset()
-                self.connection_pool.put(self.connection)
-            else:
-                try:
-                    self.connection.close()
-                except Exception:
-                    pass
-            self.connection = None
-
-    def __exit__(self, exc_type, exc_value, tb):
-        """End of 'with' statement.  We're done here."""
-        self._done()
-
-    def __del__(self):
-        """Caller is done with this connection.  Make sure we cleaned up."""
-        self._done()
-
-    def close(self):
-        """Caller is done with this connection."""
-        self._done()
-
-    def create_consumer(self, topic, proxy, fanout=False):
-        self.connection.create_consumer(topic, proxy, fanout)
-
-    def create_worker(self, topic, proxy, pool_name):
-        self.connection.create_worker(topic, proxy, pool_name)
-
-    def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
-                           ack_on_error=True):
-        self.connection.join_consumer_pool(callback,
-                                           pool_name,
-                                           topic,
-                                           exchange_name,
-                                           ack_on_error)
-
-    def consume_in_thread(self):
-        self.connection.consume_in_thread()
-
-    def __getattr__(self, key):
-        """Proxy all other calls to the Connection instance."""
-        if self.connection:
-            return getattr(self.connection, key)
-        else:
-            raise rpc_common.InvalidRPCConnectionReuse()
-
-
-class ReplyProxy(ConnectionContext):
-    """Connection class for RPC replies / callbacks."""
-    def __init__(self, conf, connection_pool):
-        self._call_waiters = {}
-        self._num_call_waiters = 0
-        self._num_call_waiters_wrn_threshold = 10
-        self._reply_q = 'reply_' + uuid.uuid4().hex
-        super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
-        self.declare_direct_consumer(self._reply_q, self._process_data)
-        self.consume_in_thread()
-
-    def _process_data(self, message_data):
-        msg_id = message_data.pop('_msg_id', None)
-        waiter = self._call_waiters.get(msg_id)
-        if not waiter:
-            LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
-                       ', message : %(data)s'), {'msg_id': msg_id,
-                                                 'data': message_data})
-            LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
-        else:
-            waiter.put(message_data)
-
-    def add_call_waiter(self, waiter, msg_id):
-        self._num_call_waiters += 1
-        if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
-            LOG.warn(_('Number of call waiters is greater than warning '
-                       'threshold: %d. There could be a MulticallProxyWaiter '
-                       'leak.') % self._num_call_waiters_wrn_threshold)
-            self._num_call_waiters_wrn_threshold *= 2
-        self._call_waiters[msg_id] = waiter
-
-    def del_call_waiter(self, msg_id):
-        self._num_call_waiters -= 1
-        del self._call_waiters[msg_id]
-
-    def get_reply_q(self):
-        return self._reply_q
-
-
-def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
-              failure=None, ending=False, log_failure=True):
-    """Sends a reply or an error on the channel signified by msg_id.
-
-    Failure should be a sys.exc_info() tuple.
-
-    """
-    with ConnectionContext(conf, connection_pool) as conn:
-        if failure:
-            failure = rpc_common.serialize_remote_exception(failure,
-                                                            log_failure)
-
-        msg = {'result': reply, 'failure': failure}
-        if ending:
-            msg['ending'] = True
-        _add_unique_id(msg)
-        # If a reply_q exists, add the msg_id to the reply and pass the
-        # reply_q to direct_send() to use it as the response queue.
-        # Otherwise use the msg_id for backward compatibility.
-        if reply_q:
-            msg['_msg_id'] = msg_id
-            conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
-        else:
-            conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
-
-
-class RpcContext(rpc_common.CommonRpcContext):
-    """Context that supports replying to a rpc.call."""
-    def __init__(self, **kwargs):
-        self.msg_id = kwargs.pop('msg_id', None)
-        self.reply_q = kwargs.pop('reply_q', None)
-        self.conf = kwargs.pop('conf')
-        super(RpcContext, self).__init__(**kwargs)
-
-    def deepcopy(self):
-        values = self.to_dict()
-        values['conf'] = self.conf
-        values['msg_id'] = self.msg_id
-        values['reply_q'] = self.reply_q
-        return self.__class__(**values)
-
-    def reply(self, reply=None, failure=None, ending=False,
-              connection_pool=None, log_failure=True):
-        if self.msg_id:
-            msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
-                      reply, failure, ending, log_failure)
-            if ending:
-                self.msg_id = None
-
-
-def unpack_context(conf, msg):
-    """Unpack context from msg."""
-    context_dict = {}
-    for key in list(msg.keys()):
-        # NOTE(vish): Some versions of python don't like unicode keys
-        #             in kwargs.
-        key = str(key)
-        if key.startswith('_context_'):
-            value = msg.pop(key)
-            context_dict[key[9:]] = value
-    context_dict['msg_id'] = msg.pop('_msg_id', None)
-    context_dict['reply_q'] = msg.pop('_reply_q', None)
-    context_dict['conf'] = conf
-    ctx = RpcContext.from_dict(context_dict)
-    rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
-    return ctx
-
-
-def pack_context(msg, context):
-    """Pack context into msg.
-
-    Values for message keys need to be less than 255 chars, so we pull
-    context out into a bunch of separate keys. If we want to support
-    more arguments in rabbit messages, we may want to do the same
-    for args at some point.
-
-    """
-    if isinstance(context, dict):
-        context_d = dict([('_context_%s' % key, value)
-                          for (key, value) in six.iteritems(context)])
-    else:
-        context_d = dict([('_context_%s' % key, value)
-                          for (key, value) in
-                          six.iteritems(context.to_dict())])
-
-    msg.update(context_d)
-
-
-class _MsgIdCache(object):
-    """This class checks any duplicate messages."""
-
-    # NOTE: This value could be made a configuration option, but
-    #       it is rarely necessary to change it in practice,
-    #       so leave it static for now.
-    DUP_MSG_CHECK_SIZE = 16
-
-    def __init__(self, **kwargs):
-        self.prev_msgids = collections.deque([],
-                                             maxlen=self.DUP_MSG_CHECK_SIZE)
-
-    def check_duplicate_message(self, message_data):
-        """AMQP consumers may read same message twice when exceptions occur
-           before ack is returned. This method prevents doing it.
-        """
-        if UNIQUE_ID in message_data:
-            msg_id = message_data[UNIQUE_ID]
-            if msg_id not in self.prev_msgids:
-                self.prev_msgids.append(msg_id)
-            else:
-                raise rpc_common.DuplicateMessageError(msg_id=msg_id)
-
-
-def _add_unique_id(msg):
-    """Add unique_id for checking duplicate messages."""
-    unique_id = uuid.uuid4().hex
-    msg.update({UNIQUE_ID: unique_id})
-    LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
-
-
-class _ThreadPoolWithWait(object):
-    """Base class for a delayed invocation manager.
-
-    Used by the Connection class to start up green threads
-    to handle incoming messages.
-    """
-
-    def __init__(self, conf, connection_pool):
-        self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
-        self.connection_pool = connection_pool
-        self.conf = conf
-
-    def wait(self):
-        """Wait for all callback threads to exit."""
-        self.pool.waitall()
-
-
-class CallbackWrapper(_ThreadPoolWithWait):
-    """Wraps a straight callback.
-
-    Allows it to be invoked in a green thread.
-    """
-
-    def __init__(self, conf, callback, connection_pool,
-                 wait_for_consumers=False):
-        """Initiates CallbackWrapper object.
-
-        :param conf: cfg.CONF instance
-        :param callback: a callable (probably a function)
-        :param connection_pool: connection pool as returned by
-                                get_connection_pool()
-        :param wait_for_consumers: wait for all green threads to
-                                   complete and raise the last
-                                   caught exception, if any.
-
-        """
-        super(CallbackWrapper, self).__init__(
-            conf=conf,
-            connection_pool=connection_pool,
-        )
-        self.callback = callback
-        self.wait_for_consumers = wait_for_consumers
-        self.exc_info = None
-
-    def _wrap(self, message_data, **kwargs):
-        """Wrap the callback invocation to catch exceptions.
-        """
-        try:
-            self.callback(message_data, **kwargs)
-        except Exception:
-            self.exc_info = sys.exc_info()
-
-    def __call__(self, message_data):
-        self.exc_info = None
-        self.pool.spawn_n(self._wrap, message_data)
-
-        if self.wait_for_consumers:
-            self.pool.waitall()
-            if self.exc_info:
-                six.reraise(self.exc_info[1], None, self.exc_info[2])
-
-
-class ProxyCallback(_ThreadPoolWithWait):
-    """Calls methods on a proxy object based on method and args."""
-
-    def __init__(self, conf, proxy, connection_pool):
-        super(ProxyCallback, self).__init__(
-            conf=conf,
-            connection_pool=connection_pool,
-        )
-        self.proxy = proxy
-        self.msg_id_cache = _MsgIdCache()
-
-    def __call__(self, message_data):
-        """Consumer callback to call a method on a proxy object.
-
-        Parses the message for validity and fires off a thread to call the
-        proxy object method.
-
-        Message data should be a dictionary with two keys:
-            method: string representing the method to call
-            args: dictionary of arg: value
-
-        Example: {'method': 'echo', 'args': {'value': 42}}
-
-        """
-        # It is important to clear the context here, because at this point
-        # the previous context is stored in local.store.context
-        if hasattr(local.store, 'context'):
-            del local.store.context
-        rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
-        self.msg_id_cache.check_duplicate_message(message_data)
-        ctxt = unpack_context(self.conf, message_data)
-        method = message_data.get('method')
-        args = message_data.get('args', {})
-        version = message_data.get('version')
-        namespace = message_data.get('namespace')
-        if not method:
-            LOG.warn(_('no method for message: %s') % message_data)
-            ctxt.reply(_('No method for message: %s') % message_data,
-                       connection_pool=self.connection_pool)
-            return
-        self.pool.spawn_n(self._process_data, ctxt, version, method,
-                          namespace, args)
-
-    def _process_data(self, ctxt, version, method, namespace, args):
-        """Process a message in a new thread.
-
-        If the proxy object we have has a dispatch method
-        (see rpc.dispatcher.RpcDispatcher), pass it the version,
-        method, and args and let it dispatch as appropriate.  If not, use
-        the old behavior of magically calling the specified method on the
-        proxy we have here.
-        """
-        ctxt.update_store()
-        try:
-            rval = self.proxy.dispatch(ctxt, version, method, namespace,
-                                       **args)
-            # Check if the result was a generator
-            if inspect.isgenerator(rval):
-                for x in rval:
-                    ctxt.reply(x, None, connection_pool=self.connection_pool)
-            else:
-                ctxt.reply(rval, None, connection_pool=self.connection_pool)
-            # This final None tells multicall that it is done.
-            ctxt.reply(ending=True, connection_pool=self.connection_pool)
-        except rpc_common.ClientException as e:
-            LOG.debug(_('Expected exception during message handling (%s)') %
-                      e._exc_info[1])
-            ctxt.reply(None, e._exc_info,
-                       connection_pool=self.connection_pool,
-                       log_failure=False)
-        except Exception:
-            # sys.exc_info() is deleted by LOG.exception().
-            exc_info = sys.exc_info()
-            LOG.error(_('Exception during message handling'),
-                      exc_info=exc_info)
-            ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
-
-
-class MulticallProxyWaiter(object):
-    def __init__(self, conf, msg_id, timeout, connection_pool):
-        self._msg_id = msg_id
-        self._timeout = timeout or conf.rpc_response_timeout
-        self._reply_proxy = connection_pool.reply_proxy
-        self._done = False
-        self._got_ending = False
-        self._conf = conf
-        self._dataqueue = queue.LightQueue()
-        # Add this caller to the reply proxy's call_waiters
-        self._reply_proxy.add_call_waiter(self, self._msg_id)
-        self.msg_id_cache = _MsgIdCache()
-
-    def put(self, data):
-        self._dataqueue.put(data)
-
-    def done(self):
-        if self._done:
-            return
-        self._done = True
-        # Remove this caller from reply proxy's call_waiters
-        self._reply_proxy.del_call_waiter(self._msg_id)
-
-    def _process_data(self, data):
-        result = None
-        self.msg_id_cache.check_duplicate_message(data)
-        if data['failure']:
-            failure = data['failure']
-            result = rpc_common.deserialize_remote_exception(self._conf,
-                                                             failure)
-        elif data.get('ending', False):
-            self._got_ending = True
-        else:
-            result = data['result']
-        return result
-
-    def __iter__(self):
-        """Return a result until we get a reply with an 'ending' flag."""
-        if self._done:
-            raise StopIteration
-        while True:
-            try:
-                data = self._dataqueue.get(timeout=self._timeout)
-                result = self._process_data(data)
-            except queue.Empty:
-                self.done()
-                raise rpc_common.Timeout()
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    self.done()
-            if self._got_ending:
-                self.done()
-                raise StopIteration
-            if isinstance(result, Exception):
-                self.done()
-                raise result
-            yield result
-
-
-def create_connection(conf, new, connection_pool):
-    """Create a connection."""
-    return ConnectionContext(conf, connection_pool, pooled=not new)
-
-
-_reply_proxy_create_sem = semaphore.Semaphore()
-
-
-def multicall(conf, context, topic, msg, timeout, connection_pool):
-    """Make a call that returns multiple times."""
-    LOG.debug(_('Making synchronous call on %s ...'), topic)
-    msg_id = uuid.uuid4().hex
-    msg.update({'_msg_id': msg_id})
-    LOG.debug(_('MSG_ID is %s') % (msg_id))
-    _add_unique_id(msg)
-    pack_context(msg, context)
-
-    with _reply_proxy_create_sem:
-        if not connection_pool.reply_proxy:
-            connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
-    msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
-    wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
-    with ConnectionContext(conf, connection_pool) as conn:
-        conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
-    return wait_msg
-
-
-def call(conf, context, topic, msg, timeout, connection_pool):
-    """Sends a message on a topic and wait for a response."""
-    rv = multicall(conf, context, topic, msg, timeout, connection_pool)
-    # NOTE(vish): return the last result from the multicall
-    rv = list(rv)
-    if not rv:
-        return
-    return rv[-1]
-
-
-def cast(conf, context, topic, msg, connection_pool):
-    """Sends a message on a topic without waiting for a response."""
-    LOG.debug(_('Making asynchronous cast on %s...'), topic)
-    _add_unique_id(msg)
-    pack_context(msg, context)
-    with ConnectionContext(conf, connection_pool) as conn:
-        conn.topic_send(topic, rpc_common.serialize_msg(msg))
-
-
-def fanout_cast(conf, context, topic, msg, connection_pool):
-    """Sends a message on a fanout exchange without waiting for a response."""
-    LOG.debug(_('Making asynchronous fanout cast...'))
-    _add_unique_id(msg)
-    pack_context(msg, context)
-    with ConnectionContext(conf, connection_pool) as conn:
-        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
-
-
-def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
-    """Sends a message on a topic to a specific server."""
-    _add_unique_id(msg)
-    pack_context(msg, context)
-    with ConnectionContext(conf, connection_pool, pooled=False,
-                           server_params=server_params) as conn:
-        conn.topic_send(topic, rpc_common.serialize_msg(msg))
-
-
-def fanout_cast_to_server(conf, context, server_params, topic, msg,
-                          connection_pool):
-    """Sends a message on a fanout exchange to a specific server."""
-    _add_unique_id(msg)
-    pack_context(msg, context)
-    with ConnectionContext(conf, connection_pool, pooled=False,
-                           server_params=server_params) as conn:
-        conn.fanout_send(topic, rpc_common.serialize_msg(msg))
-
-
-def notify(conf, context, topic, msg, connection_pool, envelope):
-    """Sends a notification event on a topic."""
-    LOG.debug(_('Sending %(event_type)s on %(topic)s'),
-              dict(event_type=msg.get('event_type'),
-                   topic=topic))
-    _add_unique_id(msg)
-    pack_context(msg, context)
-    with ConnectionContext(conf, connection_pool) as conn:
-        if envelope:
-            msg = rpc_common.serialize_msg(msg)
-        conn.notify_send(topic, msg)
-
-
-def cleanup(connection_pool):
-    if connection_pool:
-        connection_pool.empty()
-
-
-def get_control_exchange(conf):
-    return conf.control_exchange
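Most of the mechanics in the amqp.py file removed above revolve around flattening the request context into the message ('_context_*' keys) and adding reply and duplicate-detection metadata ('_msg_id', '_reply_q', '_unique_id'). A standalone sketch of just the context round trip, mirroring the deleted pack_context()/unpack_context() helpers rather than importing them:

# Mirrors the deleted pack_context()/unpack_context() helpers; standalone,
# for illustration only.
def pack_context(msg, context_dict):
    msg.update(('_context_%s' % k, v) for k, v in context_dict.items())

def unpack_context(msg):
    return {k[len('_context_'):]: msg.pop(k)
            for k in list(msg) if k.startswith('_context_')}

msg = {'method': 'ping', 'args': {'value': 42}}
pack_context(msg, {'user_id': 'u1', 'project_id': 'p1'})
# msg now carries '_context_user_id' and '_context_project_id' alongside
# 'method' and 'args'; the consumer side strips them back out:
assert unpack_context(msg) == {'user_id': 'u1', 'project_id': 'p1'}
assert msg == {'method': 'ping', 'args': {'value': 42}}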
diff --git a/cinder/openstack/common/rpc/common.py b/cinder/openstack/common/rpc/common.py
deleted file mode 100644 (file)
index 9421156..0000000
+++ /dev/null
@@ -1,508 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-import sys
-import traceback
-
-from oslo.config import cfg
-import six
-
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import importutils
-from cinder.openstack.common import jsonutils
-from cinder.openstack.common import local
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import versionutils
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-_RPC_ENVELOPE_VERSION = '2.0'
-'''RPC Envelope Version.
-
-This version number applies to the top level structure of messages sent out.
-It does *not* apply to the message payload, which must be versioned
-independently.  For example, when using rpc APIs, a version number is applied
-for changes to the API being exposed over rpc.  This version number is handled
-in the rpc proxy and dispatcher modules.
-
-This version number applies to the message envelope that is used in the
-serialization done inside the rpc layer.  See serialize_msg() and
-deserialize_msg().
-
-The current message format (version 2.0) is very simple.  It is::
-
-    {
-        'oslo.version': <RPC Envelope Version as a String>,
-        'oslo.message': <Application Message Payload, JSON encoded>
-    }
-
-Message format version '1.0' is just considered to be the messages we sent
-without a message envelope.
-
-So, the current message envelope just includes the envelope version.  It may
-eventually contain additional information, such as a signature for the message
-payload.
-
-We will JSON encode the application message payload.  The message envelope,
-which includes the JSON encoded application message body, will be passed down
-to the messaging libraries as a dict.
-'''
-
-_VERSION_KEY = 'oslo.version'
-_MESSAGE_KEY = 'oslo.message'
-
-_REMOTE_POSTFIX = '_Remote'
-
-
-class RPCException(Exception):
-    msg_fmt = _("An unknown RPC related exception occurred.")
-
-    def __init__(self, message=None, **kwargs):
-        self.kwargs = kwargs
-
-        if not message:
-            try:
-                message = self.msg_fmt % kwargs
-
-            except Exception:
-                # kwargs doesn't match a variable in the message
-                # log the issue and the kwargs
-                LOG.exception(_('Exception in string format operation'))
-                for name, value in six.iteritems(kwargs):
-                    LOG.error("%s: %s" % (name, value))
-                # at least get the core message out if something happened
-                message = self.msg_fmt
-
-        super(RPCException, self).__init__(message)
-
-
-class RemoteError(RPCException):
-    """Signifies that a remote class has raised an exception.
-
-    Contains a string representation of the type of the original exception,
-    the value of the original exception, and the traceback.  These are
-    sent to the parent as a joined string so printing the exception
-    contains all of the relevant info.
-
-    """
-    msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
-
-    def __init__(self, exc_type=None, value=None, traceback=None):
-        self.exc_type = exc_type
-        self.value = value
-        self.traceback = traceback
-        super(RemoteError, self).__init__(exc_type=exc_type,
-                                          value=value,
-                                          traceback=traceback)
-
-
-class Timeout(RPCException):
-    """Signifies that a timeout has occurred.
-
-    This exception is raised if the rpc_response_timeout is reached while
-    waiting for a response from the remote side.
-    """
-    msg_fmt = _('Timeout while waiting on RPC response - '
-                'topic: "%(topic)s", RPC method: "%(method)s" '
-                'info: "%(info)s"')
-
-    def __init__(self, info=None, topic=None, method=None):
-        """Initiates Timeout object.
-
-        :param info: Extra info to convey to the user
-        :param topic: The topic that the rpc call was sent to
-        :param method: The name of the rpc method being
-                       called
-        """
-        self.info = info
-        self.topic = topic
-        self.method = method
-        super(Timeout, self).__init__(
-            None,
-            info=info or _('<unknown>'),
-            topic=topic or _('<unknown>'),
-            method=method or _('<unknown>'))
-
-
-class DuplicateMessageError(RPCException):
-    msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
-
-
-class InvalidRPCConnectionReuse(RPCException):
-    msg_fmt = _("Invalid reuse of an RPC connection.")
-
-
-class UnsupportedRpcVersion(RPCException):
-    msg_fmt = _("Specified RPC version, %(version)s, not supported by "
-                "this endpoint.")
-
-
-class UnsupportedRpcEnvelopeVersion(RPCException):
-    msg_fmt = _("Specified RPC envelope version, %(version)s, "
-                "not supported by this endpoint.")
-
-
-class RpcVersionCapError(RPCException):
-    msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
-
-
-class Connection(object):
-    """A connection, returned by rpc.create_connection().
-
-    This class represents a connection to the message bus used for rpc.
-    An instance of this class should never be created by users of the rpc API.
-    Use rpc.create_connection() instead.
-    """
-    def close(self):
-        """Close the connection.
-
-        This method must be called when the connection will no longer be used.
-        It will ensure that any resources associated with the connection, such
-        as a network connection, are cleaned up.
-        """
-        raise NotImplementedError()
-
-    def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer on this connection.
-
-        A consumer is associated with a message queue on the backend message
-        bus.  The consumer will read messages from the queue, unpack them, and
-        dispatch them to the proxy object.  The contents of the message pulled
-        off of the queue will determine which method gets called on the proxy
-        object.
-
-        :param topic: This is a name associated with what to consume from.
-                      Multiple instances of a service may consume from the same
-                      topic. For example, all instances of nova-compute consume
-                      from a queue called "compute".  In that case, the
-                      messages will get distributed amongst the consumers in a
-                      round-robin fashion if fanout=False.  If fanout=True,
-                      every consumer associated with this topic will get a
-                      copy of every message.
-        :param proxy: The object that will handle all incoming messages.
-        :param fanout: Whether or not this is a fanout topic.  See the
-                       documentation for the topic parameter for some
-                       additional comments on this.
-        """
-        raise NotImplementedError()
-
-    def create_worker(self, topic, proxy, pool_name):
-        """Create a worker on this connection.
-
-        A worker is like a regular consumer of messages directed to a
-        topic, except that it is part of a set of such consumers (the
-        "pool") which may run in parallel. Every pool of workers will
-        receive a given message, but only one worker in the pool will
-        be asked to process it. Load is distributed across the members
-        of the pool in round-robin fashion.
-
-        :param topic: This is a name associated with what to consume from.
-                      Multiple instances of a service may consume from the same
-                      topic.
-        :param proxy: The object that will handle all incoming messages.
-        :param pool_name: String containing the name of the pool of workers
-        """
-        raise NotImplementedError()
-
-    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
-        """Register as a member of a group of consumers.
-
-        Uses given topic from the specified exchange.
-        Exactly one member of a given pool will receive each message.
-
-        A message will be delivered to multiple pools, if more than
-        one is created.
-
-        :param callback: Callable to be invoked for each message.
-        :type callback: callable accepting one argument
-        :param pool_name: The name of the consumer pool.
-        :type pool_name: str
-        :param topic: The routing topic for desired messages.
-        :type topic: str
-        :param exchange_name: The name of the message exchange where
-                              the client should attach. Defaults to
-                              the configured exchange.
-        :type exchange_name: str
-        """
-        raise NotImplementedError()
-
-    def consume_in_thread(self):
-        """Spawn a thread to handle incoming messages.
-
-        Spawn a thread that will be responsible for handling all incoming
-        messages for consumers that were set up on this connection.
-
-        Message dispatching inside of this is expected to be implemented in a
-        non-blocking manner.  An example implementation would be having this
-        thread pull messages in for all of the consumers, but utilize a thread
-        pool for dispatching the messages to the proxy objects.
-        """
-        raise NotImplementedError()
-
-
-def _safe_log(log_func, msg, msg_data):
-    """Sanitizes the msg_data field before logging."""
-    SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
-
-    def _fix_passwords(d):
-        """Sanitizes the password fields in the dictionary."""
-        for k in six.iterkeys(d):
-            if k.lower().find('password') != -1:
-                d[k] = '<SANITIZED>'
-            elif k.lower() in SANITIZE:
-                d[k] = '<SANITIZED>'
-            elif isinstance(d[k], list):
-                for e in d[k]:
-                    if isinstance(e, dict):
-                        _fix_passwords(e)
-            elif isinstance(d[k], dict):
-                _fix_passwords(d[k])
-        return d
-
-    return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
-
-
-def serialize_remote_exception(failure_info, log_failure=True):
-    """Prepares exception data to be sent over rpc.
-
-    Failure_info should be a sys.exc_info() tuple.
-
-    """
-    tb = traceback.format_exception(*failure_info)
-    failure = failure_info[1]
-    if log_failure:
-        LOG.error(_("Returning exception %s to caller"),
-                  six.text_type(failure))
-        LOG.error(tb)
-
-    kwargs = {}
-    if hasattr(failure, 'kwargs'):
-        kwargs = failure.kwargs
-
-    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
-    # exceptions. Let's turn it back into the original exception type.
-    cls_name = str(failure.__class__.__name__)
-    mod_name = str(failure.__class__.__module__)
-    if (cls_name.endswith(_REMOTE_POSTFIX) and
-            mod_name.endswith(_REMOTE_POSTFIX)):
-        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
-        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
-
-    data = {
-        'class': cls_name,
-        'module': mod_name,
-        'message': six.text_type(failure),
-        'tb': tb,
-        'args': failure.args,
-        'kwargs': kwargs
-    }
-
-    json_data = jsonutils.dumps(data)
-
-    return json_data
-
-
-def deserialize_remote_exception(conf, data):
-    failure = jsonutils.loads(str(data))
-
-    trace = failure.get('tb', [])
-    message = failure.get('message', "") + "\n" + "\n".join(trace)
-    name = failure.get('class')
-    module = failure.get('module')
-
-    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
-    # order to prevent arbitrary code execution.
-    if module not in conf.allowed_rpc_exception_modules:
-        return RemoteError(name, failure.get('message'), trace)
-
-    try:
-        mod = importutils.import_module(module)
-        klass = getattr(mod, name)
-        if not issubclass(klass, Exception):
-            raise TypeError("Can only deserialize Exceptions")
-
-        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
-    except (AttributeError, TypeError, ImportError):
-        return RemoteError(name, failure.get('message'), trace)
-
-    ex_type = type(failure)
-    str_override = lambda self: message
-    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
-                       {'__str__': str_override, '__unicode__': str_override})
-    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
-    try:
-        # NOTE(ameade): Dynamically create a new exception type and swap it in
-        # as the new type for the exception. This only works on user defined
-        # Exceptions and not core python exceptions. This is important because
-        # we cannot necessarily change an exception message so we must override
-        # the __str__ method.
-        failure.__class__ = new_ex_type
-    except TypeError:
-        # NOTE(ameade): If a core exception then just add the traceback to the
-        # first exception argument.
-        failure.args = (message,) + failure.args[1:]
-    return failure
-
-
-class CommonRpcContext(object):
-    def __init__(self, **kwargs):
-        self.values = kwargs
-
-    def __getattr__(self, key):
-        try:
-            return self.values[key]
-        except KeyError:
-            raise AttributeError(key)
-
-    def to_dict(self):
-        return copy.deepcopy(self.values)
-
-    @classmethod
-    def from_dict(cls, values):
-        return cls(**values)
-
-    def deepcopy(self):
-        return self.from_dict(self.to_dict())
-
-    def update_store(self):
-        local.store.context = self
-
-    def elevated(self, read_deleted=None, overwrite=False):
-        """Return a version of this context with admin flag set."""
-        # TODO(russellb) This method is a bit of a nova-ism.  It makes
-        # some assumptions about the data in the request context sent
-        # across rpc, while the rest of this class does not.  We could get
-        # rid of this if we changed the nova code that uses this to
-        # convert the RpcContext back to its native RequestContext doing
-        # something like nova.context.RequestContext.from_dict(ctxt.to_dict())
-
-        context = self.deepcopy()
-        context.values['is_admin'] = True
-
-        context.values.setdefault('roles', [])
-
-        if 'admin' not in context.values['roles']:
-            context.values['roles'].append('admin')
-
-        if read_deleted is not None:
-            context.values['read_deleted'] = read_deleted
-
-        return context
-
-
-class ClientException(Exception):
-    """Encapsulates actual exception expected to be hit by a RPC proxy object.
-
-    Merely instantiating it records the current exception information, which
-    will be passed back to the RPC client without exceptional logging.
-    """
-    def __init__(self):
-        self._exc_info = sys.exc_info()
-
-
-def catch_client_exception(exceptions, func, *args, **kwargs):
-    try:
-        return func(*args, **kwargs)
-    except Exception as e:
-        if type(e) in exceptions:
-            raise ClientException()
-        else:
-            raise
-
-
-def client_exceptions(*exceptions):
-    """Decorator for manager methods that raise expected exceptions.
-
-    Marking a Manager method with this decorator allows the declaration
-    of expected exceptions that the RPC layer should not consider fatal,
-    and not log as if they were generated in a real error scenario. Note
-    that this will cause listed exceptions to be wrapped in a
-    ClientException, which is used internally by the RPC layer.
-    """
-    def outer(func):
-        def inner(*args, **kwargs):
-            return catch_client_exception(exceptions, func, *args, **kwargs)
-        return inner
-    return outer
-
-
-# TODO(sirp): we should deprecate this in favor of
-# using `versionutils.is_compatible` directly
-def version_is_compatible(imp_version, version):
-    """Determine whether versions are compatible.
-
-    :param imp_version: The version implemented
-    :param version: The version requested by an incoming message.
-    """
-    return versionutils.is_compatible(version, imp_version)
-
-
-def serialize_msg(raw_msg):
-    # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
-    # information about this format.
-    msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
-           _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
-
-    return msg
-
-
-def deserialize_msg(msg):
-    # NOTE(russellb): Hang on to your hats, this road is about to
-    # get a little bumpy.
-    #
-    # Robustness Principle:
-    #    "Be strict in what you send, liberal in what you accept."
-    #
-    # At this point we have to do a bit of guessing about what it
-    # is we just received.  Here is the set of possibilities:
-    #
-    # 1) We received a dict.  This could be 2 things:
-    #
-    #   a) Inspect it to see if it looks like a standard message envelope.
-    #      If so, great!
-    #
-    #   b) If it doesn't look like a standard message envelope, it could either
-    #      be a notification, or a message from before we added a message
-    #      envelope (referred to as version 1.0).
-    #      Just return the message as-is.
-    #
-    # 2) It's any other non-dict type.  Just return it and hope for the best.
-    #    This case covers return values from rpc.call() from before message
-    #    envelopes were used.  (messages to call a method were always a dict)
-
-    if not isinstance(msg, dict):
-        # See #2 above.
-        return msg
-
-    base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
-    if not all(map(lambda key: key in msg, base_envelope_keys)):
-        #  See #1.b above.
-        return msg
-
-    # At this point we think we have the message envelope
-    # format we were expecting. (#1.a above)
-
-    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
-        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
-
-    raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
-
-    return raw_msg
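The envelope logic at the end of common.py above boils down to a two-key wrapper around a JSON-encoded payload. A quick, standalone illustration of what serialize_msg() produces and what deserialize_msg() accepts, using the stdlib json module in place of jsonutils:

import json

raw_msg = {'method': 'ping', 'args': {'value': 42}}

# Version 2.0 envelope, as described in the _RPC_ENVELOPE_VERSION docstring.
envelope = {'oslo.version': '2.0', 'oslo.message': json.dumps(raw_msg)}
assert json.loads(envelope['oslo.message']) == raw_msg

# Pre-envelope ("1.0") messages are just the bare dict; deserialize_msg()
# passes anything that does not look like an envelope through unchanged.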
diff --git a/cinder/openstack/common/rpc/dispatcher.py b/cinder/openstack/common/rpc/dispatcher.py
deleted file mode 100644 (file)
index 4f98f3a..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2012 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-Code for rpc message dispatching.
-
-Messages that come in have a version number associated with them.  RPC API
-version numbers are in the form:
-
-    Major.Minor
-
-For a given message with version X.Y, the receiver must be marked as able to
-handle messages of version A.B, where:
-
-    A = X
-
-    B >= Y
-
-The Major version number would be incremented for an almost completely new API.
-The Minor version number would be incremented for backwards compatible changes
-to an existing API.  A backwards compatible change could be something like
-adding a new method, adding an argument to an existing method (but not
-requiring it), or changing the type for an existing argument (but still
-handling the old type as well).
-
-The conversion over to a versioned API must be done on both the client side and
-server side of the API at the same time.  However, as the code stands today,
-there can be both versioned and unversioned APIs implemented in the same code
-base.
-
-EXAMPLES
-========
-
-Nova was the first project to use versioned rpc APIs.  Consider the compute rpc
-API as an example.  The client side is in nova/compute/rpcapi.py and the server
-side is in nova/compute/manager.py.
-
-
-Example 1) Adding a new method.
--------------------------------
-
-Adding a new method is a backwards compatible change.  It should be added to
-nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
-X.Y+1.  On the client side, the new method in nova/compute/rpcapi.py should
-have a specific version specified to indicate the minimum API version that must
-be implemented for the method to be supported.  For example::
-
-    def get_host_uptime(self, ctxt, host):
-        topic = _compute_topic(self.topic, ctxt, host, None)
-        return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
-                version='1.1')
-
-In this case, version '1.1' is the first version that supported the
-get_host_uptime() method.
-
-
-Example 2) Adding a new parameter.
-----------------------------------
-
-Adding a new parameter to an rpc method can be made backwards compatible.  The
-RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
-The implementation of the method must not expect the parameter to be present.::
-
-    def some_remote_method(self, arg1, arg2, newarg=None):
-        # The code needs to deal with newarg=None for cases
-        # where an older client sends a message without it.
-        pass
-
-On the client side, the same changes should be made as in example 1.  The
-minimum version that supports the new parameter should be specified.
-"""
-
-import six
-
-from cinder.openstack.common.rpc import common as rpc_common
-from cinder.openstack.common.rpc import serializer as rpc_serializer
-
-
-class RpcDispatcher(object):
-    """Dispatch rpc messages according to the requested API version.
-
-    This class can be used as the top level 'manager' for a service.  It
-    contains a list of underlying managers that have an API_VERSION attribute.
-    """
-
-    def __init__(self, callbacks, serializer=None):
-        """Initialize the rpc dispatcher.
-
-        :param callbacks: List of proxy objects that are an instance
-                          of a class with rpc methods exposed.  Each proxy
-                          object should have an RPC_API_VERSION attribute.
-        :param serializer: The Serializer object that will be used to
-                           deserialize arguments before the method call and
-                           to serialize the result after it returns.
-        """
-        self.callbacks = callbacks
-        if serializer is None:
-            serializer = rpc_serializer.NoOpSerializer()
-        self.serializer = serializer
-        super(RpcDispatcher, self).__init__()
-
-    def _deserialize_args(self, context, kwargs):
-        """Helper method called to deserialize args before dispatch.
-
-        This calls our serializer on each argument, returning a new set of
-        args that have been deserialized.
-
-        :param context: The request context
-        :param kwargs: The arguments to be deserialized
-        :returns: A new set of deserialized args
-        """
-        new_kwargs = dict()
-        for argname, arg in six.iteritems(kwargs):
-            new_kwargs[argname] = self.serializer.deserialize_entity(context,
-                                                                     arg)
-        return new_kwargs
-
-    def dispatch(self, ctxt, version, method, namespace, **kwargs):
-        """Dispatch a message based on a requested version.
-
-        :param ctxt: The request context
-        :param version: The requested API version from the incoming message
-        :param method: The method requested to be called by the incoming
-                       message.
-        :param namespace: The namespace for the requested method.  If None,
-                          the dispatcher will look for a method on a callback
-                          object with no namespace set.
-        :param kwargs: A dict of keyword arguments to be passed to the method.
-
-        :returns: Whatever is returned by the underlying method that gets
-                  called.
-        """
-        if not version:
-            version = '1.0'
-
-        had_compatible = False
-        for proxyobj in self.callbacks:
-            # Check for namespace compatibility
-            try:
-                cb_namespace = proxyobj.RPC_API_NAMESPACE
-            except AttributeError:
-                cb_namespace = None
-
-            if namespace != cb_namespace:
-                continue
-
-            # Check for version compatibility
-            try:
-                rpc_api_version = proxyobj.RPC_API_VERSION
-            except AttributeError:
-                rpc_api_version = '1.0'
-
-            is_compatible = rpc_common.version_is_compatible(rpc_api_version,
-                                                             version)
-            had_compatible = had_compatible or is_compatible
-
-            if not hasattr(proxyobj, method):
-                continue
-            if is_compatible:
-                kwargs = self._deserialize_args(ctxt, kwargs)
-                result = getattr(proxyobj, method)(ctxt, **kwargs)
-                return self.serializer.serialize_entity(ctxt, result)
-
-        if had_compatible:
-            raise AttributeError("No such RPC function '%s'" % method)
-        else:
-            raise rpc_common.UnsupportedRpcVersion(version=version)
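The compatibility rule documented at the top of dispatcher.py above (a receiver implementing A.B handles a request for X.Y only if A == X and B >= Y) can be restated in a few lines. The deleted code actually delegates to versionutils.is_compatible(), so the snippet below is only an illustration of the rule:

# Standalone restatement of the version rule; the real code uses
# cinder.openstack.common.versionutils.is_compatible().
def version_is_compatible(imp_version, version):
    imp_major, imp_minor = map(int, imp_version.split('.'))
    major, minor = map(int, version.split('.'))
    return imp_major == major and imp_minor >= minor

assert version_is_compatible('1.2', '1.0')       # older request: accepted
assert not version_is_compatible('1.2', '1.3')   # request too new: rejected
assert not version_is_compatible('2.0', '1.2')   # major mismatch: rejected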
diff --git a/cinder/openstack/common/rpc/impl_fake.py b/cinder/openstack/common/rpc/impl_fake.py
deleted file mode 100644 (file)
index 194228c..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-#    Copyright 2011 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""Fake RPC implementation which calls proxy methods directly with no
-queues.  Casts will block, but this is very useful for tests.
-"""
-
-import inspect
-# NOTE(russellb): We specifically want to use json, not our own jsonutils.
-# jsonutils has some extra logic to automatically convert objects to primitive
-# types so that they can be serialized.  We want to catch all cases where
-# non-primitive types make it into this code and treat it as an error.
-import json
-import time
-
-import eventlet
-import six
-
-from cinder.openstack.common.rpc import common as rpc_common
-
-CONSUMERS = {}
-
-
-class RpcContext(rpc_common.CommonRpcContext):
-    def __init__(self, **kwargs):
-        super(RpcContext, self).__init__(**kwargs)
-        self._response = []
-        self._done = False
-
-    def deepcopy(self):
-        values = self.to_dict()
-        new_inst = self.__class__(**values)
-        new_inst._response = self._response
-        new_inst._done = self._done
-        return new_inst
-
-    def reply(self, reply=None, failure=None, ending=False):
-        if ending:
-            self._done = True
-        if not self._done:
-            self._response.append((reply, failure))
-
-
-class Consumer(object):
-    def __init__(self, topic, proxy):
-        self.topic = topic
-        self.proxy = proxy
-
-    def call(self, context, version, method, namespace, args, timeout):
-        done = eventlet.event.Event()
-
-        def _inner():
-            ctxt = RpcContext.from_dict(context.to_dict())
-            try:
-                rval = self.proxy.dispatch(context, version, method,
-                                           namespace, **args)
-                res = []
-                # Caller might have called ctxt.reply() manually
-                for (reply, failure) in ctxt._response:
-                    if failure:
-                        six.reraise(failure[0], failure[1], failure[2])
-                    res.append(reply)
-                # If an 'ending' reply was not sent, we might have more
-                # data to return from the function itself.
-                if not ctxt._done:
-                    if inspect.isgenerator(rval):
-                        for val in rval:
-                            res.append(val)
-                    else:
-                        res.append(rval)
-                done.send(res)
-            except rpc_common.ClientException as e:
-                done.send_exception(e._exc_info[1])
-            except Exception as e:
-                done.send_exception(e)
-
-        thread = eventlet.greenthread.spawn(_inner)
-
-        if timeout:
-            start_time = time.time()
-            while not done.ready():
-                eventlet.greenthread.sleep(1)
-                cur_time = time.time()
-                if (cur_time - start_time) > timeout:
-                    thread.kill()
-                    raise rpc_common.Timeout()
-
-        return done.wait()
-
-
-class Connection(object):
-    """Connection object."""
-
-    def __init__(self):
-        self.consumers = []
-
-    def create_consumer(self, topic, proxy, fanout=False):
-        consumer = Consumer(topic, proxy)
-        self.consumers.append(consumer)
-        if topic not in CONSUMERS:
-            CONSUMERS[topic] = []
-        CONSUMERS[topic].append(consumer)
-
-    def close(self):
-        for consumer in self.consumers:
-            CONSUMERS[consumer.topic].remove(consumer)
-        self.consumers = []
-
-    def consume_in_thread(self):
-        pass
-
-
-def create_connection(conf, new=True):
-    """Create a connection."""
-    return Connection()
-
-
-def check_serialize(msg):
-    """Make sure a message intended for rpc can be serialized."""
-    json.dumps(msg)
-
-
-def multicall(conf, context, topic, msg, timeout=None):
-    """Make a call that returns multiple times."""
-
-    check_serialize(msg)
-
-    method = msg.get('method')
-    if not method:
-        return
-    args = msg.get('args', {})
-    version = msg.get('version', None)
-    namespace = msg.get('namespace', None)
-
-    try:
-        consumer = CONSUMERS[topic][0]
-    except (KeyError, IndexError):
-        raise rpc_common.Timeout("No consumers available")
-    else:
-        return consumer.call(context, version, method, namespace, args,
-                             timeout)
-
-
-def call(conf, context, topic, msg, timeout=None):
-    """Sends a message on a topic and waits for a response."""
-    rv = multicall(conf, context, topic, msg, timeout)
-    # NOTE(vish): return the last result from the multicall
-    rv = list(rv)
-    if not rv:
-        return
-    return rv[-1]
-
-
-def cast(conf, context, topic, msg):
-    check_serialize(msg)
-    try:
-        call(conf, context, topic, msg)
-    except Exception:
-        pass
-
-
-def notify(conf, context, topic, msg, envelope):
-    check_serialize(msg)
-
-
-def cleanup():
-    pass
-
-
-def fanout_cast(conf, context, topic, msg):
-    """Cast to all consumers of a topic."""
-    check_serialize(msg)
-    method = msg.get('method')
-    if not method:
-        return
-    args = msg.get('args', {})
-    version = msg.get('version', None)
-    namespace = msg.get('namespace', None)
-
-    for consumer in CONSUMERS.get(topic, []):
-        try:
-            consumer.call(context, version, method, namespace, args, None)
-        except Exception:
-            pass
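
The fake driver deleted above runs everything in-process through the module-level CONSUMERS registry, so call() and cast() complete without a broker; the proxy object only needs a dispatch() method and the context only needs to_dict(). A minimal, hedged sketch of driving it directly (EchoProxy, FakeContext and the topic name are illustrative):

    from cinder.openstack.common.rpc import impl_fake


    class FakeContext(object):
        # Illustrative stand-in for a request context; the fake driver only
        # needs to_dict() so it can rebuild an RpcContext on the consumer side.
        def to_dict(self):
            return {'user_id': 'fake', 'project_id': 'fake'}


    class EchoProxy(object):
        # In real use the proxy is an RpcDispatcher; the fake driver simply
        # forwards the decoded message to its dispatch() method.
        def dispatch(self, ctxt, version, method, namespace, **kwargs):
            return kwargs


    conn = impl_fake.create_connection(conf=None)
    conn.create_consumer('test-topic', EchoProxy())

    msg = {'method': 'echo', 'args': {'value': 42}, 'version': '1.0'}
    print(impl_fake.call(None, FakeContext(), 'test-topic', msg))  # {'value': 42}
    conn.close()
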
diff --git a/cinder/openstack/common/rpc/impl_kombu.py b/cinder/openstack/common/rpc/impl_kombu.py
deleted file mode 100644 (file)
index 3c6d320..0000000
+++ /dev/null
@@ -1,855 +0,0 @@
-#    Copyright 2011 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-import itertools
-import socket
-import ssl
-import time
-import uuid
-
-import eventlet
-import greenlet
-import kombu
-import kombu.connection
-import kombu.entity
-import kombu.messaging
-from oslo.config import cfg
-import six
-
-from cinder.openstack.common import excutils
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import network_utils
-from cinder.openstack.common.rpc import amqp as rpc_amqp
-from cinder.openstack.common.rpc import common as rpc_common
-from cinder.openstack.common import sslutils
-
-kombu_opts = [
-    cfg.StrOpt('kombu_ssl_version',
-               default='',
-               help='SSL version to use (valid only if SSL enabled). '
-                    'Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
-                    'be available on some distributions.'
-               ),
-    cfg.StrOpt('kombu_ssl_keyfile',
-               default='',
-               help='SSL key file (valid only if SSL enabled)'),
-    cfg.StrOpt('kombu_ssl_certfile',
-               default='',
-               help='SSL cert file (valid only if SSL enabled)'),
-    cfg.StrOpt('kombu_ssl_ca_certs',
-               default='',
-               help=('SSL certification authority file '
-                     '(valid only if SSL enabled)')),
-    cfg.StrOpt('rabbit_host',
-               default='localhost',
-               help='The RabbitMQ broker address where a single node is used'),
-    cfg.IntOpt('rabbit_port',
-               default=5672,
-               help='The RabbitMQ broker port where a single node is used'),
-    cfg.ListOpt('rabbit_hosts',
-                default=['$rabbit_host:$rabbit_port'],
-                help='RabbitMQ HA cluster host:port pairs'),
-    cfg.BoolOpt('rabbit_use_ssl',
-                default=False,
-                help='connect over SSL for RabbitMQ'),
-    cfg.StrOpt('rabbit_userid',
-               default='guest',
-               help='the RabbitMQ userid'),
-    cfg.StrOpt('rabbit_password',
-               default='guest',
-               help='the RabbitMQ password',
-               secret=True),
-    cfg.StrOpt('rabbit_virtual_host',
-               default='/',
-               help='the RabbitMQ virtual host'),
-    cfg.IntOpt('rabbit_retry_interval',
-               default=1,
-               help='how frequently to retry connecting with RabbitMQ'),
-    cfg.IntOpt('rabbit_retry_backoff',
-               default=2,
-               help='how long to back off between retries when connecting '
-                    'to RabbitMQ'),
-    cfg.IntOpt('rabbit_max_retries',
-               default=0,
-               help='maximum retries with trying to connect to RabbitMQ '
-                    '(the default of 0 implies an infinite retry count)'),
-    cfg.BoolOpt('rabbit_ha_queues',
-                default=False,
-                help='use H/A queues in RabbitMQ (x-ha-policy: all). '
-                     'You need to wipe the RabbitMQ database when '
-                     'changing this option.'),
-
-]
-
-cfg.CONF.register_opts(kombu_opts)
-
-LOG = rpc_common.LOG
-
-
-def _get_queue_arguments(conf):
-    """Construct the arguments for declaring a queue.
-
-    If the rabbit_ha_queues option is set, we declare a mirrored queue
-    as described here:
-
-      http://www.rabbitmq.com/ha.html
-
-    Setting x-ha-policy to all means that the queue will be mirrored
-    to all nodes in the cluster.
-    """
-    return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
-
-
-class ConsumerBase(object):
-    """Consumer base class."""
-
-    def __init__(self, channel, callback, tag, **kwargs):
-        """Declare a queue on an amqp channel.
-
-        'channel' is the amqp channel to use
-        'callback' is the callback to call when messages are received
-        'tag' is a unique ID for the consumer on the channel
-
-        queue name, exchange name, and other kombu options are
-        passed in here as a dictionary.
-        """
-        self.callback = callback
-        self.tag = str(tag)
-        self.kwargs = kwargs
-        self.queue = None
-        self.ack_on_error = kwargs.get('ack_on_error', True)
-        self.reconnect(channel)
-
-    def reconnect(self, channel):
-        """Re-declare the queue after a rabbit reconnect."""
-        self.channel = channel
-        self.kwargs['channel'] = channel
-        self.queue = kombu.entity.Queue(**self.kwargs)
-        self.queue.declare()
-
-    def _callback_handler(self, message, callback):
-        """Call callback with deserialized message.
-
-        Messages that are processed without exception are ack'ed.
-
-        If the message processing generates an exception, it will be
-        ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
-        """
-
-        try:
-            msg = rpc_common.deserialize_msg(message.payload)
-            callback(msg)
-        except Exception:
-            if self.ack_on_error:
-                LOG.exception(_("Failed to process message"
-                                " ... skipping it."))
-                message.ack()
-            else:
-                LOG.exception(_("Failed to process message"
-                                " ... will requeue."))
-                message.requeue()
-        else:
-            message.ack()
-
-    def consume(self, *args, **kwargs):
-        """Actually declare the consumer on the amqp channel.  This will
-        start the flow of messages from the queue.  Using the
-        Connection.iterconsume() iterator will process the messages,
-        calling the appropriate callback.
-
-        If a callback is specified in kwargs, use that.  Otherwise,
-        use the callback passed during __init__()
-
-        If kwargs['nowait'] is False (the default), this call waits for
-        the broker to confirm the consumer before returning; it does not
-        block until a message is read.
-
-        """
-
-        options = {'consumer_tag': self.tag}
-        options['nowait'] = kwargs.get('nowait', False)
-        callback = kwargs.get('callback', self.callback)
-        if not callback:
-            raise ValueError("No callback defined")
-
-        def _callback(raw_message):
-            message = self.channel.message_to_python(raw_message)
-            self._callback_handler(message, callback)
-
-        self.queue.consume(*args, callback=_callback, **options)
-
-    def cancel(self):
-        """Cancel the consuming from the queue, if it has started."""
-        try:
-            self.queue.cancel(self.tag)
-        except KeyError as e:
-            # NOTE(comstud): Kludge to get around an amqplib bug
-            if str(e) != "u'%s'" % self.tag:
-                raise
-        self.queue = None
-
-
-class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'."""
-
-    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
-        """Init a 'direct' queue.
-
-        'channel' is the amqp channel to use
-        'msg_id' is the msg_id to listen on
-        'callback' is the callback to call when messages are received
-        'tag' is a unique ID for the consumer on the channel
-
-        Other kombu options may be passed
-        """
-        # Default options
-        options = {'durable': False,
-                   'queue_arguments': _get_queue_arguments(conf),
-                   'auto_delete': True,
-                   'exclusive': False}
-        options.update(kwargs)
-        exchange = kombu.entity.Exchange(name=msg_id,
-                                         type='direct',
-                                         durable=options['durable'],
-                                         auto_delete=options['auto_delete'])
-        super(DirectConsumer, self).__init__(channel,
-                                             callback,
-                                             tag,
-                                             name=msg_id,
-                                             exchange=exchange,
-                                             routing_key=msg_id,
-                                             **options)
-
-
-class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'."""
-
-    def __init__(self, conf, channel, topic, callback, tag, name=None,
-                 exchange_name=None, **kwargs):
-        """Init a 'topic' queue.
-
-        :param channel: the amqp channel to use
-        :param topic: the topic to listen on
-        :paramtype topic: str
-        :param callback: the callback to call when messages are received
-        :param tag: a unique ID for the consumer on the channel
-        :param name: optional queue name, defaults to topic
-        :paramtype name: str
-
-        Other kombu options may be passed as keyword arguments
-        """
-        # Default options
-        options = {'durable': conf.amqp_durable_queues,
-                   'queue_arguments': _get_queue_arguments(conf),
-                   'auto_delete': conf.amqp_auto_delete,
-                   'exclusive': False}
-        options.update(kwargs)
-        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
-        exchange = kombu.entity.Exchange(name=exchange_name,
-                                         type='topic',
-                                         durable=options['durable'],
-                                         auto_delete=options['auto_delete'])
-        super(TopicConsumer, self).__init__(channel,
-                                            callback,
-                                            tag,
-                                            name=name or topic,
-                                            exchange=exchange,
-                                            routing_key=topic,
-                                            **options)
-
-
-class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'."""
-
-    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
-        """Init a 'fanout' queue.
-
-        'channel' is the amqp channel to use
-        'topic' is the topic to listen on
-        'callback' is the callback to call when messages are received
-        'tag' is a unique ID for the consumer on the channel
-
-        Other kombu options may be passed
-        """
-        unique = uuid.uuid4().hex
-        exchange_name = '%s_fanout' % topic
-        queue_name = '%s_fanout_%s' % (topic, unique)
-
-        # Default options
-        options = {'durable': False,
-                   'queue_arguments': _get_queue_arguments(conf),
-                   'auto_delete': True,
-                   'exclusive': False}
-        options.update(kwargs)
-        exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
-                                         durable=options['durable'],
-                                         auto_delete=options['auto_delete'])
-        super(FanoutConsumer, self).__init__(channel, callback, tag,
-                                             name=queue_name,
-                                             exchange=exchange,
-                                             routing_key=topic,
-                                             **options)
-
-
-class Publisher(object):
-    """Base Publisher class."""
-
-    def __init__(self, channel, exchange_name, routing_key, **kwargs):
-        """Init the Publisher class with the exchange_name, routing_key,
-        and other options
-        """
-        self.exchange_name = exchange_name
-        self.routing_key = routing_key
-        self.kwargs = kwargs
-        self.reconnect(channel)
-
-    def reconnect(self, channel):
-        """Re-establish the Producer after a rabbit reconnection."""
-        self.exchange = kombu.entity.Exchange(name=self.exchange_name,
-                                              **self.kwargs)
-        self.producer = kombu.messaging.Producer(exchange=self.exchange,
-                                                 channel=channel,
-                                                 routing_key=self.routing_key)
-
-    def send(self, msg, timeout=None):
-        """Send a message."""
-        if timeout:
-            #
-            # AMQP TTL is in milliseconds when set in the header.
-            #
-            self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
-        else:
-            self.producer.publish(msg)
-
-
-class DirectPublisher(Publisher):
-    """Publisher class for 'direct'."""
-    def __init__(self, conf, channel, msg_id, **kwargs):
-        """Init a 'direct' publisher.
-
-        Kombu options may be passed as keyword args to override defaults
-        """
-
-        options = {'durable': False,
-                   'auto_delete': True,
-                   'exclusive': False}
-        options.update(kwargs)
-        super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
-                                              type='direct', **options)
-
-
-class TopicPublisher(Publisher):
-    """Publisher class for 'topic'."""
-    def __init__(self, conf, channel, topic, **kwargs):
-        """Init a 'topic' publisher.
-
-        Kombu options may be passed as keyword args to override defaults
-        """
-        options = {'durable': conf.amqp_durable_queues,
-                   'auto_delete': conf.amqp_auto_delete,
-                   'exclusive': False}
-        options.update(kwargs)
-        exchange_name = rpc_amqp.get_control_exchange(conf)
-        super(TopicPublisher, self).__init__(channel,
-                                             exchange_name,
-                                             topic,
-                                             type='topic',
-                                             **options)
-
-
-class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'."""
-    def __init__(self, conf, channel, topic, **kwargs):
-        """Init a 'fanout' publisher.
-
-        Kombu options may be passed as keyword args to override defaults
-        """
-        options = {'durable': False,
-                   'auto_delete': True,
-                   'exclusive': False}
-        options.update(kwargs)
-        super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
-                                              None, type='fanout', **options)
-
-
-class NotifyPublisher(TopicPublisher):
-    """Publisher class for 'notify'."""
-
-    def __init__(self, conf, channel, topic, **kwargs):
-        self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
-        self.queue_arguments = _get_queue_arguments(conf)
-        super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
-
-    def reconnect(self, channel):
-        super(NotifyPublisher, self).reconnect(channel)
-
-        # NOTE(jerdfelt): Normally the consumer would create the queue, but
-        # we do this to ensure that messages don't get dropped if the
-        # consumer is started after we do
-        queue = kombu.entity.Queue(channel=channel,
-                                   exchange=self.exchange,
-                                   durable=self.durable,
-                                   name=self.routing_key,
-                                   routing_key=self.routing_key,
-                                   queue_arguments=self.queue_arguments)
-        queue.declare()
-
-
-class Connection(object):
-    """Connection object."""
-
-    pool = None
-
-    def __init__(self, conf, server_params=None):
-        self.consumers = []
-        self.consumer_thread = None
-        self.proxy_callbacks = []
-        self.conf = conf
-        self.max_retries = self.conf.rabbit_max_retries
-        # Try forever?
-        if self.max_retries <= 0:
-            self.max_retries = None
-        self.interval_start = self.conf.rabbit_retry_interval
-        self.interval_stepping = self.conf.rabbit_retry_backoff
-        # max retry-interval = 30 seconds
-        self.interval_max = 30
-        self.memory_transport = False
-
-        if server_params is None:
-            server_params = {}
-        # Keys to translate from server_params to kombu params
-        server_params_to_kombu_params = {'username': 'userid'}
-
-        ssl_params = self._fetch_ssl_params()
-        params_list = []
-        for adr in self.conf.rabbit_hosts:
-            hostname, port = network_utils.parse_host_port(
-                adr, default_port=self.conf.rabbit_port)
-
-            params = {
-                'hostname': hostname,
-                'port': port,
-                'userid': self.conf.rabbit_userid,
-                'password': self.conf.rabbit_password,
-                'virtual_host': self.conf.rabbit_virtual_host,
-            }
-
-            for sp_key, value in six.iteritems(server_params):
-                p_key = server_params_to_kombu_params.get(sp_key, sp_key)
-                params[p_key] = value
-
-            if self.conf.fake_rabbit:
-                params['transport'] = 'memory'
-            if self.conf.rabbit_use_ssl:
-                params['ssl'] = ssl_params
-
-            params_list.append(params)
-
-        self.params_list = params_list
-
-        self.memory_transport = self.conf.fake_rabbit
-
-        self.connection = None
-        self.reconnect()
-
-    def _fetch_ssl_params(self):
-        """Fetch the SSL parameters (if any) to use for the connection."""
-        ssl_params = dict()
-
-        # http://docs.python.org/library/ssl.html - ssl.wrap_socket
-        if self.conf.kombu_ssl_version:
-            ssl_params['ssl_version'] = sslutils.validate_ssl_version(
-                self.conf.kombu_ssl_version)
-        if self.conf.kombu_ssl_keyfile:
-            ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
-        if self.conf.kombu_ssl_certfile:
-            ssl_params['certfile'] = self.conf.kombu_ssl_certfile
-        if self.conf.kombu_ssl_ca_certs:
-            ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
-            # We might want to allow variations in the
-            # future with this?
-            ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
-
-        # Return the explicit SSL parameters, or True for kombu's defaults.
-        return ssl_params or True
-
-    def _connect(self, params):
-        """Connect to rabbit.  Re-establish any queues that may have
-        been declared before if we are reconnecting.  Exceptions should
-        be handled by the caller.
-        """
-        if self.connection:
-            LOG.info(_("Reconnecting to AMQP server on "
-                     "%(hostname)s:%(port)d") % params)
-            try:
-                self.connection.release()
-            except self.connection_errors:
-                pass
-            # Setting this in case the next statement fails, though
-            # it shouldn't be doing any network operations, yet.
-            self.connection = None
-        self.connection = kombu.connection.BrokerConnection(**params)
-        self.connection_errors = self.connection.connection_errors
-        if self.memory_transport:
-            # Kludge to speed up tests.
-            self.connection.transport.polling_interval = 0.0
-        self.consumer_num = itertools.count(1)
-        self.connection.connect()
-        self.channel = self.connection.channel()
-        # work around 'memory' transport bug in 1.1.3
-        if self.memory_transport:
-            self.channel._new_queue('ae.undeliver')
-        for consumer in self.consumers:
-            consumer.reconnect(self.channel)
-        LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
-                 params)
-
-    def reconnect(self):
-        """Handles reconnecting and re-establishing queues.
-        Retries up to self.max_retries times; self.max_retries = 0 means
-        retry forever.  Sleeps between attempts, starting at
-        self.interval_start seconds and backing off by
-        self.interval_stepping seconds on each attempt.
-        """
-
-        attempt = 0
-        while True:
-            params = self.params_list[attempt % len(self.params_list)]
-            attempt += 1
-            try:
-                self._connect(params)
-                return
-            except (IOError, self.connection_errors) as e:
-                pass
-            except Exception as e:
-                # NOTE(comstud): Unfortunately it's possible for amqplib
-                # to return an error not covered by its transport
-                # connection_errors in the case of a timeout waiting for
-                # a protocol response.  (See paste link in LP888621)
-                # So, we check all exceptions for 'timeout' in them
-                # and try to reconnect in this case.
-                if 'timeout' not in str(e):
-                    raise
-
-            log_info = {}
-            log_info['err_str'] = str(e)
-            log_info['max_retries'] = self.max_retries
-            log_info.update(params)
-
-            if self.max_retries and attempt == self.max_retries:
-                msg = _('Unable to connect to AMQP server on '
-                        '%(hostname)s:%(port)d after %(max_retries)d '
-                        'tries: %(err_str)s') % log_info
-                LOG.error(msg)
-                raise rpc_common.RPCException(msg)
-
-            if attempt == 1:
-                sleep_time = self.interval_start or 1
-            elif attempt > 1:
-                sleep_time += self.interval_stepping
-            if self.interval_max:
-                sleep_time = min(sleep_time, self.interval_max)
-
-            log_info['sleep_time'] = sleep_time
-            LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
-                        'unreachable: %(err_str)s. Trying again in '
-                        '%(sleep_time)d seconds.') % log_info)
-            time.sleep(sleep_time)
-
-    def ensure(self, error_callback, method, *args, **kwargs):
-        while True:
-            try:
-                return method(*args, **kwargs)
-            except (self.connection_errors, socket.timeout, IOError) as e:
-                if error_callback:
-                    error_callback(e)
-            except Exception as e:
-                # NOTE(comstud): Unfortunately it's possible for amqplib
-                # to return an error not covered by its transport
-                # connection_errors in the case of a timeout waiting for
-                # a protocol response.  (See paste link in LP888621)
-                # So, we check all exceptions for 'timeout' in them
-                # and try to reconnect in this case.
-                if 'timeout' not in str(e):
-                    raise
-                if error_callback:
-                    error_callback(e)
-            self.reconnect()
-
-    def get_channel(self):
-        """Convenience call for bin/clear_rabbit_queues."""
-        return self.channel
-
-    def close(self):
-        """Close/release this connection."""
-        self.cancel_consumer_thread()
-        self.wait_on_proxy_callbacks()
-        self.connection.release()
-        self.connection = None
-
-    def reset(self):
-        """Reset a connection so it can be used again."""
-        self.cancel_consumer_thread()
-        self.wait_on_proxy_callbacks()
-        self.channel.close()
-        self.channel = self.connection.channel()
-        # work around 'memory' transport bug in 1.1.3
-        if self.memory_transport:
-            self.channel._new_queue('ae.undeliver')
-        self.consumers = []
-
-    def declare_consumer(self, consumer_cls, topic, callback):
-        """Create a Consumer using the class that was passed in and
-        add it to our list of consumers
-        """
-
-        def _connect_error(exc):
-            log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
-                      "%(err_str)s") % log_info)
-
-        def _declare_consumer():
-            consumer = consumer_cls(self.conf, self.channel, topic, callback,
-                                    six.next(self.consumer_num))
-            self.consumers.append(consumer)
-            return consumer
-
-        return self.ensure(_connect_error, _declare_consumer)
-
-    def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers."""
-
-        info = {'do_consume': True}
-
-        def _error_callback(exc):
-            if isinstance(exc, socket.timeout):
-                LOG.debug(_('Timed out waiting for RPC response: %s') %
-                          str(exc))
-                raise rpc_common.Timeout()
-            else:
-                LOG.exception(_('Failed to consume message from queue: %s') %
-                              str(exc))
-                info['do_consume'] = True
-
-        def _consume():
-            if info['do_consume']:
-                queues_head = self.consumers[:-1]  # not fanout.
-                queues_tail = self.consumers[-1]  # fanout
-                for queue in queues_head:
-                    queue.consume(nowait=True)
-                queues_tail.consume(nowait=False)
-                info['do_consume'] = False
-            return self.connection.drain_events(timeout=timeout)
-
-        for iteration in itertools.count(0):
-            if limit and iteration >= limit:
-                raise StopIteration
-            yield self.ensure(_error_callback, _consume)
-
-    def cancel_consumer_thread(self):
-        """Cancel a consumer thread."""
-        if self.consumer_thread is not None:
-            self.consumer_thread.kill()
-            try:
-                self.consumer_thread.wait()
-            except greenlet.GreenletExit:
-                pass
-            self.consumer_thread = None
-
-    def wait_on_proxy_callbacks(self):
-        """Wait for all proxy callback threads to exit."""
-        for proxy_cb in self.proxy_callbacks:
-            proxy_cb.wait()
-
-    def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
-        """Send to a publisher based on the publisher class."""
-
-        def _error_callback(exc):
-            log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.exception(_("Failed to publish message to topic "
-                          "'%(topic)s': %(err_str)s") % log_info)
-
-        def _publish():
-            publisher = cls(self.conf, self.channel, topic, **kwargs)
-            publisher.send(msg, timeout)
-
-        self.ensure(_error_callback, _publish)
-
-    def declare_direct_consumer(self, topic, callback):
-        """Create a 'direct' queue.
-        In practice this is generally a msg_id queue used for responses
-        to call/multicall.
-        """
-        self.declare_consumer(DirectConsumer, topic, callback)
-
-    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
-                               exchange_name=None, ack_on_error=True):
-        """Create a 'topic' consumer."""
-        self.declare_consumer(functools.partial(TopicConsumer,
-                                                name=queue_name,
-                                                exchange_name=exchange_name,
-                                                ack_on_error=ack_on_error,
-                                                ),
-                              topic, callback)
-
-    def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer."""
-        self.declare_consumer(FanoutConsumer, topic, callback)
-
-    def direct_send(self, msg_id, msg):
-        """Send a 'direct' message."""
-        self.publisher_send(DirectPublisher, msg_id, msg)
-
-    def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message."""
-        self.publisher_send(TopicPublisher, topic, msg, timeout)
-
-    def fanout_send(self, topic, msg):
-        """Send a 'fanout' message."""
-        self.publisher_send(FanoutPublisher, topic, msg)
-
-    def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic."""
-        self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
-
-    def consume(self, limit=None):
-        """Consume from all queues/consumers."""
-        it = self.iterconsume(limit=limit)
-        while True:
-            try:
-                six.next(it)
-            except StopIteration:
-                return
-
-    def consume_in_thread(self):
-        """Consume from all queues/consumers in a greenthread."""
-        @excutils.forever_retry_uncaught_exceptions
-        def _consumer_thread():
-            try:
-                self.consume()
-            except greenlet.GreenletExit:
-                return
-        if self.consumer_thread is None:
-            self.consumer_thread = eventlet.spawn(_consumer_thread)
-        return self.consumer_thread
-
-    def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object."""
-        proxy_cb = rpc_amqp.ProxyCallback(
-            self.conf, proxy,
-            rpc_amqp.get_connection_pool(self.conf, Connection))
-        self.proxy_callbacks.append(proxy_cb)
-
-        if fanout:
-            self.declare_fanout_consumer(topic, proxy_cb)
-        else:
-            self.declare_topic_consumer(topic, proxy_cb)
-
-    def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object."""
-        proxy_cb = rpc_amqp.ProxyCallback(
-            self.conf, proxy,
-            rpc_amqp.get_connection_pool(self.conf, Connection))
-        self.proxy_callbacks.append(proxy_cb)
-        self.declare_topic_consumer(topic, proxy_cb, pool_name)
-
-    def join_consumer_pool(self, callback, pool_name, topic,
-                           exchange_name=None, ack_on_error=True):
-        """Register as a member of a group of consumers for a given topic from
-        the specified exchange.
-
-        Exactly one member of a given pool will receive each message.
-
-        A message will be delivered to multiple pools if more than
-        one is created.
-        """
-        callback_wrapper = rpc_amqp.CallbackWrapper(
-            conf=self.conf,
-            callback=callback,
-            connection_pool=rpc_amqp.get_connection_pool(self.conf,
-                                                         Connection),
-            wait_for_consumers=not ack_on_error
-        )
-        self.proxy_callbacks.append(callback_wrapper)
-        self.declare_topic_consumer(
-            queue_name=pool_name,
-            topic=topic,
-            exchange_name=exchange_name,
-            callback=callback_wrapper,
-            ack_on_error=ack_on_error,
-        )
-
-
-def create_connection(conf, new=True):
-    """Create a connection."""
-    return rpc_amqp.create_connection(
-        conf, new,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def multicall(conf, context, topic, msg, timeout=None):
-    """Make a call that returns multiple times."""
-    return rpc_amqp.multicall(
-        conf, context, topic, msg, timeout,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def call(conf, context, topic, msg, timeout=None):
-    """Sends a message on a topic and waits for a response."""
-    return rpc_amqp.call(
-        conf, context, topic, msg, timeout,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast(conf, context, topic, msg):
-    """Sends a message on a topic without waiting for a response."""
-    return rpc_amqp.cast(
-        conf, context, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast(conf, context, topic, msg):
-    """Sends a message on a fanout exchange without waiting for a response."""
-    return rpc_amqp.fanout_cast(
-        conf, context, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast_to_server(conf, context, server_params, topic, msg):
-    """Sends a message on a topic to a specific server."""
-    return rpc_amqp.cast_to_server(
-        conf, context, server_params, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast_to_server(conf, context, server_params, topic, msg):
-    """Sends a message on a fanout exchange to a specific server."""
-    return rpc_amqp.fanout_cast_to_server(
-        conf, context, server_params, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def notify(conf, context, topic, msg, envelope):
-    """Sends a notification event on a topic."""
-    return rpc_amqp.notify(
-        conf, context, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection),
-        envelope)
-
-
-def cleanup():
-    return rpc_amqp.cleanup(Connection.pool)
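
For contrast with the module-level multicall/call/cast/notify helpers deleted above, here is a rough, hedged sketch of the oslo.messaging client-side equivalents (the topic, method and publisher_id values are made up, and the plain dict stands in for a serialized request context):

    from oslo.config import cfg
    from oslo import messaging

    # The oslo.messaging rabbit driver registers essentially the same
    # rabbit_* options as the kombu_opts list removed above, so existing
    # RabbitMQ configuration keeps working.
    transport = messaging.get_transport(cfg.CONF)
    target = messaging.Target(topic='example-topic', version='1.0')
    client = messaging.RPCClient(transport, target)

    result = client.call({}, 'echo', value=42)   # waits for a reply, like call()
    client.cast({}, 'echo', value=42)            # fire-and-forget, like cast()

    notifier = messaging.Notifier(transport, publisher_id='example.host1')
    notifier.info({}, 'example.event', {'value': 42})
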
diff --git a/cinder/openstack/common/rpc/impl_qpid.py b/cinder/openstack/common/rpc/impl_qpid.py
deleted file mode 100644 (file)
index b661a03..0000000
+++ /dev/null
@@ -1,821 +0,0 @@
-#    Copyright 2011 OpenStack Foundation
-#    Copyright 2011 - 2012, Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import functools
-import itertools
-import time
-
-import eventlet
-import greenlet
-from oslo.config import cfg
-import six
-
-from cinder.openstack.common import excutils
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import importutils
-from cinder.openstack.common import jsonutils
-from cinder.openstack.common import log as logging
-from cinder.openstack.common.rpc import amqp as rpc_amqp
-from cinder.openstack.common.rpc import common as rpc_common
-
-qpid_codec = importutils.try_import("qpid.codec010")
-qpid_messaging = importutils.try_import("qpid.messaging")
-qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
-
-LOG = logging.getLogger(__name__)
-
-qpid_opts = [
-    cfg.StrOpt('qpid_hostname',
-               default='localhost',
-               help='Qpid broker hostname'),
-    cfg.IntOpt('qpid_port',
-               default=5672,
-               help='Qpid broker port'),
-    cfg.ListOpt('qpid_hosts',
-                default=['$qpid_hostname:$qpid_port'],
-                help='Qpid HA cluster host:port pairs'),
-    cfg.StrOpt('qpid_username',
-               default='',
-               help='Username for qpid connection'),
-    cfg.StrOpt('qpid_password',
-               default='',
-               help='Password for qpid connection',
-               secret=True),
-    cfg.StrOpt('qpid_sasl_mechanisms',
-               default='',
-               help='Space separated list of SASL mechanisms to use for auth'),
-    cfg.IntOpt('qpid_heartbeat',
-               default=60,
-               help='Seconds between connection keepalive heartbeats'),
-    cfg.StrOpt('qpid_protocol',
-               default='tcp',
-               help="Transport to use, either 'tcp' or 'ssl'"),
-    cfg.BoolOpt('qpid_tcp_nodelay',
-                default=True,
-                help='Disable Nagle algorithm'),
-    # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
-    # this file could probably use some additional refactoring so that the
-    # differences between each version are split into different classes.
-    cfg.IntOpt('qpid_topology_version',
-               default=1,
-               help="The qpid topology version to use.  Version 1 is what "
-                    "was originally used by impl_qpid.  Version 2 includes "
-                    "some backwards-incompatible changes that allow broker "
-                    "federation to work.  Users should update to version 2 "
-                    "when they are able to take everything down, as it "
-                    "requires a clean break."),
-]
-
-cfg.CONF.register_opts(qpid_opts)
-
-JSON_CONTENT_TYPE = 'application/json; charset=utf8'
-
-
-def raise_invalid_topology_version(conf=None):
-    # Callers invoke this without arguments, so fall back to the global
-    # config object rather than failing with a TypeError.
-    conf = conf or cfg.CONF
-    msg = (_("Invalid value for qpid_topology_version: %d") %
-           conf.qpid_topology_version)
-    LOG.error(msg)
-    raise Exception(msg)
-
-
-class ConsumerBase(object):
-    """Consumer base class."""
-
-    def __init__(self, conf, session, callback, node_name, node_opts,
-                 link_name, link_opts):
-        """Declare a queue on an amqp session.
-
-        'session' is the amqp session to use
-        'callback' is the callback to call when messages are received
-        'node_name' is the first part of the Qpid address string, before ';'
-        'node_opts' will be applied to the "x-declare" section of "node"
-                    in the address string.
-        'link_name' goes into the "name" field of the "link" in the address
-                    string
-        'link_opts' will be applied to the "x-declare" section of "link"
-                    in the address string.
-        """
-        self.callback = callback
-        self.receiver = None
-        self.session = None
-
-        if conf.qpid_topology_version == 1:
-            addr_opts = {
-                "create": "always",
-                "node": {
-                    "type": "topic",
-                    "x-declare": {
-                        "durable": True,
-                        "auto-delete": True,
-                    },
-                },
-                "link": {
-                    "durable": True,
-                    "x-declare": {
-                        "durable": False,
-                        "auto-delete": True,
-                        "exclusive": False,
-                    },
-                },
-            }
-            addr_opts["node"]["x-declare"].update(node_opts)
-        elif conf.qpid_topology_version == 2:
-            addr_opts = {
-                "link": {
-                    "x-declare": {
-                        "auto-delete": True,
-                        "exclusive": False,
-                    },
-                },
-            }
-        else:
-            raise_invalid_topology_version()
-
-        addr_opts["link"]["x-declare"].update(link_opts)
-        if link_name:
-            addr_opts["link"]["name"] = link_name
-
-        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
-
-        self.connect(session)
-
-    def connect(self, session):
-        """Declare the receiver on connect."""
-        self._declare_receiver(session)
-
-    def reconnect(self, session):
-        """Re-declare the receiver after a qpid reconnect."""
-        self._declare_receiver(session)
-
-    def _declare_receiver(self, session):
-        self.session = session
-        self.receiver = session.receiver(self.address)
-        self.receiver.capacity = 1
-
-    def _unpack_json_msg(self, msg):
-        """Load the JSON data in msg if msg.content_type indicates that it
-           is necessary.  Put the loaded data back into msg.content and
-           update msg.content_type appropriately.
-
-        A Qpid Message containing a dict will have a content_type of
-        'amqp/map', whereas one containing a string that needs to be converted
-        back from JSON will have a content_type of JSON_CONTENT_TYPE.
-
-        :param msg: a Qpid Message object
-        :returns: None
-        """
-        if msg.content_type == JSON_CONTENT_TYPE:
-            msg.content = jsonutils.loads(msg.content)
-            msg.content_type = 'amqp/map'
-
-    def consume(self):
-        """Fetch the message and pass it to the callback object."""
-        message = self.receiver.fetch()
-        try:
-            self._unpack_json_msg(message)
-            msg = rpc_common.deserialize_msg(message.content)
-            self.callback(msg)
-        except Exception:
-            LOG.exception(_("Failed to process message... skipping it."))
-        finally:
-            # TODO(sandy): Need support for optional ack_on_error.
-            self.session.acknowledge(message)
-
-    def get_receiver(self):
-        return self.receiver
-
-    def get_node_name(self):
-        return self.address.split(';')[0]
-
-
-class DirectConsumer(ConsumerBase):
-    """Queue/consumer class for 'direct'."""
-
-    def __init__(self, conf, session, msg_id, callback):
-        """Init a 'direct' queue.
-
-        'session' is the amqp session to use
-        'msg_id' is the msg_id to listen on
-        'callback' is the callback to call when messages are received
-        """
-
-        link_opts = {
-            "auto-delete": conf.amqp_auto_delete,
-            "exclusive": True,
-            "durable": conf.amqp_durable_queues,
-        }
-
-        if conf.qpid_topology_version == 1:
-            node_name = "%s/%s" % (msg_id, msg_id)
-            node_opts = {"type": "direct"}
-            link_name = msg_id
-        elif conf.qpid_topology_version == 2:
-            node_name = "amq.direct/%s" % msg_id
-            node_opts = {}
-            link_name = None
-        else:
-            raise_invalid_topology_version()
-
-        super(DirectConsumer, self).__init__(conf, session, callback,
-                                             node_name, node_opts, link_name,
-                                             link_opts)
-
-
-class TopicConsumer(ConsumerBase):
-    """Consumer class for 'topic'."""
-
-    def __init__(self, conf, session, topic, callback, name=None,
-                 exchange_name=None):
-        """Init a 'topic' queue.
-
-        :param session: the amqp session to use
-        :param topic: is the topic to listen on
-        :paramtype topic: str
-        :param callback: the callback to call when messages are received
-        :param name: optional queue name, defaults to topic
-        """
-
-        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
-        link_opts = {
-            "auto-delete": conf.amqp_auto_delete,
-            "durable": conf.amqp_durable_queues,
-        }
-
-        if conf.qpid_topology_version == 1:
-            node_name = "%s/%s" % (exchange_name, topic)
-        elif conf.qpid_topology_version == 2:
-            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
-        else:
-            raise_invalid_topology_version()
-
-        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
-                                            {}, name or topic, link_opts)
-
-
-class FanoutConsumer(ConsumerBase):
-    """Consumer class for 'fanout'."""
-
-    def __init__(self, conf, session, topic, callback):
-        """Init a 'fanout' queue.
-
-        'session' is the amqp session to use
-        'topic' is the topic to listen on
-        'callback' is the callback to call when messages are received
-        """
-        self.conf = conf
-
-        link_opts = {"exclusive": True}
-
-        if conf.qpid_topology_version == 1:
-            node_name = "%s_fanout" % topic
-            node_opts = {"durable": False, "type": "fanout"}
-        elif conf.qpid_topology_version == 2:
-            node_name = "amq.topic/fanout/%s" % topic
-            node_opts = {}
-        else:
-            raise_invalid_topology_version()
-
-        super(FanoutConsumer, self).__init__(conf, session, callback,
-                                             node_name, node_opts, None,
-                                             link_opts)
-
-
-class Publisher(object):
-    """Base Publisher class."""
-
-    def __init__(self, conf, session, node_name, node_opts=None):
-        """Init the Publisher class with the node_name and node_opts
-        used to build the Qpid address string.
-        """
-        self.sender = None
-        self.session = session
-
-        if conf.qpid_topology_version == 1:
-            addr_opts = {
-                "create": "always",
-                "node": {
-                    "type": "topic",
-                    "x-declare": {
-                        "durable": False,
-                        # auto-delete isn't implemented for exchanges in qpid,
-                        # but put in here anyway
-                        "auto-delete": True,
-                    },
-                },
-            }
-            if node_opts:
-                addr_opts["node"]["x-declare"].update(node_opts)
-
-            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
-        elif conf.qpid_topology_version == 2:
-            self.address = node_name
-        else:
-            raise_invalid_topology_version()
-
-        self.reconnect(session)
-
-    def reconnect(self, session):
-        """Re-establish the Sender after a reconnection."""
-        self.sender = session.sender(self.address)
-
-    def _pack_json_msg(self, msg):
-        """Qpid cannot serialize dicts containing strings longer than 65535
-           characters.  This function dumps the message content to a JSON
-           string, which Qpid is able to handle.
-
-        :param msg: May be either a Qpid Message object or a bare dict.
-        :returns: A Qpid Message with its content field JSON encoded.
-        """
-        try:
-            msg.content = jsonutils.dumps(msg.content)
-        except AttributeError:
-            # Need to have a Qpid message so we can set the content_type.
-            msg = qpid_messaging.Message(jsonutils.dumps(msg))
-        msg.content_type = JSON_CONTENT_TYPE
-        return msg
-
-    def send(self, msg):
-        """Send a message."""
-        try:
-            # Check if Qpid can encode the message
-            check_msg = msg
-            if not hasattr(check_msg, 'content_type'):
-                check_msg = qpid_messaging.Message(msg)
-            content_type = check_msg.content_type
-            enc, dec = qpid_messaging.message.get_codec(content_type)
-            enc(check_msg.content)
-        except qpid_codec.CodecException:
-            # This means the message couldn't be serialized as a dict.
-            msg = self._pack_json_msg(msg)
-        self.sender.send(msg)
-
-
-class DirectPublisher(Publisher):
-    """Publisher class for 'direct'."""
-    def __init__(self, conf, session, msg_id):
-        """Init a 'direct' publisher."""
-
-        if conf.qpid_topology_version == 1:
-            node_name = msg_id
-            node_opts = {"type": "direct"}
-        elif conf.qpid_topology_version == 2:
-            node_name = "amq.direct/%s" % msg_id
-            node_opts = {}
-        else:
-            raise_invalid_topology_version()
-
-        super(DirectPublisher, self).__init__(conf, session, node_name,
-                                              node_opts)
-
-
-class TopicPublisher(Publisher):
-    """Publisher class for 'topic'."""
-    def __init__(self, conf, session, topic):
-        """Init a 'topic' publisher.
-        """
-        exchange_name = rpc_amqp.get_control_exchange(conf)
-
-        if conf.qpid_topology_version == 1:
-            node_name = "%s/%s" % (exchange_name, topic)
-        elif conf.qpid_topology_version == 2:
-            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
-        else:
-            raise_invalid_topology_version()
-
-        super(TopicPublisher, self).__init__(conf, session, node_name)
-
-
-class FanoutPublisher(Publisher):
-    """Publisher class for 'fanout'."""
-    def __init__(self, conf, session, topic):
-        """Init a 'fanout' publisher.
-        """
-
-        if conf.qpid_topology_version == 1:
-            node_name = "%s_fanout" % topic
-            node_opts = {"type": "fanout"}
-        elif conf.qpid_topology_version == 2:
-            node_name = "amq.topic/fanout/%s" % topic
-            node_opts = {}
-        else:
-            raise_invalid_topology_version()
-
-        super(FanoutPublisher, self).__init__(conf, session, node_name,
-                                              node_opts)
-
-
-class NotifyPublisher(Publisher):
-    """Publisher class for notifications."""
-    def __init__(self, conf, session, topic):
-        """Init a 'notify' publisher (a durable 'topic'-style publisher)."""
-        exchange_name = rpc_amqp.get_control_exchange(conf)
-        node_opts = {"durable": True}
-
-        if conf.qpid_topology_version == 1:
-            node_name = "%s/%s" % (exchange_name, topic)
-        elif conf.qpid_topology_version == 2:
-            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
-        else:
-            raise_invalid_topology_version()
-
-        super(NotifyPublisher, self).__init__(conf, session, node_name,
-                                              node_opts)
-
-
-class Connection(object):
-    """Connection object."""
-
-    pool = None
-
-    def __init__(self, conf, server_params=None):
-        if not qpid_messaging:
-            raise ImportError("Failed to import qpid.messaging")
-
-        self.session = None
-        self.consumers = {}
-        self.consumer_thread = None
-        self.proxy_callbacks = []
-        self.conf = conf
-
-        if server_params and 'hostname' in server_params:
-            # NOTE(russellb) This enables support for cast_to_server.
-            server_params['qpid_hosts'] = [
-                '%s:%d' % (server_params['hostname'],
-                           server_params.get('port', 5672))
-            ]
-
-        params = {
-            'qpid_hosts': self.conf.qpid_hosts,
-            'username': self.conf.qpid_username,
-            'password': self.conf.qpid_password,
-        }
-        params.update(server_params or {})
-
-        self.brokers = params['qpid_hosts']
-        self.username = params['username']
-        self.password = params['password']
-        self.connection_create(self.brokers[0])
-        self.reconnect()
-
-    def connection_create(self, broker):
-        # Create the connection - this does not open the connection
-        self.connection = qpid_messaging.Connection(broker)
-
-        # Check if flags are set and if so set them for the connection
-        # before we call open
-        self.connection.username = self.username
-        self.connection.password = self.password
-
-        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
-        # Reconnection is done by self.reconnect()
-        self.connection.reconnect = False
-        self.connection.heartbeat = self.conf.qpid_heartbeat
-        self.connection.transport = self.conf.qpid_protocol
-        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
-
-    def _register_consumer(self, consumer):
-        self.consumers[str(consumer.get_receiver())] = consumer
-
-    def _lookup_consumer(self, receiver):
-        return self.consumers[str(receiver)]
-
-    def reconnect(self):
-        """Handles reconnecting and re-establishing sessions and queues."""
-        attempt = 0
-        delay = 1
-        while True:
-            # Close the connection if it is currently open
-            if self.connection.opened():
-                try:
-                    self.connection.close()
-                except qpid_exceptions.ConnectionError:
-                    pass
-
-            broker = self.brokers[attempt % len(self.brokers)]
-            attempt += 1
-
-            try:
-                self.connection_create(broker)
-                self.connection.open()
-            except qpid_exceptions.ConnectionError as e:
-                msg_dict = dict(e=e, delay=delay)
-                msg = _("Unable to connect to AMQP server: %(e)s. "
-                        "Sleeping %(delay)s seconds") % msg_dict
-                LOG.error(msg)
-                time.sleep(delay)
-                delay = min(2 * delay, 60)
-            else:
-                LOG.info(_('Connected to AMQP server on %s'), broker)
-                break
-
-        self.session = self.connection.session()
-
-        if self.consumers:
-            consumers = self.consumers
-            self.consumers = {}
-
-            for consumer in six.itervalues(consumers):
-                consumer.reconnect(self.session)
-                self._register_consumer(consumer)
-
-            LOG.debug(_("Re-established AMQP queues"))
-
-    def ensure(self, error_callback, method, *args, **kwargs):
-        while True:
-            try:
-                return method(*args, **kwargs)
-            except (qpid_exceptions.Empty,
-                    qpid_exceptions.ConnectionError) as e:
-                if error_callback:
-                    error_callback(e)
-                self.reconnect()
-
-    def close(self):
-        """Close/release this connection."""
-        self.cancel_consumer_thread()
-        self.wait_on_proxy_callbacks()
-        try:
-            self.connection.close()
-        except Exception:
-            # NOTE(dripton) Logging exceptions that happen during cleanup just
-            # causes confusion; there's really nothing useful we can do with
-            # them.
-            pass
-        self.connection = None
-
-    def reset(self):
-        """Reset a connection so it can be used again."""
-        self.cancel_consumer_thread()
-        self.wait_on_proxy_callbacks()
-        self.session.close()
-        self.session = self.connection.session()
-        self.consumers = {}
-
-    def declare_consumer(self, consumer_cls, topic, callback):
-        """Create a Consumer using the class that was passed in and
-        add it to our list of consumers
-        """
-        def _connect_error(exc):
-            log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
-                      "%(err_str)s") % log_info)
-
-        def _declare_consumer():
-            consumer = consumer_cls(self.conf, self.session, topic, callback)
-            self._register_consumer(consumer)
-            return consumer
-
-        return self.ensure(_connect_error, _declare_consumer)
-
-    def iterconsume(self, limit=None, timeout=None):
-        """Return an iterator that will consume from all queues/consumers."""
-
-        def _error_callback(exc):
-            if isinstance(exc, qpid_exceptions.Empty):
-                LOG.debug(_('Timed out waiting for RPC response: %s') %
-                          str(exc))
-                raise rpc_common.Timeout()
-            else:
-                LOG.exception(_('Failed to consume message from queue: %s') %
-                              str(exc))
-
-        def _consume():
-            nxt_receiver = self.session.next_receiver(timeout=timeout)
-            try:
-                self._lookup_consumer(nxt_receiver).consume()
-            except Exception:
-                LOG.exception(_("Error processing message.  Skipping it."))
-
-        for iteration in itertools.count(0):
-            if limit and iteration >= limit:
-                raise StopIteration
-            yield self.ensure(_error_callback, _consume)
-
-    def cancel_consumer_thread(self):
-        """Cancel a consumer thread."""
-        if self.consumer_thread is not None:
-            self.consumer_thread.kill()
-            try:
-                self.consumer_thread.wait()
-            except greenlet.GreenletExit:
-                pass
-            self.consumer_thread = None
-
-    def wait_on_proxy_callbacks(self):
-        """Wait for all proxy callback threads to exit."""
-        for proxy_cb in self.proxy_callbacks:
-            proxy_cb.wait()
-
-    def publisher_send(self, cls, topic, msg):
-        """Send to a publisher based on the publisher class."""
-
-        def _connect_error(exc):
-            log_info = {'topic': topic, 'err_str': str(exc)}
-            LOG.exception(_("Failed to publish message to topic "
-                          "'%(topic)s': %(err_str)s") % log_info)
-
-        def _publisher_send():
-            publisher = cls(self.conf, self.session, topic)
-            publisher.send(msg)
-
-        return self.ensure(_connect_error, _publisher_send)
-
-    def declare_direct_consumer(self, topic, callback):
-        """Create a 'direct' queue.
-        In nova's use, this is generally a msg_id queue used for
-        responses for call/multicall
-        """
-        self.declare_consumer(DirectConsumer, topic, callback)
-
-    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
-                               exchange_name=None):
-        """Create a 'topic' consumer."""
-        self.declare_consumer(functools.partial(TopicConsumer,
-                                                name=queue_name,
-                                                exchange_name=exchange_name,
-                                                ),
-                              topic, callback)
-
-    def declare_fanout_consumer(self, topic, callback):
-        """Create a 'fanout' consumer."""
-        self.declare_consumer(FanoutConsumer, topic, callback)
-
-    def direct_send(self, msg_id, msg):
-        """Send a 'direct' message."""
-        self.publisher_send(DirectPublisher, msg_id, msg)
-
-    def topic_send(self, topic, msg, timeout=None):
-        """Send a 'topic' message."""
-        #
-        # We want to create a message with attributes, e.g. a TTL. We
-        # don't really need to keep 'msg' in its JSON format any longer
-        # so let's create an actual qpid message here and get some
-        # value-add on the go.
-        #
-        # WARNING: Request timeout happens to be in the same units as
-        # qpid's TTL (seconds). If this changes in the future, then this
-        # will need to be altered accordingly.
-        #
-        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
-        self.publisher_send(TopicPublisher, topic, qpid_message)
-
-    def fanout_send(self, topic, msg):
-        """Send a 'fanout' message."""
-        self.publisher_send(FanoutPublisher, topic, msg)
-
-    def notify_send(self, topic, msg, **kwargs):
-        """Send a notify message on a topic."""
-        self.publisher_send(NotifyPublisher, topic, msg)
-
-    def consume(self, limit=None):
-        """Consume from all queues/consumers."""
-        it = self.iterconsume(limit=limit)
-        while True:
-            try:
-                six.next(it)
-            except StopIteration:
-                return
-
-    def consume_in_thread(self):
-        """Consumer from all queues/consumers in a greenthread."""
-        @excutils.forever_retry_uncaught_exceptions
-        def _consumer_thread():
-            try:
-                self.consume()
-            except greenlet.GreenletExit:
-                return
-        if self.consumer_thread is None:
-            self.consumer_thread = eventlet.spawn(_consumer_thread)
-        return self.consumer_thread
-
-    def create_consumer(self, topic, proxy, fanout=False):
-        """Create a consumer that calls a method in a proxy object."""
-        proxy_cb = rpc_amqp.ProxyCallback(
-            self.conf, proxy,
-            rpc_amqp.get_connection_pool(self.conf, Connection))
-        self.proxy_callbacks.append(proxy_cb)
-
-        if fanout:
-            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
-        else:
-            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
-
-        self._register_consumer(consumer)
-
-        return consumer
-
-    def create_worker(self, topic, proxy, pool_name):
-        """Create a worker that calls a method in a proxy object."""
-        proxy_cb = rpc_amqp.ProxyCallback(
-            self.conf, proxy,
-            rpc_amqp.get_connection_pool(self.conf, Connection))
-        self.proxy_callbacks.append(proxy_cb)
-
-        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
-                                 name=pool_name)
-
-        self._register_consumer(consumer)
-
-        return consumer
-
-    def join_consumer_pool(self, callback, pool_name, topic,
-                           exchange_name=None, ack_on_error=True):
-        """Register as a member of a group of consumers for a given topic from
-        the specified exchange.
-
-        Exactly one member of a given pool will receive each message.
-
-        A message will be delivered to multiple pools, if more than
-        one is created.
-        """
-        callback_wrapper = rpc_amqp.CallbackWrapper(
-            conf=self.conf,
-            callback=callback,
-            connection_pool=rpc_amqp.get_connection_pool(self.conf,
-                                                         Connection),
-            wait_for_consumers=not ack_on_error
-        )
-        self.proxy_callbacks.append(callback_wrapper)
-
-        consumer = TopicConsumer(conf=self.conf,
-                                 session=self.session,
-                                 topic=topic,
-                                 callback=callback_wrapper,
-                                 name=pool_name,
-                                 exchange_name=exchange_name)
-
-        self._register_consumer(consumer)
-        return consumer
-
-
-def create_connection(conf, new=True):
-    """Create a connection."""
-    return rpc_amqp.create_connection(
-        conf, new,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def multicall(conf, context, topic, msg, timeout=None):
-    """Make a call that returns multiple times."""
-    return rpc_amqp.multicall(
-        conf, context, topic, msg, timeout,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def call(conf, context, topic, msg, timeout=None):
-    """Sends a message on a topic and wait for a response."""
-    return rpc_amqp.call(
-        conf, context, topic, msg, timeout,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast(conf, context, topic, msg):
-    """Sends a message on a topic without waiting for a response."""
-    return rpc_amqp.cast(
-        conf, context, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast(conf, context, topic, msg):
-    """Sends a message on a fanout exchange without waiting for a response."""
-    return rpc_amqp.fanout_cast(
-        conf, context, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast_to_server(conf, context, server_params, topic, msg):
-    """Sends a message on a topic to a specific server."""
-    return rpc_amqp.cast_to_server(
-        conf, context, server_params, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast_to_server(conf, context, server_params, topic, msg):
-    """Sends a message on a fanout exchange to a specific server."""
-    return rpc_amqp.fanout_cast_to_server(
-        conf, context, server_params, topic, msg,
-        rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def notify(conf, context, topic, msg, envelope):
-    """Sends a notification event on a topic."""
-    return rpc_amqp.notify(conf, context, topic, msg,
-                           rpc_amqp.get_connection_pool(conf, Connection),
-                           envelope)
-
-
-def cleanup():
-    return rpc_amqp.cleanup(Connection.pool)
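
For reference: the node-name scheme that NotifyPublisher (and the other
publishers) above derive from qpid_topology_version reduces to the small
sketch below. The helper name and the example exchange/topic values are
illustrative only, not part of the driver.

    def topic_node_name(topology_version, exchange_name, topic):
        # Mirrors the version-1 / version-2 address formats used above.
        if topology_version == 1:
            return "%s/%s" % (exchange_name, topic)
        elif topology_version == 2:
            return "amq.topic/topic/%s/%s" % (exchange_name, topic)
        raise ValueError("Invalid qpid_topology_version: %r" % topology_version)

    # topic_node_name(1, "openstack", "cinder-volume")
    #     -> "openstack/cinder-volume"
    # topic_node_name(2, "openstack", "cinder-volume")
    #     -> "amq.topic/topic/openstack/cinder-volume"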
diff --git a/cinder/openstack/common/rpc/impl_zmq.py b/cinder/openstack/common/rpc/impl_zmq.py
deleted file mode 100644 (file)
index 72189e7..0000000
+++ /dev/null
@@ -1,818 +0,0 @@
-#    Copyright 2011 Cloudscaling Group, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import os
-import pprint
-import re
-import socket
-import sys
-import types
-import uuid
-
-import eventlet
-import greenlet
-from oslo.config import cfg
-import six
-from six import moves
-
-from cinder.openstack.common import excutils
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import importutils
-from cinder.openstack.common import jsonutils
-from cinder.openstack.common.rpc import common as rpc_common
-
-zmq = importutils.try_import('eventlet.green.zmq')
-
-# Aliased here for convenience; these are not modified.
-pformat = pprint.pformat
-Timeout = eventlet.timeout.Timeout
-LOG = rpc_common.LOG
-RemoteError = rpc_common.RemoteError
-RPCException = rpc_common.RPCException
-
-zmq_opts = [
-    cfg.StrOpt('rpc_zmq_bind_address', default='*',
-               help='ZeroMQ bind address. Should be a wildcard (*), '
-                    'an ethernet interface, or IP. '
-                    'The "host" option should point or resolve to this '
-                    'address.'),
-
-    # The module.Class to use for matchmaking.
-    cfg.StrOpt(
-        'rpc_zmq_matchmaker',
-        default=('cinder.openstack.common.rpc.'
-                 'matchmaker.MatchMakerLocalhost'),
-        help='MatchMaker driver',
-    ),
-
-    # The following port is unassigned by IANA as of 2012-05-21
-    cfg.IntOpt('rpc_zmq_port', default=9501,
-               help='ZeroMQ receiver listening port'),
-
-    cfg.IntOpt('rpc_zmq_contexts', default=1,
-               help='Number of ZeroMQ contexts, defaults to 1'),
-
-    cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
-               help='Maximum number of ingress messages to locally buffer '
-                    'per topic. Default is unlimited.'),
-
-    cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
-               help='Directory for holding IPC sockets'),
-
-    cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
-               help='Name of this node. Must be a valid hostname, FQDN, or '
-                    'IP address. Must match "host" option, if running Nova.')
-]
-
-
-CONF = cfg.CONF
-CONF.register_opts(zmq_opts)
-
-ZMQ_CTX = None  # ZeroMQ Context, must be global.
-matchmaker = None  # memoized matchmaker object
-
-
-def _serialize(data):
-    """Serialization wrapper.
-
-    We prefer using JSON, but it cannot encode all types.
-    Error if a developer passes us bad data.
-    """
-    try:
-        return jsonutils.dumps(data, ensure_ascii=True)
-    except TypeError:
-        with excutils.save_and_reraise_exception():
-            LOG.error(_("JSON serialization failed."))
-
-
-def _deserialize(data):
-    """Deserialization wrapper."""
-    LOG.debug(_("Deserializing: %s"), data)
-    return jsonutils.loads(data)
-
-
-class ZmqSocket(object):
-    """A tiny wrapper around ZeroMQ.
-
-    Simplifies the send/recv protocol and connection management.
-    Can be used as a Context (supports the 'with' statement).
-    """
-
-    def __init__(self, addr, zmq_type, bind=True, subscribe=None):
-        self.sock = _get_ctxt().socket(zmq_type)
-        self.addr = addr
-        self.type = zmq_type
-        self.subscriptions = []
-
-        # Support failures on sending/receiving on wrong socket type.
-        self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
-        self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
-        self.can_sub = zmq_type in (zmq.SUB, )
-
-        # Support list, str, & None for subscribe arg (cast to list)
-        do_sub = {
-            list: subscribe,
-            str: [subscribe],
-            type(None): []
-        }[type(subscribe)]
-
-        for f in do_sub:
-            self.subscribe(f)
-
-        str_data = {'addr': addr, 'type': self.socket_s(),
-                    'subscribe': subscribe, 'bind': bind}
-
-        LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
-        LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
-        LOG.debug(_("-> bind: %(bind)s"), str_data)
-
-        try:
-            if bind:
-                self.sock.bind(addr)
-            else:
-                self.sock.connect(addr)
-        except Exception:
-            raise RPCException(_("Could not open socket."))
-
-    def socket_s(self):
-        """Get socket type as string."""
-        t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
-                  'DEALER')
-        return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
-
-    def subscribe(self, msg_filter):
-        """Subscribe."""
-        if not self.can_sub:
-            raise RPCException("Cannot subscribe on this socket.")
-        LOG.debug(_("Subscribing to %s"), msg_filter)
-
-        try:
-            self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
-        except Exception:
-            return
-
-        self.subscriptions.append(msg_filter)
-
-    def unsubscribe(self, msg_filter):
-        """Unsubscribe."""
-        if msg_filter not in self.subscriptions:
-            return
-        self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
-        self.subscriptions.remove(msg_filter)
-
-    def close(self):
-        if self.sock is None or self.sock.closed:
-            return
-
-        # We must unsubscribe, or we'll leak descriptors.
-        if self.subscriptions:
-            for f in self.subscriptions:
-                try:
-                    self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
-                except Exception:
-                    pass
-            self.subscriptions = []
-
-        try:
-            # Default is to linger
-            self.sock.close()
-        except Exception:
-            # While this is a bad thing to happen,
-            # it would be much worse if some of the code calling this
-            # were to fail. For now, lets log, and later evaluate
-            # if we can safely raise here.
-            LOG.error(_("ZeroMQ socket could not be closed."))
-        self.sock = None
-
-    def recv(self, **kwargs):
-        if not self.can_recv:
-            raise RPCException(_("You cannot recv on this socket."))
-        return self.sock.recv_multipart(**kwargs)
-
-    def send(self, data, **kwargs):
-        if not self.can_send:
-            raise RPCException(_("You cannot send on this socket."))
-        self.sock.send_multipart(data, **kwargs)
-
-
-class ZmqClient(object):
-    """Client for ZMQ sockets."""
-
-    def __init__(self, addr):
-        self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
-
-    def cast(self, msg_id, topic, data, envelope):
-        msg_id = msg_id or 0
-
-        if not envelope:
-            self.outq.send(map(bytes,
-                           (msg_id, topic, 'cast', _serialize(data))))
-            return
-
-        rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
-        zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
-        self.outq.send(map(bytes,
-                       (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
-
-    def close(self):
-        self.outq.close()
-
-
-class RpcContext(rpc_common.CommonRpcContext):
-    """Context that supports replying to a rpc.call."""
-    def __init__(self, **kwargs):
-        self.replies = []
-        super(RpcContext, self).__init__(**kwargs)
-
-    def deepcopy(self):
-        values = self.to_dict()
-        values['replies'] = self.replies
-        return self.__class__(**values)
-
-    def reply(self, reply=None, failure=None, ending=False):
-        if ending:
-            return
-        self.replies.append(reply)
-
-    @classmethod
-    def marshal(cls, ctx):
-        ctx_data = ctx.to_dict()
-        return _serialize(ctx_data)
-
-    @classmethod
-    def unmarshal(cls, data):
-        return RpcContext.from_dict(_deserialize(data))
-
-
-class InternalContext(object):
-    """Used by ConsumerBase as a private context for - methods."""
-
-    def __init__(self, proxy):
-        self.proxy = proxy
-        self.msg_waiter = None
-
-    def _get_response(self, ctx, proxy, topic, data):
-        """Process a curried message and cast the result to topic."""
-        LOG.debug(_("Running func with context: %s"), ctx.to_dict())
-        data.setdefault('version', None)
-        data.setdefault('args', {})
-
-        try:
-            result = proxy.dispatch(
-                ctx, data['version'], data['method'],
-                data.get('namespace'), **data['args'])
-            return ConsumerBase.normalize_reply(result, ctx.replies)
-        except greenlet.GreenletExit:
-            # ignore these since they are just from shutdowns
-            pass
-        except rpc_common.ClientException as e:
-            LOG.debug(_("Expected exception during message handling (%s)") %
-                      e._exc_info[1])
-            return {'exc':
-                    rpc_common.serialize_remote_exception(e._exc_info,
-                                                          log_failure=False)}
-        except Exception:
-            LOG.error(_("Exception during message handling"))
-            return {'exc':
-                    rpc_common.serialize_remote_exception(sys.exc_info())}
-
-    def reply(self, ctx, proxy,
-              msg_id=None, context=None, topic=None, msg=None):
-        """Reply to a casted call."""
-        # NOTE(ewindisch): context kwarg exists for Grizzly compat.
-        #                  this may be able to be removed earlier than
-        #                  'I' if ConsumerBase.process were refactored.
-        if type(msg) is list:
-            payload = msg[-1]
-        else:
-            payload = msg
-
-        response = ConsumerBase.normalize_reply(
-            self._get_response(ctx, proxy, topic, payload),
-            ctx.replies)
-
-        LOG.debug(_("Sending reply"))
-        _multi_send(_cast, ctx, topic, {
-            'method': '-process_reply',
-            'args': {
-                'msg_id': msg_id,  # Include for Folsom compat.
-                'response': response
-            }
-        }, _msg_id=msg_id)
-
-
-class ConsumerBase(object):
-    """Base Consumer."""
-
-    def __init__(self):
-        self.private_ctx = InternalContext(None)
-
-    @classmethod
-    def normalize_reply(cls, result, replies):
-        # TODO(ewindisch): re-evaluate and document this method.
-        if isinstance(result, types.GeneratorType):
-            return list(result)
-        elif replies:
-            return replies
-        else:
-            return [result]
-
-    def process(self, proxy, ctx, data):
-        data.setdefault('version', None)
-        data.setdefault('args', {})
-
-        # Methods starting with '-' are
-        # processed internally (not a valid public method name).
-        method = data.get('method')
-        if not method:
-            LOG.error(_("RPC message did not include method."))
-            return
-
-        # Internal method
-        # uses internal context for safety.
-        if method == '-reply':
-            self.private_ctx.reply(ctx, proxy, **data['args'])
-            return
-
-        proxy.dispatch(ctx, data['version'],
-                       data['method'], data.get('namespace'), **data['args'])
-
-
-class ZmqBaseReactor(ConsumerBase):
-    """A consumer class implementing a centralized casting broker (PULL-PUSH).
-
-    Used for RoundRobin requests.
-    """
-
-    def __init__(self, conf):
-        super(ZmqBaseReactor, self).__init__()
-
-        self.proxies = {}
-        self.threads = []
-        self.sockets = []
-        self.subscribe = {}
-
-        self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
-
-    def register(self, proxy, in_addr, zmq_type_in,
-                 in_bind=True, subscribe=None):
-
-        LOG.info(_("Registering reactor"))
-
-        if zmq_type_in not in (zmq.PULL, zmq.SUB):
-            raise RPCException("Bad input socktype")
-
-        # Items push in.
-        inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
-                        subscribe=subscribe)
-
-        self.proxies[inq] = proxy
-        self.sockets.append(inq)
-
-        LOG.info(_("In reactor registered"))
-
-    def consume_in_thread(self):
-        @excutils.forever_retry_uncaught_exceptions
-        def _consume(sock):
-            LOG.info(_("Consuming socket"))
-            while True:
-                self.consume(sock)
-
-        for k in self.proxies.keys():
-            self.threads.append(
-                self.pool.spawn(_consume, k)
-            )
-
-    def wait(self):
-        for t in self.threads:
-            t.wait()
-
-    def close(self):
-        for s in self.sockets:
-            s.close()
-
-        for t in self.threads:
-            t.kill()
-
-
-class ZmqProxy(ZmqBaseReactor):
-    """A consumer class implementing a topic-based proxy.
-
-    Forwards to IPC sockets.
-    """
-
-    def __init__(self, conf):
-        super(ZmqProxy, self).__init__(conf)
-        pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
-        self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
-
-        self.topic_proxy = {}
-
-    def consume(self, sock):
-        ipc_dir = CONF.rpc_zmq_ipc_dir
-
-        data = sock.recv(copy=False)
-        topic = data[1].bytes
-
-        if topic.startswith('fanout~'):
-            sock_type = zmq.PUB
-            topic = topic.split('.', 1)[0]
-        elif topic.startswith('zmq_replies'):
-            sock_type = zmq.PUB
-        else:
-            sock_type = zmq.PUSH
-
-        if topic not in self.topic_proxy:
-            def publisher(waiter):
-                LOG.info(_("Creating proxy for topic: %s"), topic)
-
-                try:
-                    # The topic is received over the network,
-                    # don't trust this input.
-                    if self.badchars.search(topic) is not None:
-                        emsg = _("Topic contained dangerous characters.")
-                        LOG.warn(emsg)
-                        raise RPCException(emsg)
-
-                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
-                                         (ipc_dir, topic),
-                                         sock_type, bind=True)
-                except RPCException:
-                    waiter.send_exception(*sys.exc_info())
-                    return
-
-                self.topic_proxy[topic] = eventlet.queue.LightQueue(
-                    CONF.rpc_zmq_topic_backlog)
-                self.sockets.append(out_sock)
-
-                # It takes some time for a pub socket to open,
-                # before we can have any faith in doing a send() to it.
-                if sock_type == zmq.PUB:
-                    eventlet.sleep(.5)
-
-                waiter.send(True)
-
-                while True:
-                    data = self.topic_proxy[topic].get()
-                    out_sock.send(data, copy=False)
-
-            wait_sock_creation = eventlet.event.Event()
-            eventlet.spawn(publisher, wait_sock_creation)
-
-            try:
-                wait_sock_creation.wait()
-            except RPCException:
-                LOG.error(_("Topic socket file creation failed."))
-                return
-
-        try:
-            self.topic_proxy[topic].put_nowait(data)
-        except eventlet.queue.Full:
-            LOG.error(_("Local per-topic backlog buffer full for topic "
-                        "%(topic)s. Dropping message.") % {'topic': topic})
-
-    def consume_in_thread(self):
-        """Runs the ZmqProxy service."""
-        ipc_dir = CONF.rpc_zmq_ipc_dir
-        consume_in = "tcp://%s:%s" % \
-            (CONF.rpc_zmq_bind_address,
-             CONF.rpc_zmq_port)
-        consumption_proxy = InternalContext(None)
-
-        try:
-            os.makedirs(ipc_dir)
-        except os.error:
-            if not os.path.isdir(ipc_dir):
-                with excutils.save_and_reraise_exception():
-                    LOG.error(_("Required IPC directory does not exist at"
-                                " %s") % (ipc_dir, ))
-        try:
-            self.register(consumption_proxy,
-                          consume_in,
-                          zmq.PULL)
-        except zmq.ZMQError:
-            if os.access(ipc_dir, os.X_OK):
-                with excutils.save_and_reraise_exception():
-                    LOG.error(_("Permission denied to IPC directory at"
-                                " %s") % (ipc_dir, ))
-            with excutils.save_and_reraise_exception():
-                LOG.error(_("Could not create ZeroMQ receiver daemon. "
-                            "Socket may already be in use."))
-
-        super(ZmqProxy, self).consume_in_thread()
-
-
-def unflatten_envelope(packenv):
-    """Unflattens the RPC envelope.
-
-    Takes a list and returns a dictionary.
-    i.e. [1,2,3,4] => {1: 2, 3: 4}
-    """
-    i = iter(packenv)
-    h = {}
-    try:
-        while True:
-            k = six.next(i)
-            h[k] = six.next(i)
-    except StopIteration:
-        return h
-
-
-class ZmqReactor(ZmqBaseReactor):
-    """A consumer class implementing a consumer for messages.
-
-    Can also be used as a 1:1 proxy
-    """
-
-    def __init__(self, conf):
-        super(ZmqReactor, self).__init__(conf)
-
-    def consume(self, sock):
-        # TODO(ewindisch): use zero-copy (i.e. references, not copying)
-        data = sock.recv()
-        LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
-
-        proxy = self.proxies[sock]
-
-        if data[2] == 'cast':  # Legacy protocol
-            packenv = data[3]
-
-            ctx, msg = _deserialize(packenv)
-            request = rpc_common.deserialize_msg(msg)
-            ctx = RpcContext.unmarshal(ctx)
-        elif data[2] == 'impl_zmq_v2':
-            packenv = data[4:]
-
-            msg = unflatten_envelope(packenv)
-            request = rpc_common.deserialize_msg(msg)
-
-            # Unmarshal only after verifying the message.
-            ctx = RpcContext.unmarshal(data[3])
-        else:
-            LOG.error(_("ZMQ Envelope version unsupported or unknown."))
-            return
-
-        self.pool.spawn_n(self.process, proxy, ctx, request)
-
-
-class Connection(rpc_common.Connection):
-    """Manages connections and threads."""
-
-    def __init__(self, conf):
-        self.topics = []
-        self.reactor = ZmqReactor(conf)
-
-    def create_consumer(self, topic, proxy, fanout=False):
-        # Register with matchmaker.
-        _get_matchmaker().register(topic, CONF.rpc_zmq_host)
-
-        # Subscription scenarios
-        if fanout:
-            sock_type = zmq.SUB
-            subscribe = ('', fanout)[type(fanout) == str]
-            topic = 'fanout~' + topic.split('.', 1)[0]
-        else:
-            sock_type = zmq.PULL
-            subscribe = None
-            topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
-
-        if topic in self.topics:
-            LOG.info(_("Skipping topic registration. Already registered."))
-            return
-
-        # Receive messages from (local) proxy
-        inaddr = "ipc://%s/zmq_topic_%s" % \
-            (CONF.rpc_zmq_ipc_dir, topic)
-
-        LOG.debug(_("Consumer is a zmq.%s"),
-                  ['PULL', 'SUB'][sock_type == zmq.SUB])
-
-        self.reactor.register(proxy, inaddr, sock_type,
-                              subscribe=subscribe, in_bind=False)
-        self.topics.append(topic)
-
-    def close(self):
-        _get_matchmaker().stop_heartbeat()
-        for topic in self.topics:
-            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
-
-        self.reactor.close()
-        self.topics = []
-
-    def wait(self):
-        self.reactor.wait()
-
-    def consume_in_thread(self):
-        _get_matchmaker().start_heartbeat()
-        self.reactor.consume_in_thread()
-
-
-def _cast(addr, context, topic, msg, timeout=None, envelope=False,
-          _msg_id=None):
-    timeout_cast = timeout or CONF.rpc_cast_timeout
-    payload = [RpcContext.marshal(context), msg]
-
-    with Timeout(timeout_cast, exception=rpc_common.Timeout):
-        try:
-            conn = ZmqClient(addr)
-
-            # assumes cast can't return an exception
-            conn.cast(_msg_id, topic, payload, envelope)
-        except zmq.ZMQError:
-            raise RPCException("Cast failed. ZMQ Socket Exception")
-        finally:
-            if 'conn' in vars():
-                conn.close()
-
-
-def _call(addr, context, topic, msg, timeout=None,
-          envelope=False):
-    # timeout is how long we wait for a response
-    timeout = timeout or CONF.rpc_response_timeout
-
-    # The msg_id is used to track replies.
-    msg_id = uuid.uuid4().hex
-
-    # Replies always come into the reply service.
-    reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
-
-    LOG.debug(_("Creating payload"))
-    # Curry the original request into a reply method.
-    mcontext = RpcContext.marshal(context)
-    payload = {
-        'method': '-reply',
-        'args': {
-            'msg_id': msg_id,
-            'topic': reply_topic,
-            # TODO(ewindisch): safe to remove mcontext in I.
-            'msg': [mcontext, msg]
-        }
-    }
-
-    LOG.debug(_("Creating queue socket for reply waiter"))
-
-    # Messages arriving async.
-    # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
-    with Timeout(timeout, exception=rpc_common.Timeout):
-        try:
-            msg_waiter = ZmqSocket(
-                "ipc://%s/zmq_topic_zmq_replies.%s" %
-                (CONF.rpc_zmq_ipc_dir,
-                 CONF.rpc_zmq_host),
-                zmq.SUB, subscribe=msg_id, bind=False
-            )
-
-            LOG.debug(_("Sending cast"))
-            _cast(addr, context, topic, payload, envelope)
-
-            LOG.debug(_("Cast sent; Waiting reply"))
-            # Blocks until receives reply
-            msg = msg_waiter.recv()
-            LOG.debug(_("Received message: %s"), msg)
-            LOG.debug(_("Unpacking response"))
-
-            if msg[2] == 'cast':  # Legacy version
-                raw_msg = _deserialize(msg[-1])[-1]
-            elif msg[2] == 'impl_zmq_v2':
-                rpc_envelope = unflatten_envelope(msg[4:])
-                raw_msg = rpc_common.deserialize_msg(rpc_envelope)
-            else:
-                raise rpc_common.UnsupportedRpcEnvelopeVersion(
-                    _("Unsupported or unknown ZMQ envelope returned."))
-
-            responses = raw_msg['args']['response']
-        # ZMQError trumps the Timeout error.
-        except zmq.ZMQError:
-            raise RPCException("ZMQ Socket Error")
-        except (IndexError, KeyError):
-            raise RPCException(_("RPC Message Invalid."))
-        finally:
-            if 'msg_waiter' in vars():
-                msg_waiter.close()
-
-    # It seems we don't need to do all of the following,
-    # but perhaps it would be useful for multicall?
-    # One effect of this is that we're checking all
-    # responses for Exceptions.
-    for resp in responses:
-        if isinstance(resp, types.DictType) and 'exc' in resp:
-            raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
-
-    return responses[-1]
-
-
-def _multi_send(method, context, topic, msg, timeout=None,
-                envelope=False, _msg_id=None):
-    """Wraps the sending of messages.
-
-    Dispatches to the matchmaker and sends message to all relevant hosts.
-    """
-    conf = CONF
-    LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
-
-    queues = _get_matchmaker().queues(topic)
-    LOG.debug(_("Sending message(s) to: %s"), queues)
-
-    # Don't stack if we have no matchmaker results
-    if not queues:
-        LOG.warn(_("No matchmaker results. Not casting."))
-        # While not strictly a timeout, callers know how to handle
-        # this exception and a timeout isn't too big a lie.
-        raise rpc_common.Timeout(_("No match from matchmaker."))
-
-    # This supports brokerless fanout (addresses > 1)
-    for queue in queues:
-        (_topic, ip_addr) = queue
-        _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
-
-        if method.__name__ == '_cast':
-            eventlet.spawn_n(method, _addr, context,
-                             _topic, msg, timeout, envelope,
-                             _msg_id)
-            return
-        return method(_addr, context, _topic, msg, timeout,
-                      envelope)
-
-
-def create_connection(conf, new=True):
-    return Connection(conf)
-
-
-def multicall(conf, *args, **kwargs):
-    """Multiple calls."""
-    return _multi_send(_call, *args, **kwargs)
-
-
-def call(conf, *args, **kwargs):
-    """Send a message, expect a response."""
-    data = _multi_send(_call, *args, **kwargs)
-    return data[-1]
-
-
-def cast(conf, *args, **kwargs):
-    """Send a message expecting no reply."""
-    _multi_send(_cast, *args, **kwargs)
-
-
-def fanout_cast(conf, context, topic, msg, **kwargs):
-    """Send a message to all listening and expect no reply."""
-    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
-    # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
-    _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
-
-
-def notify(conf, context, topic, msg, envelope):
-    """Send notification event.
-
-    Notifications are sent to topic-priority.
-    This differs from the AMQP drivers which send to topic.priority.
-    """
-    # NOTE(ewindisch): dot-priority in rpc notifier does not
-    # work with our assumptions.
-    topic = topic.replace('.', '-')
-    cast(conf, context, topic, msg, envelope=envelope)
-
-
-def cleanup():
-    """Clean up resources in use by implementation."""
-    global ZMQ_CTX
-    if ZMQ_CTX:
-        ZMQ_CTX.term()
-    ZMQ_CTX = None
-
-    global matchmaker
-    matchmaker = None
-
-
-def _get_ctxt():
-    if not zmq:
-        raise ImportError("Failed to import eventlet.green.zmq")
-
-    global ZMQ_CTX
-    if not ZMQ_CTX:
-        ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
-    return ZMQ_CTX
-
-
-def _get_matchmaker(*args, **kwargs):
-    global matchmaker
-    if not matchmaker:
-        mm = CONF.rpc_zmq_matchmaker
-        if mm.endswith('matchmaker.MatchMakerRing'):
-            mm = mm.replace('matchmaker', 'matchmaker_ring')
-            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
-                       ' %(new)s instead') % dict(
-                     orig=CONF.rpc_zmq_matchmaker, new=mm))
-        matchmaker = importutils.import_object(mm, *args, **kwargs)
-    return matchmaker
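
For reference: the envelope handling in the zmq driver above flattens a dict
envelope into an alternating key/value sequence on the wire (ZmqClient.cast)
and rebuilds it on receipt (unflatten_envelope). A minimal round-trip sketch,
using plain next() in place of six.next() and example envelope keys:

    from functools import reduce

    def unflatten_envelope(packenv):
        # [k1, v1, k2, v2] -> {k1: v1, k2: v2}, as in the deleted code above.
        i = iter(packenv)
        h = {}
        try:
            while True:
                k = next(i)
                h[k] = next(i)
        except StopIteration:
            return h

    envelope = {'oslo.version': '2.0', 'oslo.message': '{"method": "ping"}'}
    flat = reduce(lambda x, y: x + y, envelope.items())  # as in ZmqClient.cast
    assert unflatten_envelope(flat) == envelope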
diff --git a/cinder/openstack/common/rpc/matchmaker.py b/cinder/openstack/common/rpc/matchmaker.py
deleted file mode 100644 (file)
index 4b03480..0000000
+++ /dev/null
@@ -1,322 +0,0 @@
-#    Copyright 2011 Cloudscaling Group, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""
-The MatchMaker classes should accept a Topic or Fanout exchange key and
-return keys for direct exchanges, per (approximate) AMQP parlance.
-"""
-
-import contextlib
-
-import eventlet
-from oslo.config import cfg
-
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import log as logging
-
-
-matchmaker_opts = [
-    cfg.IntOpt('matchmaker_heartbeat_freq',
-               default=300,
-               help='Heartbeat frequency'),
-    cfg.IntOpt('matchmaker_heartbeat_ttl',
-               default=600,
-               help='Heartbeat time-to-live.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(matchmaker_opts)
-LOG = logging.getLogger(__name__)
-contextmanager = contextlib.contextmanager
-
-
-class MatchMakerException(Exception):
-    """Signified a match could not be found."""
-    message = _("Match not found by MatchMaker.")
-
-
-class Exchange(object):
-    """Implements lookups.
-
-    Subclass this to support hashtables, dns, etc.
-    """
-    def __init__(self):
-        pass
-
-    def run(self, key):
-        raise NotImplementedError()
-
-
-class Binding(object):
-    """A binding on which to perform a lookup."""
-    def __init__(self):
-        pass
-
-    def test(self, key):
-        raise NotImplementedError()
-
-
-class MatchMakerBase(object):
-    """Match Maker Base Class.
-
-    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
-    MatchMaker.
-    """
-    def __init__(self):
-        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
-        self.bindings = []
-
-        self.no_heartbeat_msg = _('Matchmaker does not implement '
-                                  'registration or heartbeat.')
-
-    def register(self, key, host):
-        """Register a host on a backend.
-
-        Heartbeats, if applicable, may keep the registration alive.
-        """
-        pass
-
-    def ack_alive(self, key, host):
-        """Acknowledge that a key.host is alive.
-
-        Used internally for updating heartbeats, but may also be used
-        publicly to acknowledge a system is alive (i.e. rpc message
-        successfully sent to host)
-        """
-        pass
-
-    def is_alive(self, topic, host):
-        """Checks if a host is alive."""
-        pass
-
-    def expire(self, topic, host):
-        """Explicitly expire a host's registration."""
-        pass
-
-    def send_heartbeats(self):
-        """Send all heartbeats.
-
-        Use start_heartbeat to spawn a heartbeat greenthread,
-        which loops this method.
-        """
-        pass
-
-    def unregister(self, key, host):
-        """Unregister a topic."""
-        pass
-
-    def start_heartbeat(self):
-        """Spawn heartbeat greenthread."""
-        pass
-
-    def stop_heartbeat(self):
-        """Destroys the heartbeat greenthread."""
-        pass
-
-    def add_binding(self, binding, rule, last=True):
-        self.bindings.append((binding, rule, False, last))
-
-    #NOTE(ewindisch): kept the following method in case we implement the
-    #                 underlying support.
-    #def add_negate_binding(self, binding, rule, last=True):
-    #    self.bindings.append((binding, rule, True, last))
-
-    def queues(self, key):
-        workers = []
-
-        # bit is for negate bindings - if we choose to implement it.
-        # last stops processing rules if this matches.
-        for (binding, exchange, bit, last) in self.bindings:
-            if binding.test(key):
-                workers.extend(exchange.run(key))
-
-                # Support last.
-                if last:
-                    return workers
-        return workers
-
-
-class HeartbeatMatchMakerBase(MatchMakerBase):
-    """Base for a heart-beat capable MatchMaker.
-
-    Provides common methods for registering, unregistering, and maintaining
-    heartbeats.
-    """
-    def __init__(self):
-        self.hosts = set()
-        self._heart = None
-        self.host_topic = {}
-
-        super(HeartbeatMatchMakerBase, self).__init__()
-
-    def send_heartbeats(self):
-        """Send all heartbeats.
-
-        Use start_heartbeat to spawn a heartbeat greenthread,
-        which loops this method.
-        """
-        for key, host in self.host_topic:
-            self.ack_alive(key, host)
-
-    def ack_alive(self, key, host):
-        """Acknowledge that a host.topic is alive.
-
-        Used internally for updating heartbeats, but may also be used
-        publicly to acknowledge a system is alive (i.e. rpc message
-        successfully sent to host)
-        """
-        raise NotImplementedError("Must implement ack_alive")
-
-    def backend_register(self, key, host):
-        """Implements registration logic.
-
-        Called by register(self,key,host)
-        """
-        raise NotImplementedError("Must implement backend_register")
-
-    def backend_unregister(self, key, key_host):
-        """Implements de-registration logic.
-
-        Called by unregister(self,key,host)
-        """
-        raise NotImplementedError("Must implement backend_unregister")
-
-    def register(self, key, host):
-        """Register a host on a backend.
-
-        Heartbeats, if applicable, may keep the registration alive.
-        """
-        self.hosts.add(host)
-        self.host_topic[(key, host)] = host
-        key_host = '.'.join((key, host))
-
-        self.backend_register(key, key_host)
-
-        self.ack_alive(key, host)
-
-    def unregister(self, key, host):
-        """Unregister a topic."""
-        if (key, host) in self.host_topic:
-            del self.host_topic[(key, host)]
-
-        self.hosts.discard(host)
-        self.backend_unregister(key, '.'.join((key, host)))
-
-        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
-                 {'key': key, 'host': host})
-
-    def start_heartbeat(self):
-        """Implementation of MatchMakerBase.start_heartbeat.
-
-        Launches greenthread looping send_heartbeats(),
-        yielding for CONF.matchmaker_heartbeat_freq seconds
-        between iterations.
-        """
-        if not self.hosts:
-            raise MatchMakerException(
-                _("Register before starting heartbeat."))
-
-        def do_heartbeat():
-            while True:
-                self.send_heartbeats()
-                eventlet.sleep(CONF.matchmaker_heartbeat_freq)
-
-        self._heart = eventlet.spawn(do_heartbeat)
-
-    def stop_heartbeat(self):
-        """Destroys the heartbeat greenthread."""
-        if self._heart:
-            self._heart.kill()
-
-
-class DirectBinding(Binding):
-    """Specifies a host in the key via a '.' character.
-
-    Although dots are used in the key, the behavior here is
-    that it maps directly to a host, thus direct.
-    """
-    def test(self, key):
-        return '.' in key
-
-
-class TopicBinding(Binding):
-    """Where a 'bare' key without dots.
-
-    AMQP generally considers topic exchanges to be those *with* dots,
-    but we deviate here in terminology as the behavior here matches
-    that of a topic exchange (whereas where there are dots, behavior
-    matches that of a direct exchange.
-    """
-    def test(self, key):
-        return '.' not in key
-
-
-class FanoutBinding(Binding):
-    """Match on fanout keys, where key starts with 'fanout.' string."""
-    def test(self, key):
-        return key.startswith('fanout~')
-
-
-class StubExchange(Exchange):
-    """Exchange that does nothing."""
-    def run(self, key):
-        return [(key, None)]
-
-
-class LocalhostExchange(Exchange):
-    """Exchange where all direct topics are local."""
-    def __init__(self, host='localhost'):
-        self.host = host
-        super(LocalhostExchange, self).__init__()
-
-    def run(self, key):
-        return [('.'.join((key.split('.')[0], self.host)), self.host)]
-
-
-class DirectExchange(Exchange):
-    """Exchange where all topic keys are split, sending to second half.
-
-    i.e. "compute.host" sends a message to "compute.host" running on "host"
-    """
-    def __init__(self):
-        super(DirectExchange, self).__init__()
-
-    def run(self, key):
-        e = key.split('.', 1)[1]
-        return [(key, e)]
-
-
-class MatchMakerLocalhost(MatchMakerBase):
-    """Match Maker where all bare topics resolve to localhost.
-
-    Useful for testing.
-    """
-    def __init__(self, host='localhost'):
-        super(MatchMakerLocalhost, self).__init__()
-        self.add_binding(FanoutBinding(), LocalhostExchange(host))
-        self.add_binding(DirectBinding(), DirectExchange())
-        self.add_binding(TopicBinding(), LocalhostExchange(host))
-
-
-class MatchMakerStub(MatchMakerBase):
-    """Match Maker where topics are untouched.
-
-    Useful for testing, or for AMQP/brokered queues.
-    Will not work where knowledge of hosts is known (i.e. zeromq)
-    """
-    def __init__(self):
-        super(MatchMakerStub, self).__init__()
-
-        self.add_binding(FanoutBinding(), StubExchange())
-        self.add_binding(DirectBinding(), StubExchange())
-        self.add_binding(TopicBinding(), StubExchange())
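
For reference: queues() above walks the registered (binding, exchange) pairs
and returns the first match, so topic, direct and fanout~ keys resolve through
different exchanges. A usage sketch against the localhost matchmaker (host and
topic names are example values; the module path is the one being removed here):

    from cinder.openstack.common.rpc import matchmaker

    mm = matchmaker.MatchMakerLocalhost(host='myhost')
    mm.queues('cinder-volume')          # [('cinder-volume.myhost', 'myhost')]
    mm.queues('cinder-volume.host2')    # [('cinder-volume.host2', 'host2')]
    mm.queues('fanout~cinder-volume')   # [('fanout~cinder-volume.myhost', 'myhost')]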
diff --git a/cinder/openstack/common/rpc/matchmaker_redis.py b/cinder/openstack/common/rpc/matchmaker_redis.py
deleted file mode 100644 (file)
index 25db954..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-#    Copyright 2013 Cloudscaling Group, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""
-The MatchMaker classes should accept a Topic or Fanout exchange key and
-return keys for direct exchanges, per (approximate) AMQP parlance.
-"""
-
-from oslo.config import cfg
-
-from cinder.openstack.common import importutils
-from cinder.openstack.common import log as logging
-from cinder.openstack.common.rpc import matchmaker as mm_common
-
-redis = importutils.try_import('redis')
-
-
-matchmaker_redis_opts = [
-    cfg.StrOpt('host',
-               default='127.0.0.1',
-               help='Host to locate redis'),
-    cfg.IntOpt('port',
-               default=6379,
-               help='Use this port to connect to redis host.'),
-    cfg.StrOpt('password',
-               default=None,
-               help='Password for Redis server. (optional)'),
-]
-
-CONF = cfg.CONF
-opt_group = cfg.OptGroup(name='matchmaker_redis',
-                         title='Options for Redis-based MatchMaker')
-CONF.register_group(opt_group)
-CONF.register_opts(matchmaker_redis_opts, opt_group)
-LOG = logging.getLogger(__name__)
-
-
-class RedisExchange(mm_common.Exchange):
-    def __init__(self, matchmaker):
-        self.matchmaker = matchmaker
-        self.redis = matchmaker.redis
-        super(RedisExchange, self).__init__()
-
-
-class RedisTopicExchange(RedisExchange):
-    """Exchange where all topic keys are split, sending to second half.
-
-    i.e. "compute.host" sends a message to "compute" running on "host"
-    """
-    def run(self, topic):
-        while True:
-            member_name = self.redis.srandmember(topic)
-
-            if not member_name:
-                # If this happens, there are no
-                # longer any members.
-                break
-
-            if not self.matchmaker.is_alive(topic, member_name):
-                continue
-
-            host = member_name.split('.', 1)[1]
-            return [(member_name, host)]
-        return []
-
-
-class RedisFanoutExchange(RedisExchange):
-    """Return a list of all hosts."""
-    def run(self, topic):
-        topic = topic.split('~', 1)[1]
-        hosts = self.redis.smembers(topic)
-        good_hosts = filter(
-            lambda host: self.matchmaker.is_alive(topic, host), hosts)
-
-        return [(x, x.split('.', 1)[1]) for x in good_hosts]
-
-
-class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
-    """MatchMaker registering and looking-up hosts with a Redis server."""
-    def __init__(self):
-        super(MatchMakerRedis, self).__init__()
-
-        if not redis:
-            raise ImportError("Failed to import module redis.")
-
-        self.redis = redis.Redis(
-            host=CONF.matchmaker_redis.host,
-            port=CONF.matchmaker_redis.port,
-            password=CONF.matchmaker_redis.password)
-
-        self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
-        self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
-        self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))
-
-    def ack_alive(self, key, host):
-        topic = "%s.%s" % (key, host)
-        if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
-            # If we could not update the expiration, the key
-            # might have been pruned. Re-register, creating a new
-            # key in Redis.
-            self.register(key, host)
-
-    def is_alive(self, topic, host):
-        if self.redis.ttl(host) == -1:
-            self.expire(topic, host)
-            return False
-        return True
-
-    def expire(self, topic, host):
-        with self.redis.pipeline() as pipe:
-            pipe.multi()
-            pipe.delete(host)
-            pipe.srem(topic, host)
-            pipe.execute()
-
-    def backend_register(self, key, key_host):
-        with self.redis.pipeline() as pipe:
-            pipe.multi()
-            pipe.sadd(key, key_host)
-
-            # No value is needed, we just
-            # care if it exists. Sets aren't viable
-            # because only keys can expire.
-            pipe.set(key_host, '')
-
-            pipe.execute()
-
-    def backend_unregister(self, key, key_host):
-        with self.redis.pipeline() as pipe:
-            pipe.multi()
-            pipe.srem(key, key_host)
-            pipe.delete(key_host)
-            pipe.execute()
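
For reference: the Redis layout used above keeps one set per topic whose
members are "topic.host" strings, plus one plain key per "topic.host" whose
TTL serves as the heartbeat. A usage sketch, assuming a Redis server reachable
with the defaults above and example topic/host names:

    from cinder.openstack.common.rpc import matchmaker_redis

    mm = matchmaker_redis.MatchMakerRedis()
    mm.register('cinder-volume', 'host1')   # SADD cinder-volume cinder-volume.host1
                                            # SET  cinder-volume.host1 ""
    mm.ack_alive('cinder-volume', 'host1')  # EXPIRE cinder-volume.host1 <heartbeat ttl>
    mm.queues('cinder-volume')              # e.g. [('cinder-volume.host1', 'host1')]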
diff --git a/cinder/openstack/common/rpc/matchmaker_ring.py b/cinder/openstack/common/rpc/matchmaker_ring.py
deleted file mode 100644 (file)
index 73cdca2..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-#    Copyright 2011-2013 Cloudscaling Group, Inc
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-"""
-The MatchMaker classes should accept a Topic or Fanout exchange key and
-return keys for direct exchanges, per (approximate) AMQP parlance.
-"""
-
-import itertools
-import json
-
-from oslo.config import cfg
-
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import log as logging
-from cinder.openstack.common.rpc import matchmaker as mm
-
-
-matchmaker_opts = [
-    # Matchmaker ring file
-    cfg.StrOpt('ringfile',
-               deprecated_name='matchmaker_ringfile',
-               deprecated_group='DEFAULT',
-               default='/etc/oslo/matchmaker_ring.json',
-               help='Matchmaker ring file (JSON)'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
-LOG = logging.getLogger(__name__)
-
-
-class RingExchange(mm.Exchange):
-    """Match Maker where hosts are loaded from a static JSON formatted file.
-
-    __init__ takes an optional ring dictionary argument; otherwise it
-    loads the ringfile from CONF.matchmaker_ring.ringfile.
-    """
-    def __init__(self, ring=None):
-        super(RingExchange, self).__init__()
-
-        if ring:
-            self.ring = ring
-        else:
-            fh = open(CONF.matchmaker_ring.ringfile, 'r')
-            self.ring = json.load(fh)
-            fh.close()
-
-        self.ring0 = {}
-        for k in self.ring.keys():
-            self.ring0[k] = itertools.cycle(self.ring[k])
-
-    def _ring_has(self, key):
-        return key in self.ring0
-
-
-class RoundRobinRingExchange(RingExchange):
-    """A Topic Exchange based on a hashmap."""
-    def __init__(self, ring=None):
-        super(RoundRobinRingExchange, self).__init__(ring)
-
-    def run(self, key):
-        if not self._ring_has(key):
-            LOG.warn(
-                _("No key defining hosts for topic '%s', "
-                  "see ringfile") % (key, )
-            )
-            return []
-        host = next(self.ring0[key])
-        return [(key + '.' + host, host)]
-
-
-class FanoutRingExchange(RingExchange):
-    """Fanout Exchange based on a hashmap."""
-    def __init__(self, ring=None):
-        super(FanoutRingExchange, self).__init__(ring)
-
-    def run(self, key):
-        # Assume starts with "fanout~", strip it for lookup.
-        nkey = key.split('fanout~')[1:][0]
-        if not self._ring_has(nkey):
-            LOG.warn(
-                _("No key defining hosts for topic '%s', "
-                  "see ringfile") % (nkey, )
-            )
-            return []
-        return map(lambda x: (key + '.' + x, x), self.ring[nkey])
-
-
-class MatchMakerRing(mm.MatchMakerBase):
-    """Match Maker where hosts are loaded from a static hashmap."""
-    def __init__(self, ring=None):
-        super(MatchMakerRing, self).__init__()
-        self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
-        self.add_binding(mm.DirectBinding(), mm.DirectExchange())
-        self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
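For reference, the ring file consumed by the removed classes above is plain JSON mapping each topic key to a list of hosts; RoundRobinRingExchange cycles through that list while FanoutRingExchange returns every host for a "fanout~" key. A hedged sketch of producing such a file follows (topic and host names are illustrative; the removed code looked the file up at /etc/oslo/matchmaker_ring.json by default):

    import json

    # Illustrative ring contents: each topic key maps to the hosts that
    # serve it. Names below are hypothetical, not taken from the patch.
    ring = {
        "cinder-scheduler": ["host1", "host2"],
        "cinder-volume": ["host1"],
    }

    with open("matchmaker_ring.json", "w") as ring_file:
        json.dump(ring, ring_file, indent=2)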
diff --git a/cinder/openstack/common/rpc/proxy.py b/cinder/openstack/common/rpc/proxy.py
deleted file mode 100644 (file)
index 45694b1..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-# Copyright 2012-2013 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""
-A helper class for proxy objects to remote APIs.
-
-For more information about rpc API version numbers, see:
-    rpc/dispatcher.py
-"""
-
-import six
-
-from cinder.openstack.common import rpc
-from cinder.openstack.common.rpc import common as rpc_common
-from cinder.openstack.common.rpc import serializer as rpc_serializer
-
-
-class RpcProxy(object):
-    """A helper class for rpc clients.
-
-    This class is a wrapper around the RPC client API.  It allows you to
-    specify the topic and API version in a single place.  This is intended to
-    be used as a base class for a class that implements the client side of an
-    rpc API.
-    """
-
-    # The default namespace, which can be overridden in a subclass.
-    RPC_API_NAMESPACE = None
-
-    def __init__(self, topic, default_version, version_cap=None,
-                 serializer=None):
-        """Initialize an RpcProxy.
-
-        :param topic: The topic to use for all messages.
-        :param default_version: The default API version to request in all
-               outgoing messages.  This can be overridden on a per-message
-               basis.
-        :param version_cap: Optionally cap the maximum version used for sent
-               messages.
-        :param serializer: Optionally (de-)serialize entities with a
-               provided helper.
-        """
-        self.topic = topic
-        self.default_version = default_version
-        self.version_cap = version_cap
-        if serializer is None:
-            serializer = rpc_serializer.NoOpSerializer()
-        self.serializer = serializer
-        super(RpcProxy, self).__init__()
-
-    def _set_version(self, msg, vers):
-        """Helper method to set the version in a message.
-
-        :param msg: The message having a version added to it.
-        :param vers: The version number to add to the message.
-        """
-        v = vers if vers else self.default_version
-        if (self.version_cap and not
-                rpc_common.version_is_compatible(self.version_cap, v)):
-            raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
-        msg['version'] = v
-
-    def _get_topic(self, topic):
-        """Return the topic to use for a message."""
-        return topic if topic else self.topic
-
-    def can_send_version(self, version):
-        """Check to see if a version is compatible with the version cap."""
-        return (not self.version_cap or
-                rpc_common.version_is_compatible(self.version_cap, version))
-
-    @staticmethod
-    def make_namespaced_msg(method, namespace, **kwargs):
-        return {'method': method, 'namespace': namespace, 'args': kwargs}
-
-    def make_msg(self, method, **kwargs):
-        return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
-                                        **kwargs)
-
-    def _serialize_msg_args(self, context, kwargs):
-        """Helper method called to serialize message arguments.
-
-        This calls our serializer on each argument, returning a new
-        set of args that have been serialized.
-
-        :param context: The request context
-        :param kwargs: The arguments to serialize
-        :returns: A new set of serialized arguments
-        """
-        new_kwargs = dict()
-        for argname, arg in six.iteritems(kwargs):
-            new_kwargs[argname] = self.serializer.serialize_entity(context,
-                                                                   arg)
-        return new_kwargs
-
-    def call(self, context, msg, topic=None, version=None, timeout=None):
-        """rpc.call() a remote method.
-
-        :param context: The request context
-        :param msg: The message to send, including the method and args.
-        :param topic: Override the topic for this message.
-        :param version: (Optional) Override the requested API version in this
-               message.
-        :param timeout: (Optional) A timeout to use when waiting for the
-               response.  If no timeout is specified, a default timeout will be
-               used that is usually sufficient.
-
-        :returns: The return value from the remote method.
-        """
-        self._set_version(msg, version)
-        msg['args'] = self._serialize_msg_args(context, msg['args'])
-        real_topic = self._get_topic(topic)
-        try:
-            result = rpc.call(context, real_topic, msg, timeout)
-            return self.serializer.deserialize_entity(context, result)
-        except rpc.common.Timeout as exc:
-            raise rpc.common.Timeout(
-                exc.info, real_topic, msg.get('method'))
-
-    def multicall(self, context, msg, topic=None, version=None, timeout=None):
-        """rpc.multicall() a remote method.
-
-        :param context: The request context
-        :param msg: The message to send, including the method and args.
-        :param topic: Override the topic for this message.
-        :param version: (Optional) Override the requested API version in this
-               message.
-        :param timeout: (Optional) A timeout to use when waiting for the
-               response.  If no timeout is specified, a default timeout will be
-               used that is usually sufficient.
-
-        :returns: An iterator that lets you process each of the returned values
-                  from the remote method as they arrive.
-        """
-        self._set_version(msg, version)
-        msg['args'] = self._serialize_msg_args(context, msg['args'])
-        real_topic = self._get_topic(topic)
-        try:
-            result = rpc.multicall(context, real_topic, msg, timeout)
-            return self.serializer.deserialize_entity(context, result)
-        except rpc.common.Timeout as exc:
-            raise rpc.common.Timeout(
-                exc.info, real_topic, msg.get('method'))
-
-    def cast(self, context, msg, topic=None, version=None):
-        """rpc.cast() a remote method.
-
-        :param context: The request context
-        :param msg: The message to send, including the method and args.
-        :param topic: Override the topic for this message.
-        :param version: (Optional) Override the requested API version in this
-               message.
-
-        :returns: None.  rpc.cast() does not wait on any return value from the
-                  remote method.
-        """
-        self._set_version(msg, version)
-        msg['args'] = self._serialize_msg_args(context, msg['args'])
-        rpc.cast(context, self._get_topic(topic), msg)
-
-    def fanout_cast(self, context, msg, topic=None, version=None):
-        """rpc.fanout_cast() a remote method.
-
-        :param context: The request context
-        :param msg: The message to send, including the method and args.
-        :param topic: Override the topic for this message.
-        :param version: (Optional) Override the requested API version in this
-               message.
-
-        :returns: None.  rpc.fanout_cast() does not wait on any return value
-                  from the remote method.
-        """
-        self._set_version(msg, version)
-        msg['args'] = self._serialize_msg_args(context, msg['args'])
-        rpc.fanout_cast(context, self._get_topic(topic), msg)
-
-    def cast_to_server(self, context, server_params, msg, topic=None,
-                       version=None):
-        """rpc.cast_to_server() a remote method.
-
-        :param context: The request context
-        :param server_params: Server parameters.  See rpc.cast_to_server() for
-               details.
-        :param msg: The message to send, including the method and args.
-        :param topic: Override the topic for this message.
-        :param version: (Optional) Override the requested API version in this
-               message.
-
-        :returns: None.  rpc.cast_to_server() does not wait on any
-                  return values.
-        """
-        self._set_version(msg, version)
-        msg['args'] = self._serialize_msg_args(context, msg['args'])
-        rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
-
-    def fanout_cast_to_server(self, context, server_params, msg, topic=None,
-                              version=None):
-        """rpc.fanout_cast_to_server() a remote method.
-
-        :param context: The request context
-        :param server_params: Server parameters.  See rpc.cast_to_server() for
-               details.
-        :param msg: The message to send, including the method and args.
-        :param topic: Override the topic for this message.
-        :param version: (Optional) Override the requested API version in this
-               message.
-
-        :returns: None.  rpc.fanout_cast_to_server() does not wait on any
-                  return values.
-        """
-        self._set_version(msg, version)
-        msg['args'] = self._serialize_msg_args(context, msg['args'])
-        rpc.fanout_cast_to_server(context, server_params,
-                                  self._get_topic(topic), msg)
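Client-side rpcapi modules subclassed the RpcProxy removed above; a minimal sketch of that old pattern, with a hypothetical topic and method name, is shown here for comparison with the oslo.messaging-based clients later in this diff:

    from cinder.openstack.common.rpc import proxy


    class ExampleAPI(proxy.RpcProxy):
        """Hypothetical client illustrating the removed RpcProxy pattern."""

        RPC_API_VERSION = '1.0'

        def __init__(self):
            super(ExampleAPI, self).__init__(
                topic='example-topic',
                default_version=self.RPC_API_VERSION)

        def do_something(self, ctxt, thing_id):
            # cast() fires and forgets; call() would block on the remote
            # method's return value and honor an optional timeout.
            return self.cast(ctxt, self.make_msg('do_something',
                                                 thing_id=thing_id))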
diff --git a/cinder/openstack/common/rpc/serializer.py b/cinder/openstack/common/rpc/serializer.py
deleted file mode 100644 (file)
index 9bc6e2a..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#    Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-"""Provides the definition of an RPC serialization handler"""
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Serializer(object):
-    """Generic (de-)serialization definition base class."""
-
-    @abc.abstractmethod
-    def serialize_entity(self, context, entity):
-        """Serialize something to primitive form.
-
-        :param context: Security context
-        :param entity: Entity to be serialized
-        :returns: Serialized form of entity
-        """
-        pass
-
-    @abc.abstractmethod
-    def deserialize_entity(self, context, entity):
-        """Deserialize something from primitive form.
-
-        :param context: Security context
-        :param entity: Primitive to be deserialized
-        :returns: Deserialized form of entity
-        """
-        pass
-
-
-class NoOpSerializer(Serializer):
-    """A serializer that does nothing."""
-
-    def serialize_entity(self, context, entity):
-        return entity
-
-    def deserialize_entity(self, context, entity):
-        return entity
diff --git a/cinder/openstack/common/rpc/service.py b/cinder/openstack/common/rpc/service.py
deleted file mode 100644 (file)
index 248fc93..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 Red Hat, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import rpc
-from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher
-from cinder.openstack.common import service
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Service(service.Service):
-    """Service object for binaries running on hosts.
-
-    A service enables rpc by listening to queues based on topic and host.
-    """
-    def __init__(self, host, topic, manager=None, serializer=None):
-        super(Service, self).__init__()
-        self.host = host
-        self.topic = topic
-        self.serializer = serializer
-        if manager is None:
-            self.manager = self
-        else:
-            self.manager = manager
-
-    def start(self):
-        super(Service, self).start()
-
-        self.conn = rpc.create_connection(new=True)
-        LOG.debug(_("Creating Consumer connection for Service %s") %
-                  self.topic)
-
-        dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
-                                                  self.serializer)
-
-        # Share this same connection for these Consumers
-        self.conn.create_consumer(self.topic, dispatcher, fanout=False)
-
-        node_topic = '%s.%s' % (self.topic, self.host)
-        self.conn.create_consumer(node_topic, dispatcher, fanout=False)
-
-        self.conn.create_consumer(self.topic, dispatcher, fanout=True)
-
-        # Hook to allow the manager to do other initializations after
-        # the rpc connection is created.
-        if callable(getattr(self.manager, 'initialize_service_hook', None)):
-            self.manager.initialize_service_hook(self)
-
-        # Consume from all consumers in a thread
-        self.conn.consume_in_thread()
-
-    def stop(self):
-        # Try to shut the connection down, but if we get any sort of
-        # errors, go ahead and ignore them.. as we're shutting down anyway
-        try:
-            self.conn.close()
-        except Exception:
-            pass
-        super(Service, self).stop()
diff --git a/cinder/openstack/common/rpc/zmq_receiver.py b/cinder/openstack/common/rpc/zmq_receiver.py
deleted file mode 100644 (file)
index 9a0a949..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#    Copyright 2011 OpenStack Foundation
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import eventlet
-eventlet.monkey_patch()
-
-import contextlib
-import sys
-
-from oslo.config import cfg
-
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import rpc
-from cinder.openstack.common.rpc import impl_zmq
-
-CONF = cfg.CONF
-CONF.register_opts(rpc.rpc_opts)
-CONF.register_opts(impl_zmq.zmq_opts)
-
-
-def main():
-    CONF(sys.argv[1:], project='oslo')
-    logging.setup("oslo")
-
-    with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
-        reactor.consume_in_thread()
-        reactor.wait()
diff --git a/cinder/rpc.py b/cinder/rpc.py
new file mode 100644 (file)
index 0000000..d539cfd
--- /dev/null
@@ -0,0 +1,143 @@
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+__all__ = [
+    'init',
+    'cleanup',
+    'set_defaults',
+    'add_extra_exmods',
+    'clear_extra_exmods',
+    'get_allowed_exmods',
+    'RequestContextSerializer',
+    'get_client',
+    'get_server',
+    'get_notifier',
+    'TRANSPORT_ALIASES',
+]
+
+from oslo.config import cfg
+from oslo import messaging
+
+import cinder.context
+import cinder.exception
+
+CONF = cfg.CONF
+TRANSPORT = None
+NOTIFIER = None
+
+ALLOWED_EXMODS = [
+    cinder.exception.__name__,
+]
+EXTRA_EXMODS = []
+
+# NOTE(flaper87): The cinder.openstack.common.rpc entries are
+# for backwards compat with Havana rpc_backend configuration
+# values. The cinder.rpc entries are for compat with Folsom values.
+TRANSPORT_ALIASES = {
+    'cinder.openstack.common.rpc.impl_kombu': 'rabbit',
+    'cinder.openstack.common.rpc.impl_qpid': 'qpid',
+    'cinder.openstack.common.rpc.impl_zmq': 'zmq',
+    'cinder.rpc.impl_kombu': 'rabbit',
+    'cinder.rpc.impl_qpid': 'qpid',
+    'cinder.rpc.impl_zmq': 'zmq',
+}
+
+
+def init(conf):
+    global TRANSPORT, NOTIFIER
+    exmods = get_allowed_exmods()
+    TRANSPORT = messaging.get_transport(conf,
+                                        allowed_remote_exmods=exmods,
+                                        aliases=TRANSPORT_ALIASES)
+    NOTIFIER = messaging.Notifier(TRANSPORT)
+
+
+def initialized():
+    return None not in [TRANSPORT, NOTIFIER]
+
+
+def cleanup():
+    global TRANSPORT, NOTIFIER
+    assert TRANSPORT is not None
+    assert NOTIFIER is not None
+    TRANSPORT.cleanup()
+    TRANSPORT = NOTIFIER = None
+
+
+def set_defaults(control_exchange):
+    messaging.set_transport_defaults(control_exchange)
+
+
+def add_extra_exmods(*args):
+    EXTRA_EXMODS.extend(args)
+
+
+def clear_extra_exmods():
+    del EXTRA_EXMODS[:]
+
+
+def get_allowed_exmods():
+    return ALLOWED_EXMODS + EXTRA_EXMODS
+
+
+class RequestContextSerializer(messaging.Serializer):
+
+    def __init__(self, base):
+        self._base = base
+
+    def serialize_entity(self, context, entity):
+        if not self._base:
+            return entity
+        return self._base.serialize_entity(context, entity)
+
+    def deserialize_entity(self, context, entity):
+        if not self._base:
+            return entity
+        return self._base.deserialize_entity(context, entity)
+
+    def serialize_context(self, context):
+        return context.to_dict()
+
+    def deserialize_context(self, context):
+        return cinder.context.RequestContext.from_dict(context)
+
+
+def get_transport_url(url_str=None):
+    return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES)
+
+
+def get_client(target, version_cap=None, serializer=None):
+    assert TRANSPORT is not None
+    serializer = RequestContextSerializer(serializer)
+    return messaging.RPCClient(TRANSPORT,
+                               target,
+                               version_cap=version_cap,
+                               serializer=serializer)
+
+
+def get_server(target, endpoints, serializer=None):
+    assert TRANSPORT is not None
+    serializer = RequestContextSerializer(serializer)
+    return messaging.get_rpc_server(TRANSPORT,
+                                    target,
+                                    endpoints,
+                                    executor='eventlet',
+                                    serializer=serializer)
+
+
+def get_notifier(service=None, host=None, publisher_id=None):
+    assert NOTIFIER is not None
+    if not publisher_id:
+        publisher_id = "%s.%s" % (service, host or CONF.host)
+    return NOTIFIER.prepare(publisher_id=publisher_id)
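Putting the new helpers together, a consumer of this module looks roughly like the hedged sketch below; the topic, server and method names are illustrative, and the commented-out calls assume a request context and a manager object are available.

    from oslo.config import cfg
    from oslo import messaging

    from cinder import rpc

    CONF = cfg.CONF

    # One-time setup of the global TRANSPORT and NOTIFIER state.
    rpc.init(CONF)

    # Client side: an RPCClient bound to a topic, capped at an API version.
    target = messaging.Target(topic='cinder-scheduler', version='1.0')
    client = rpc.get_client(target, version_cap='1.5')
    # client.prepare(version='1.2').cast(ctxt, 'create_volume', volume_id='...')

    # Server side: serve a manager's endpoints on topic and topic.server.
    server_target = messaging.Target(topic='cinder-scheduler', server='host1')
    # server = rpc.get_server(server_target, endpoints=[scheduler_manager])
    # server.start()

    # Notifications: a publisher-specialized view of the global NOTIFIER.
    notifier = rpc.get_notifier('scheduler', host='host1')
    # notifier.error(ctxt, 'scheduler.create_volume', payload)

    rpc.cleanup()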
index c8f00bedab00505389034ede2390365827feee0d..4f981981ea2949e683f578604225e525254e9ccf 100644 (file)
@@ -18,7 +18,7 @@ from cinder import exception
 from cinder import flow_utils
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.notifier import api as notifier
+from cinder import rpc
 from cinder import utils
 from cinder.volume.flows import common
 
@@ -128,9 +128,7 @@ def get_flow(context, db, driver, request_spec=None,
                 'reason': cause,
             }
             try:
-                publisher_id = notifier.publisher_id("scheduler")
-                notifier.notify(context, publisher_id, topic, notifier.ERROR,
-                                payload)
+                rpc.get_notifier('scheduler').error(context, topic, payload)
             except exception.CinderException:
                 LOG.exception(_("Failed notifying on %(topic)s "
                                 "payload %(payload)s") % {'topic': topic,
index 3711ba3c699a59462d56fd9be79749031014123c..fed6b08dea073387b1351da244d9900cbc2ec12f 100644 (file)
@@ -20,6 +20,7 @@ Scheduler Service
 """
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder import context
 from cinder import db
@@ -28,8 +29,8 @@ from cinder import manager
 from cinder.openstack.common import excutils
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.notifier import api as notifier
 from cinder import quota
+from cinder import rpc
 from cinder.scheduler.flows import create_volume
 from cinder.volume import rpcapi as volume_rpcapi
 
@@ -52,6 +53,8 @@ class SchedulerManager(manager.Manager):
 
     RPC_API_VERSION = '1.5'
 
+    target = messaging.Target(version=RPC_API_VERSION)
+
     def __init__(self, scheduler_driver=None, service_name=None,
                  *args, **kwargs):
         if not scheduler_driver:
@@ -232,5 +235,6 @@ class SchedulerManager(manager.Manager):
                        method=method,
                        reason=ex)
 
-        notifier.notify(context, notifier.publisher_id("scheduler"),
-                        'scheduler.' + method, notifier.ERROR, payload)
+        rpc.get_notifier("scheduler").error(context,
+                                            'scheduler.' + method,
+                                            payload)
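The old publisher_id and priority arguments collapse into the prepared notifier: get_notifier("scheduler") bakes "scheduler.<host>" in as the publisher_id, and the priority becomes the method name. A hedged helper mirroring the pattern above (it assumes rpc.init() has already run):

    from cinder import rpc


    def notify_scheduler_error(context, method, payload):
        # Only the event type and payload remain to be supplied; the
        # publisher_id comes from get_notifier() and the ERROR priority
        # from the error() method.
        rpc.get_notifier('scheduler').error(context,
                                            'scheduler.' + method,
                                            payload)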
index f3bbf013ab20ec097bf756845f8b207795b68b20..fb5ab1e13b9ce21620acbd620e432df958ce2e07 100644 (file)
@@ -17,15 +17,16 @@ Client side of the scheduler manager RPC API.
 """
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder.openstack.common import jsonutils
-import cinder.openstack.common.rpc.proxy
+from cinder import rpc
 
 
 CONF = cfg.CONF
 
 
-class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
+class SchedulerAPI(object):
     '''Client side of the scheduler rpc API.
 
     API version history:
@@ -42,63 +43,65 @@ class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
     RPC_API_VERSION = '1.0'
 
     def __init__(self):
-        super(SchedulerAPI, self).__init__(
-            topic=CONF.scheduler_topic,
-            default_version=self.RPC_API_VERSION)
+        super(SchedulerAPI, self).__init__()
+        target = messaging.Target(topic=CONF.scheduler_topic,
+                                  version=self.RPC_API_VERSION)
+        self.client = rpc.get_client(target, version_cap='1.5')
 
     def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
                       image_id=None, request_spec=None,
                       filter_properties=None):
+
+        cctxt = self.client.prepare(version='1.2')
         request_spec_p = jsonutils.to_primitive(request_spec)
-        return self.cast(ctxt, self.make_msg(
-            'create_volume',
-            topic=topic,
-            volume_id=volume_id,
-            snapshot_id=snapshot_id,
-            image_id=image_id,
-            request_spec=request_spec_p,
-            filter_properties=filter_properties),
-            version='1.2')
+        return cctxt.cast(ctxt, 'create_volume',
+                          topic=topic,
+                          volume_id=volume_id,
+                          snapshot_id=snapshot_id,
+                          image_id=image_id,
+                          request_spec=request_spec_p,
+                          filter_properties=filter_properties)
 
     def migrate_volume_to_host(self, ctxt, topic, volume_id, host,
                                force_host_copy=False, request_spec=None,
                                filter_properties=None):
+
+        cctxt = self.client.prepare(version='1.3')
         request_spec_p = jsonutils.to_primitive(request_spec)
-        return self.cast(ctxt, self.make_msg(
-            'migrate_volume_to_host',
-            topic=topic,
-            volume_id=volume_id,
-            host=host,
-            force_host_copy=force_host_copy,
-            request_spec=request_spec_p,
-            filter_properties=filter_properties),
-            version='1.3')
+        return cctxt.cast(ctxt, 'migrate_volume_to_host',
+                          topic=topic,
+                          volume_id=volume_id,
+                          host=host,
+                          force_host_copy=force_host_copy,
+                          request_spec=request_spec_p,
+                          filter_properties=filter_properties)
 
     def retype(self, ctxt, topic, volume_id,
                request_spec=None, filter_properties=None):
+
+        cctxt = self.client.prepare(version='1.4')
         request_spec_p = jsonutils.to_primitive(request_spec)
-        return self.cast(ctxt, self.make_msg(
-            'retype',
-            topic=topic,
-            volume_id=volume_id,
-            request_spec=request_spec_p,
-            filter_properties=filter_properties),
-            version='1.4')
+        return cctxt.cast(ctxt, 'retype',
+                          topic=topic,
+                          volume_id=volume_id,
+                          request_spec=request_spec_p,
+                          filter_properties=filter_properties)
 
     def manage_existing(self, ctxt, topic, volume_id,
                         request_spec=None, filter_properties=None):
+        cctxt = self.client.prepare(version='1.5')
         request_spec_p = jsonutils.to_primitive(request_spec)
-        return self.cast(ctxt, self.make_msg(
-            'manage_existing',
-            topic=topic,
-            volume_id=volume_id,
-            request_spec=request_spec_p,
-            filter_properties=filter_properties),
-            version='1.5')
+        return cctxt.cast(ctxt, 'manage_existing',
+                          topic=topic,
+                          volume_id=volume_id,
+                          request_spec=request_spec_p,
+                          filter_properties=filter_properties)
 
     def update_service_capabilities(self, ctxt,
                                     service_name, host,
                                     capabilities):
-        self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
-                         service_name=service_name, host=host,
-                         capabilities=capabilities))
+        # FIXME(flaper87): What to do with fanout?
+        cctxt = self.client.prepare(fanout=True)
+        cctxt.cast(ctxt, 'update_service_capabilities',
+                   service_name=service_name, host=host,
+                   capabilities=capabilities)
index 3b7fc63afc2205fd077e8d5dcb8d22ea40de3ef4..1091729cc5c9fb9c9dec1cf601fa04c8e13fc881 100644 (file)
@@ -23,6 +23,7 @@ import os
 import random
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder import context
 from cinder import db
@@ -30,8 +31,8 @@ from cinder import exception
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import loopingcall
-from cinder.openstack.common import rpc
 from cinder.openstack.common import service
+from cinder import rpc
 from cinder import version
 from cinder import wsgi
 
@@ -75,6 +76,10 @@ class Service(service.Service):
                  periodic_interval=None, periodic_fuzzy_delay=None,
                  service_name=None, *args, **kwargs):
         super(Service, self).__init__()
+
+        if not rpc.initialized():
+            rpc.init(CONF)
+
         self.host = host
         self.binary = binary
         self.topic = topic
@@ -104,22 +109,14 @@ class Service(service.Service):
         except exception.NotFound:
             self._create_service_ref(ctxt)
 
-        self.conn = rpc.create_connection(new=True)
-        LOG.debug(_("Creating Consumer connection for Service %s") %
-                  self.topic)
-
-        rpc_dispatcher = self.manager.create_rpc_dispatcher()
+        LOG.debug(_("Creating RPC server for service %s") % self.topic)
 
-        # Share this same connection for these Consumers
-        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)
+        target = messaging.Target(topic=self.topic, server=self.host)
+        endpoints = [self.manager]
+        endpoints.extend(self.manager.additional_endpoints)
+        self.rpcserver = rpc.get_server(target, endpoints)
+        self.rpcserver.start()
 
-        node_topic = '%s.%s' % (self.topic, self.host)
-        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)
-
-        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)
-
-        # Consume from all consumers in a thread
-        self.conn.consume_in_thread()
         self.manager.init_host()
 
         if self.report_interval:
@@ -219,7 +216,7 @@ class Service(service.Service):
         # Try to shut the connection down, but if we get any sort of
         # errors, go ahead and ignore them.. as we're shutting down anyway
         try:
-            self.conn.close()
+            self.rpcserver.stop()
         except Exception:
             pass
         for x in self.timers:
@@ -228,7 +225,6 @@ class Service(service.Service):
             except Exception:
                 pass
         self.timers = []
-
         super(Service, self).stop()
 
     def wait(self):
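With oslo.messaging, each entry in the endpoints list passed to rpc.get_server() is simply an object whose public methods become RPC methods and whose optional target attribute pins the supported version, as SchedulerManager does earlier in this diff. A hedged sketch of a minimal endpoint (names illustrative):

    from oslo import messaging


    class ExampleManager(object):
        """Hypothetical endpoint whose public methods are exposed over RPC."""

        RPC_API_VERSION = '1.0'

        # The server only dispatches requests whose version is compatible
        # with this target's version.
        target = messaging.Target(version=RPC_API_VERSION)

        def create_volume(self, context, volume_id, **kwargs):
            # A real manager would do the work here; the sketch just
            # echoes its arguments.
            return {'volume_id': volume_id}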
index 479eeea5460ba0e9f8b20cd66fc36d928440df0c..2c846c6e869b89ff7932d165a03205e0a6d83a4a 100644 (file)
@@ -21,7 +21,6 @@ inline callbacks.
 
 """
 
-
 import os
 import shutil
 import tempfile
@@ -30,6 +29,7 @@ import uuid
 import fixtures
 import mox
 from oslo.config import cfg
+from oslo.messaging import conffixture as messaging_conffixture
 import stubout
 import testtools
 from testtools import matchers
@@ -39,9 +39,10 @@ from cinder.db import migration
 from cinder.openstack.common.db.sqlalchemy import session
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
+from cinder import rpc
 from cinder import service
 from cinder.tests import conf_fixture
-
+from cinder.tests import fake_notifier
 
 test_opts = [
     cfg.StrOpt('sqlite_clean_db',
@@ -130,6 +131,17 @@ class TestCase(testtools.TestCase):
 
         self.log_fixture = self.useFixture(fixtures.FakeLogger())
 
+        rpc.add_extra_exmods("cinder.tests")
+        self.addCleanup(rpc.clear_extra_exmods)
+        self.addCleanup(rpc.cleanup)
+
+        fs = '%(levelname)s [%(name)s] %(message)s'
+        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
+        self.messaging_conf.transport_driver = 'fake'
+        self.messaging_conf.response_timeout = 15
+        self.useFixture(self.messaging_conf)
+        rpc.init(CONF)
+
         conf_fixture.set_defaults(CONF)
         CONF([], default_config_files=[])
 
@@ -163,6 +175,8 @@ class TestCase(testtools.TestCase):
         self.injected = []
         self._services = []
 
+        fake_notifier.stub_notifier(self.stubs)
+
         CONF.set_override('fatal_exception_format_errors', True)
         # This will be cleaned up by the NestedTempfile fixture
         CONF.set_override('lock_path', tempfile.mkdtemp())
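The same fixture wiring can be reused in a standalone test: messaging options are overridden through ConfFixture with the in-memory 'fake' driver rather than by poking at the options the library registers. A hedged sketch (class name hypothetical):

    from oslo.config import cfg
    from oslo.messaging import conffixture as messaging_conffixture
    import testtools

    from cinder import rpc

    CONF = cfg.CONF


    class ExampleRpcTest(testtools.TestCase):
        """Hypothetical test showing the fixture pattern used above."""

        def setUp(self):
            super(ExampleRpcTest, self).setUp()
            self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
            self.messaging_conf.transport_driver = 'fake'
            self.messaging_conf.response_timeout = 15
            self.useFixture(self.messaging_conf)
            rpc.init(CONF)
            self.addCleanup(rpc.cleanup)

        def test_rpc_is_initialized(self):
            self.assertTrue(rpc.initialized())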
index 0a506c48ebec96b24b9ddf0fa8c7837bc18360be..4bad154d59c640b00e6035e40fb6083a2add0050 100644 (file)
@@ -11,7 +11,8 @@
 # under the License.
 
 import ast
-import os
+import tempfile
+import time
 import webob
 
 from oslo.config import cfg
@@ -25,6 +26,7 @@ from cinder.openstack.common import timeutils
 from cinder import test
 from cinder.tests.api import fakes
 from cinder.tests.api.v2 import stubs
+from cinder.tests import cast_as_call
 from cinder.volume import api as volume_api
 from cinder.volume import utils as volutils
 
@@ -43,7 +45,15 @@ class AdminActionsTest(test.TestCase):
 
     def setUp(self):
         super(AdminActionsTest, self).setUp()
+
+        self.tempdir = tempfile.mkdtemp()
+        self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake')
+        self.flags(lock_path=self.tempdir,
+                   disable_process_locking=True)
+
         self.volume_api = volume_api.API()
+        cast_as_call.mock_cast_as_call(self.volume_api.volume_rpcapi.client)
+        cast_as_call.mock_cast_as_call(self.volume_api.scheduler_rpcapi.client)
         self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
 
     def test_reset_status_as_admin(self):
@@ -255,7 +265,6 @@ class AdminActionsTest(test.TestCase):
         self.assertRaises(exception.NotFound, db.volume_get, ctx, volume['id'])
 
     def test_force_delete_snapshot(self):
-        self.stubs.Set(os.path, 'exists', lambda x: True)
         self.stubs.Set(volutils, 'clear_volume',
                        lambda a, b, volume_clear=CONF.volume_clear,
                        volume_clear_size=CONF.volume_clear_size: None)
@@ -275,9 +284,28 @@ class AdminActionsTest(test.TestCase):
         req.environ['cinder.context'] = ctx
         # start service to handle rpc.cast for 'delete snapshot'
         svc = self.start_service('volume', host='test')
+
+        cast_as_call.mock_cast_as_call(svc.manager.scheduler_rpcapi.client)
+
+        # NOTE(flaper87): Instead of patching `os.path.exists`,
+        # create a fake path for the snapshot that should
+        # be deleted and let the check pass
+        def local_path(volume, vg=None):
+            tfile = tempfile.mkstemp(suffix='-cow', dir=self.tempdir)
+            # NOTE(flaper87): Strip `-cow` since it'll be added
+            # later in the happy path.
+            return tfile[1].strip('-cow')
+
+        self.stubs.Set(svc.manager.driver, "local_path", local_path)
         # make request
         resp = req.get_response(app())
-        # request is accepted
+
+        # NOTE(flaper87): Since we're using a nested service,
+        # let's make sure we yield control over the service
+        # thread so it can process the recent calls.
+        time.sleep(0.6)
+
+        # Request is accepted
         self.assertEqual(resp.status_int, 202)
         # snapshot is deleted
         self.assertRaises(exception.NotFound, db.snapshot_get, ctx,
index 196c203ee19bc2c0ef8ec912bc839b97a5d479bb..8abf1d070d2166b2339dd552ffc8f270ddad7930 100644 (file)
@@ -20,10 +20,9 @@ import webob
 
 from cinder.api.contrib import qos_specs_manage
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
 from cinder import test
 from cinder.tests.api import fakes
+from cinder.tests import fake_notifier
 from cinder.volume import qos_specs
 
 
@@ -141,13 +140,14 @@ def return_disassociate_all(context, id):
 class QoSSpecManageApiTest(test.TestCase):
     def setUp(self):
         super(QoSSpecManageApiTest, self).setUp()
-        self.flags(host='fake',
-                   notification_driver=[test_notifier.__name__])
+        self.flags(host='fake')
         self.controller = qos_specs_manage.QoSSpecsController()
+
         #reset notifier drivers left over from other api/contrib tests
-        notifier_api._reset_drivers()
-        test_notifier.NOTIFICATIONS = []
-        self.addCleanup(notifier_api._reset_drivers)
+        # NOTE(flaper87): Cleanups like this should happen in each test;
+        # fixing that is not the purpose of this patch, though.
+        fake_notifier.reset()
+        self.addCleanup(fake_notifier.reset)
 
     def test_index(self):
         self.stubs.Set(qos_specs, 'get_all_specs',
@@ -194,9 +194,9 @@ class QoSSpecManageApiTest(test.TestCase):
         self.stubs.Set(qos_specs, 'delete',
                        return_qos_specs_delete)
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.controller.delete(req, 1)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_qos_specs_delete_not_found(self):
         self.stubs.Set(qos_specs, 'get_qos_specs',
@@ -204,11 +204,11 @@ class QoSSpecManageApiTest(test.TestCase):
         self.stubs.Set(qos_specs, 'delete',
                        return_qos_specs_delete)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777')
         self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                           req, '777')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_qos_specs_delete_inuse(self):
         self.stubs.Set(qos_specs, 'get_qos_specs',
@@ -217,10 +217,10 @@ class QoSSpecManageApiTest(test.TestCase):
                        return_qos_specs_delete)
 
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                           req, '666')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_qos_specs_delete_inuse_force(self):
         self.stubs.Set(qos_specs, 'get_qos_specs',
@@ -229,42 +229,42 @@ class QoSSpecManageApiTest(test.TestCase):
                        return_qos_specs_delete)
 
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666?force=True')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(webob.exc.HTTPInternalServerError,
                           self.controller.delete,
                           req, '666')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_qos_specs_delete_keys(self):
         self.stubs.Set(qos_specs, 'delete_keys',
                        return_qos_specs_delete_keys)
         body = {"keys": ['bar', 'zoo']}
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.controller.delete_keys(req, '666', body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_qos_specs_delete_keys_qos_notfound(self):
         self.stubs.Set(qos_specs, 'delete_keys',
                        return_qos_specs_delete_keys)
         body = {"keys": ['bar', 'zoo']}
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777/delete_keys')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(webob.exc.HTTPNotFound,
                           self.controller.delete_keys,
                           req, '777', body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_qos_specs_delete_keys_badkey(self):
         self.stubs.Set(qos_specs, 'delete_keys',
                        return_qos_specs_delete_keys)
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys')
         body = {"keys": ['foo', 'zoo']}
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(webob.exc.HTTPBadRequest,
                           self.controller.delete_keys,
                           req, '666', body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_create(self):
         self.stubs.Set(qos_specs, 'create',
@@ -276,10 +276,10 @@ class QoSSpecManageApiTest(test.TestCase):
                               "key1": "value1"}}
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         res_dict = self.controller.create(req, body)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
         self.assertEqual('qos_specs_1', res_dict['qos_specs']['name'])
 
     def test_create_conflict(self):
@@ -292,10 +292,10 @@ class QoSSpecManageApiTest(test.TestCase):
                               "key1": "value1"}}
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(webob.exc.HTTPConflict,
                           self.controller.create, req, body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_create_failed(self):
         self.stubs.Set(qos_specs, 'create',
@@ -307,10 +307,10 @@ class QoSSpecManageApiTest(test.TestCase):
                               "key1": "value1"}}
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(webob.exc.HTTPInternalServerError,
                           self.controller.create, req, body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def _create_qos_specs_bad_body(self, body):
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
@@ -333,50 +333,51 @@ class QoSSpecManageApiTest(test.TestCase):
         self.stubs.Set(qos_specs, 'update',
                        return_qos_specs_update)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/555')
         body = {'qos_specs': {'key1': 'value1',
                               'key2': 'value2'}}
         res = self.controller.update(req, '555', body)
         self.assertDictMatch(res, body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_update_not_found(self):
         self.stubs.Set(qos_specs, 'update',
                        return_qos_specs_update)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777')
         body = {'qos_specs': {'key1': 'value1',
                               'key2': 'value2'}}
         self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                           req, '777', body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_update_invalid_input(self):
         self.stubs.Set(qos_specs, 'update',
                        return_qos_specs_update)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/888')
         body = {'qos_specs': {'key1': 'value1',
                               'key2': 'value2'}}
-        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.update,
                           req, '888', body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_update_failed(self):
         self.stubs.Set(qos_specs, 'update',
                        return_qos_specs_update)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/999')
         body = {'qos_specs': {'key1': 'value1',
                               'key2': 'value2'}}
         self.assertRaises(webob.exc.HTTPInternalServerError,
                           self.controller.update,
                           req, '999', body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_show(self):
         self.stubs.Set(qos_specs, 'get_qos_specs',
index c7474dad070e2e60ab2abfcaa2609accf52e5d5d..18a60dc2a846ad49f11d1a91b100a5c5ff4586ac 100644 (file)
@@ -22,10 +22,9 @@ import mock
 
 from cinder.api.contrib import types_extra_specs
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
 from cinder import test
 from cinder.tests.api import fakes
+from cinder.tests import fake_notifier
 import cinder.wsgi
 
 
@@ -67,15 +66,14 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
 
     def setUp(self):
         super(VolumeTypesExtraSpecsTest, self).setUp()
-        self.flags(host='fake',
-                   notification_driver=[test_notifier.__name__])
+        self.flags(host='fake')
         self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get)
         self.api_path = '/v2/fake/os-volume-types/1/extra_specs'
         self.controller = types_extra_specs.VolumeTypeExtraSpecsController()
+
         """to reset notifier drivers left over from other api/contrib tests"""
-        notifier_api._reset_drivers()
-        test_notifier.NOTIFICATIONS = []
-        self.addCleanup(notifier_api._reset_drivers)
+        fake_notifier.reset()
+        self.addCleanup(fake_notifier.reset)
 
     def test_index(self):
         self.stubs.Set(cinder.db, 'volume_type_extra_specs_get',
@@ -116,10 +114,10 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
         self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete',
                        delete_volume_type_extra_specs)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank(self.api_path + '/key5')
         self.controller.delete(req, 1, 'key5')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_delete_not_found(self):
         self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete',
@@ -135,10 +133,10 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
                        return_create_volume_type_extra_specs)
         body = {"extra_specs": {"key1": "value1"}}
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank(self.api_path)
         res_dict = self.controller.create(req, 1, body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
         self.assertEqual('value1', res_dict['extra_specs']['key1'])
 
@@ -155,11 +153,11 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
 
         body = {"extra_specs": {"other_alphanum.-_:": "value1"}}
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
 
         req = fakes.HTTPRequest.blank(self.api_path)
         res_dict = self.controller.create(req, 1, body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
         self.assertEqual('value1',
                          res_dict['extra_specs']['other_alphanum.-_:'])
 
@@ -178,11 +176,11 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
                                 "other2_alphanum.-_:": "value2",
                                 "other3_alphanum.-_:": "value3"}}
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
 
         req = fakes.HTTPRequest.blank(self.api_path)
         res_dict = self.controller.create(req, 1, body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
         self.assertEqual('value1',
                          res_dict['extra_specs']['other_alphanum.-_:'])
         self.assertEqual('value2',
@@ -196,10 +194,10 @@ class VolumeTypesExtraSpecsTest(test.TestCase):
                        return_create_volume_type_extra_specs)
         body = {"key1": "value1"}
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank(self.api_path + '/key1')
         res_dict = self.controller.update(req, 1, 'key1', body)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
         self.assertEqual('value1', res_dict['key1'])
 
index 74f9ae8addd45931de26a88ce06bff95fe5d01bf..f7b425373a53b273f158d850176d3f7c99e63ebd 100644 (file)
@@ -17,10 +17,9 @@ import webob
 
 from cinder.api.contrib import types_manage
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
 from cinder import test
 from cinder.tests.api import fakes
+from cinder.tests import fake_notifier
 from cinder.volume import volume_types
 
 
@@ -64,13 +63,14 @@ def return_volume_types_get_by_name(context, name):
 class VolumeTypesManageApiTest(test.TestCase):
     def setUp(self):
         super(VolumeTypesManageApiTest, self).setUp()
-        self.flags(host='fake',
-                   notification_driver=[test_notifier.__name__])
+        self.flags(host='fake')
         self.controller = types_manage.VolumeTypesManageController()
         """to reset notifier drivers left over from other api/contrib tests"""
-        notifier_api._reset_drivers()
-        test_notifier.NOTIFICATIONS = []
-        self.addCleanup(notifier_api._reset_drivers)
+        fake_notifier.reset()
+        self.addCleanup(fake_notifier.reset)
+
+    def tearDown(self):
+        super(VolumeTypesManageApiTest, self).tearDown()
 
     def test_volume_types_delete(self):
         self.stubs.Set(volume_types, 'get_volume_type',
@@ -79,9 +79,9 @@ class VolumeTypesManageApiTest(test.TestCase):
                        return_volume_types_destroy)
 
         req = fakes.HTTPRequest.blank('/v2/fake/types/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.controller._delete(req, 1)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_volume_types_delete_not_found(self):
         self.stubs.Set(volume_types, 'get_volume_type',
@@ -89,11 +89,11 @@ class VolumeTypesManageApiTest(test.TestCase):
         self.stubs.Set(volume_types, 'destroy',
                        return_volume_types_destroy)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         req = fakes.HTTPRequest.blank('/v2/fake/types/777')
         self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete,
                           req, '777')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_volume_types_with_volumes_destroy(self):
         self.stubs.Set(volume_types, 'get_volume_type',
@@ -101,9 +101,9 @@ class VolumeTypesManageApiTest(test.TestCase):
         self.stubs.Set(volume_types, 'destroy',
                        return_volume_types_with_volumes_destroy)
         req = fakes.HTTPRequest.blank('/v2/fake/types/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.controller._delete(req, 1)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
     def test_create(self):
         self.stubs.Set(volume_types, 'create',
@@ -115,10 +115,10 @@ class VolumeTypesManageApiTest(test.TestCase):
                                 "extra_specs": {"key1": "value1"}}}
         req = fakes.HTTPRequest.blank('/v2/fake/types')
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         res_dict = self.controller._create(req, body)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
         self.assertEqual(1, len(res_dict))
         self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
 
index 15613a53ba6fca3c60a8a9a4699d06f5f31d5665..f38281010cc623481f910dd3185c9f82a7905c1a 100644 (file)
@@ -17,12 +17,12 @@ import json
 import uuid
 
 import mock
+from oslo import messaging
 import webob
 
 from cinder.api.contrib import volume_actions
 from cinder import exception
 from cinder.openstack.common import jsonutils
-from cinder.openstack.common.rpc import common as rpc_common
 from cinder import test
 from cinder.tests.api import fakes
 from cinder.tests.api.v2 import stubs
@@ -522,7 +522,7 @@ class VolumeImageActionsTest(test.TestCase):
     def test_copy_volume_to_image_remoteerror(self):
         def stub_upload_volume_to_image_service_raise(self, context, volume,
                                                       metadata, force):
-            raise rpc_common.RemoteError
+            raise messaging.RemoteError
         self.stubs.Set(volume_api.API,
                        "copy_volume_to_image",
                        stub_upload_volume_to_image_service_raise)
index 93affbeb74a388f5be3d7cccafaacf4f4b9a7d69..9fdd27f1334d06fa4127f406753c459f596b65cf 100644 (file)
@@ -33,7 +33,6 @@ import cinder.volume
 
 
 LOG = logging.getLogger(__name__)
-volume_transfer_api = API()
 
 
 class VolumeTransferAPITestCase(test.TestCase):
@@ -41,14 +40,14 @@ class VolumeTransferAPITestCase(test.TestCase):
 
     def setUp(self):
         super(VolumeTransferAPITestCase, self).setUp()
+        self.volume_transfer_api = API()
 
-    @staticmethod
-    def _create_transfer(volume_id=1,
+    def _create_transfer(self, volume_id=1,
                          display_name='test_transfer'):
         """Create a transfer object."""
-        return volume_transfer_api.create(context.get_admin_context(),
-                                          volume_id,
-                                          display_name)
+        return self.volume_transfer_api.create(context.get_admin_context(),
+                                               volume_id,
+                                               display_name)
 
     @staticmethod
     def _create_volume(display_name='test_volume',
index ea83a13fe8b26005e2be221a5950de202b0f59a0..3ab8a326d88d34ae4fdb4f10d0b9492074aca67b 100644 (file)
@@ -18,10 +18,9 @@ import webob
 
 from cinder import context
 from cinder import db
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
 from cinder import test
 from cinder.tests.api import fakes
+from cinder.tests import fake_notifier
 
 
 def return_volume_type_encryption(context, volume_type_id):
@@ -52,13 +51,11 @@ class VolumeTypeEncryptionTest(test.TestCase):
 
     def setUp(self):
         super(VolumeTypeEncryptionTest, self).setUp()
-        self.flags(host='fake',
-                   notification_driver=[test_notifier.__name__])
+        self.flags(host='fake')
         self.api_path = '/v2/fake/os-volume-types/1/encryption'
         """to reset notifier drivers left over from other api/contrib tests"""
-        notifier_api._reset_drivers()
-        test_notifier.NOTIFICATIONS = []
-        self.addCleanup(notifier_api._reset_drivers)
+        fake_notifier.reset()
+        self.addCleanup(fake_notifier.reset)
 
     def _get_response(self, volume_type, admin=True,
                       url='/v2/fake/types/%s/encryption',
@@ -167,7 +164,7 @@ class VolumeTypeEncryptionTest(test.TestCase):
                                'provider': provider,
                                'volume_type_id': volume_type['id']}}
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         res = self._get_response(volume_type)
         res_dict = json.loads(res.body)
         self.assertEqual(200, res.status_code)
@@ -182,7 +179,7 @@ class VolumeTypeEncryptionTest(test.TestCase):
                                  req_headers='application/json')
         res_dict = json.loads(res.body)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
 
         # check response
         self.assertIn('encryption', res_dict)
@@ -237,7 +234,7 @@ class VolumeTypeEncryptionTest(test.TestCase):
                                  req_headers='application/json')
         res_dict = json.loads(res.body)
 
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertEqual(404, res.status_code)
 
         expected = {
index fb937bd96d5d974471d8d75eea9c81b5b70ebedd..d072cd63ee25ffc3d715393af7102f94f5d4e041 100644 (file)
@@ -25,11 +25,10 @@ from cinder.api.v1 import volumes
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
 from cinder import test
 from cinder.tests.api import fakes
 from cinder.tests.api.v2 import stubs
+from cinder.tests import fake_notifier
 from cinder.tests.image import fake as fake_image
 from cinder.volume import api as volume_api
 
@@ -63,8 +62,7 @@ class VolumeApiTest(test.TestCase):
         self.controller = volumes.VolumeController(self.ext_mgr)
 
         self.flags(host='fake',
-                   notification_driver=[test_notifier.__name__])
-        test_notifier.NOTIFICATIONS = []
+                   notification_driver=[fake_notifier.__name__])
 
         self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
         self.stubs.Set(db, 'service_get_all_by_topic',
@@ -72,8 +70,8 @@ class VolumeApiTest(test.TestCase):
         self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
 
     def tearDown(self):
-        notifier_api._reset_drivers()
         super(VolumeApiTest, self).tearDown()
+        fake_notifier.reset()
 
     def test_volume_create(self):
         self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
@@ -243,7 +241,7 @@ class VolumeApiTest(test.TestCase):
         }
         body = {"volume": updates}
         req = fakes.HTTPRequest.blank('/v1/volumes/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         res_dict = self.controller.update(req, '1', body)
         expected = {'volume': {
             'status': 'fakestatus',
@@ -268,7 +266,7 @@ class VolumeApiTest(test.TestCase):
             'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
             'size': 1}}
         self.assertEqual(res_dict, expected)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
 
     def test_volume_update_metadata(self):
         self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
@@ -279,7 +277,7 @@ class VolumeApiTest(test.TestCase):
         }
         body = {"volume": updates}
         req = fakes.HTTPRequest.blank('/v1/volumes/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         res_dict = self.controller.update(req, '1', body)
         expected = {'volume': {
             'status': 'fakestatus',
@@ -306,7 +304,7 @@ class VolumeApiTest(test.TestCase):
             'size': 1
         }}
         self.assertEqual(res_dict, expected)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
 
     def test_volume_update_with_admin_metadata(self):
         self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
@@ -327,7 +325,7 @@ class VolumeApiTest(test.TestCase):
         }
         body = {"volume": updates}
         req = fakes.HTTPRequest.blank('/v1/volumes/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         admin_ctx = context.RequestContext('admin', 'fakeproject', True)
         req.environ['cinder.context'] = admin_ctx
         res_dict = self.controller.update(req, '1', body)
@@ -354,7 +352,7 @@ class VolumeApiTest(test.TestCase):
             'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
             'size': 1}}
         self.assertEqual(res_dict, expected)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
 
     def test_update_empty_body(self):
         body = {}
index cac93fc8ae1aeca1fc4eae331312b8c859220cf8..49c2b80a197e1788213fe651c61e336b4f9ac6c2 100644 (file)
@@ -17,6 +17,7 @@ import datetime
 
 from cinder import exception as exc
 
+
 FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
 FAKE_UUIDS = {}
 
index 64cd6826247880f5812f2f034a334e7acc277398..efdda4e87c080a80dc7c106ae55dbb5797f67220 100644 (file)
@@ -26,11 +26,10 @@ from cinder.api.v2 import volumes
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
 from cinder import test
 from cinder.tests.api import fakes
 from cinder.tests.api.v2 import stubs
+from cinder.tests import fake_notifier
 from cinder.tests.image import fake as fake_image
 from cinder import utils
 from cinder.volume import api as volume_api
@@ -66,8 +65,7 @@ class VolumeApiTest(test.TestCase):
         self.controller = volumes.VolumeController(self.ext_mgr)
 
         self.flags(host='fake',
-                   notification_driver=[test_notifier.__name__])
-        test_notifier.NOTIFICATIONS = []
+                   notification_driver=[fake_notifier.__name__])
 
         self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
         self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
@@ -76,8 +74,8 @@ class VolumeApiTest(test.TestCase):
         self.maxDiff = None
 
     def tearDown(self):
-        notifier_api._reset_drivers()
         super(VolumeApiTest, self).tearDown()
+        fake_notifier.reset()
 
     def test_volume_create(self):
         self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
@@ -269,7 +267,7 @@ class VolumeApiTest(test.TestCase):
         }
         body = {"volume": updates}
         req = fakes.HTTPRequest.blank('/v2/volumes/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         res_dict = self.controller.update(req, '1', body)
         expected = {
             'volume': {
@@ -309,7 +307,7 @@ class VolumeApiTest(test.TestCase):
             }
         }
         self.assertEqual(res_dict, expected)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
 
     def test_volume_update_metadata(self):
         self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
@@ -320,7 +318,7 @@ class VolumeApiTest(test.TestCase):
         }
         body = {"volume": updates}
         req = fakes.HTTPRequest.blank('/v2/volumes/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         res_dict = self.controller.update(req, '1', body)
         expected = {'volume': {
             'status': 'fakestatus',
@@ -358,7 +356,7 @@ class VolumeApiTest(test.TestCase):
             ],
         }}
         self.assertEqual(res_dict, expected)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
 
     def test_volume_update_with_admin_metadata(self):
         self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
@@ -379,7 +377,7 @@ class VolumeApiTest(test.TestCase):
         }
         body = {"volume": updates}
         req = fakes.HTTPRequest.blank('/v2/volumes/1')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         admin_ctx = context.RequestContext('admin', 'fake', True)
         req.environ['cinder.context'] = admin_ctx
         res_dict = self.controller.update(req, '1', body)
@@ -418,7 +416,7 @@ class VolumeApiTest(test.TestCase):
             ],
         }}
         self.assertEqual(res_dict, expected)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
 
     def test_update_empty_body(self):
         body = {}
diff --git a/cinder/tests/cast_as_call.py b/cinder/tests/cast_as_call.py
new file mode 100644 (file)
index 0000000..caf47d4
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+
+def mock_cast_as_call(obj=None):
+    """Use this to mock `cast` as calls.
+
+    :param obj: Either an instance of RPCClient
+    or an instance of _Context.
+    """
+    orig_prepare = obj.prepare
+
+    def prepare(*args, **kwargs):
+        cctxt = orig_prepare(*args, **kwargs)
+        mock_cast_as_call(obj=cctxt)  # recursively patch the prepared context
+        return cctxt
+
+    prepare_patch = mock.patch.object(obj, 'prepare').start()
+    prepare_patch.side_effect = prepare
+
+    cast_patch = mock.patch.object(obj, 'cast').start()
+    cast_patch.side_effect = obj.call
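For reference, a minimal, self-contained sketch of how a test might drive this helper; the FakeClient class below is illustrative only and is not part of the patch, it merely stands in for an RPC client exposing prepare/cast/call:

import mock

from cinder.tests import cast_as_call


class FakeClient(object):
    """Illustrative stand-in for an RPC client; not part of the patch."""

    def prepare(self, **kwargs):
        # The real client returns a prepared call context; returning self
        # keeps the sketch short.
        return self

    def cast(self, ctxt, method, **kwargs):
        return None     # a cast normally returns nothing

    def call(self, ctxt, method, **kwargs):
        return 'reply'  # a call returns the server's reply


client = FakeClient()
cast_as_call.mock_cast_as_call(obj=client)
# With the helper applied, cast() is routed through call(), so tests can
# assert on results that would otherwise be produced asynchronously.
assert client.cast({}, 'do_something') == 'reply'
mock.patch.stopall()

Routing casts through call keeps the test synchronous without changing the code under test.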
diff --git a/cinder/tests/fake_notifier.py b/cinder/tests/fake_notifier.py
new file mode 100644 (file)
index 0000000..7074ca9
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright 2014 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import collections
+import functools
+
+from oslo import messaging
+
+from cinder import rpc
+
+NOTIFICATIONS = []
+
+
+def reset():
+    del NOTIFICATIONS[:]
+
+
+FakeMessage = collections.namedtuple('Message',
+                                     ['publisher_id', 'priority',
+                                      'event_type', 'payload'])
+
+
+class FakeNotifier(object):
+
+    def __init__(self, transport, publisher_id):
+        self.transport = transport
+        self.publisher_id = publisher_id
+        for priority in ['debug', 'info', 'warn', 'error', 'critical']:
+            setattr(self, priority,
+                    functools.partial(self._notify, priority.upper()))
+
+    def prepare(self, publisher_id=None):
+        if publisher_id is None:
+            publisher_id = self.publisher_id
+        return self.__class__(self.transport, publisher_id)
+
+    def _notify(self, priority, ctxt, event_type, payload):
+        msg = dict(publisher_id=self.publisher_id,
+                   priority=priority,
+                   event_type=event_type,
+                   payload=payload)
+        NOTIFICATIONS.append(msg)
+
+
+def stub_notifier(stubs):
+    stubs.Set(messaging, 'Notifier', FakeNotifier)
+    if rpc.NOTIFIER:
+        stubs.Set(rpc, 'NOTIFIER', FakeNotifier(rpc.NOTIFIER.transport,
+                                                rpc.NOTIFIER.publisher_id))
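A minimal sketch of what the test changes above end up exercising; the FakeNotifier is driven directly here just to show what lands in NOTIFICATIONS, whereas the tests presumably get it wired in through stub_notifier() or the test base class. The publisher id and payload values below are placeholders:

from cinder.tests import fake_notifier

fake_notifier.reset()
notifier = fake_notifier.FakeNotifier(transport=None,
                                      publisher_id='volume.fakehost')
notifier.info({}, 'volume.create.end', {'volume_id': 'fake-id'})

assert len(fake_notifier.NOTIFICATIONS) == 1
message = fake_notifier.NOTIFICATIONS[0]
assert message['event_type'] == 'volume.create.end'
assert message['priority'] == 'INFO'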
index ef9bb1b58aec606a4745cc1b98063e3e53c12dfc..df3668d13f247cd9ae8fb582764f2be8d967596f 100644 (file)
@@ -65,7 +65,6 @@ class _IntegratedTestBase(test.TestCase):
         # set up services
         self.volume = self.start_service('volume')
         self.scheduler = self.start_service('scheduler')
-
         self._start_api_service()
 
         self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url)
index b3aaa0a1fdb8d56e2a8f34f013cf30e1cb786245..2622a2296f5ffca7f8750a518944b992152ead83 100644 (file)
@@ -17,7 +17,7 @@
 Unit Tests for cinder.scheduler.rpcapi
 """
 
-
+import copy
 import mock
 
 from oslo.config import cfg
@@ -38,46 +38,55 @@ class SchedulerRpcAPITestCase(test.TestCase):
     def tearDown(self):
         super(SchedulerRpcAPITestCase, self).tearDown()
 
-    def _test_scheduler_api(self, method, rpc_method, _mock_method, **kwargs):
+    def _test_scheduler_api(self, method, rpc_method,
+                            fanout=False, **kwargs):
         ctxt = context.RequestContext('fake_user', 'fake_project')
         rpcapi = scheduler_rpcapi.SchedulerAPI()
         expected_retval = 'foo' if rpc_method == 'call' else None
-        expected_version = kwargs.pop('version', rpcapi.RPC_API_VERSION)
-        expected_msg = rpcapi.make_msg(method, **kwargs)
-        expected_msg['version'] = expected_version
+
+        target = {
+            "fanout": fanout,
+            "version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
+        }
+
+        expected_msg = copy.deepcopy(kwargs)
 
         self.fake_args = None
         self.fake_kwargs = None
 
+        def _fake_prepare_method(*args, **kwds):
+            for kwd in kwds:
+                self.assertEqual(kwds[kwd], target[kwd])
+            return rpcapi.client
+
         def _fake_rpc_method(*args, **kwargs):
             self.fake_args = args
             self.fake_kwargs = kwargs
             if expected_retval:
                 return expected_retval
 
-        _mock_method.side_effect = _fake_rpc_method
-
-        retval = getattr(rpcapi, method)(ctxt, **kwargs)
+        with mock.patch.object(rpcapi.client, "prepare") as mock_prepared:
+            mock_prepared.side_effect = _fake_prepare_method
 
-        self.assertEqual(retval, expected_retval)
-        expected_args = [ctxt, CONF.scheduler_topic, expected_msg]
-        for arg, expected_arg in zip(self.fake_args, expected_args):
-            self.assertEqual(arg, expected_arg)
+            with mock.patch.object(rpcapi.client, rpc_method) as mock_method:
+                mock_method.side_effect = _fake_rpc_method
+                retval = getattr(rpcapi, method)(ctxt, **kwargs)
+                self.assertEqual(retval, expected_retval)
+                expected_args = [ctxt, method, expected_msg]
+                for arg, expected_arg in zip(self.fake_args, expected_args):
+                    self.assertEqual(arg, expected_arg)
 
-    @mock.patch('cinder.openstack.common.rpc.fanout_cast')
-    def test_update_service_capabilities(self, _mock_rpc_method):
+    def test_update_service_capabilities(self):
         self._test_scheduler_api('update_service_capabilities',
-                                 rpc_method='fanout_cast',
-                                 _mock_method=_mock_rpc_method,
+                                 rpc_method='cast',
                                  service_name='fake_name',
                                  host='fake_host',
-                                 capabilities='fake_capabilities')
+                                 capabilities='fake_capabilities',
+                                 fanout=True)
 
-    @mock.patch('cinder.openstack.common.rpc.cast')
-    def test_create_volume(self, _mock_rpc_method):
+    def test_create_volume(self):
         self._test_scheduler_api('create_volume',
                                  rpc_method='cast',
-                                 _mock_method=_mock_rpc_method,
                                  topic='topic',
                                  volume_id='volume_id',
                                  snapshot_id='snapshot_id',
@@ -86,11 +95,9 @@ class SchedulerRpcAPITestCase(test.TestCase):
                                  filter_properties='filter_properties',
                                  version='1.2')
 
-    @mock.patch('cinder.openstack.common.rpc.cast')
-    def test_migrate_volume_to_host(self, _mock_rpc_method):
+    def test_migrate_volume_to_host(self):
         self._test_scheduler_api('migrate_volume_to_host',
                                  rpc_method='cast',
-                                 _mock_method=_mock_rpc_method,
                                  topic='topic',
                                  volume_id='volume_id',
                                  host='host',
@@ -99,22 +106,18 @@ class SchedulerRpcAPITestCase(test.TestCase):
                                  filter_properties='filter_properties',
                                  version='1.3')
 
-    @mock.patch('cinder.openstack.common.rpc.cast')
-    def test_retype(self, _mock_rpc_method):
+    def test_retype(self):
         self._test_scheduler_api('retype',
                                  rpc_method='cast',
-                                 _mock_method=_mock_rpc_method,
                                  topic='topic',
                                  volume_id='volume_id',
                                  request_spec='fake_request_spec',
                                  filter_properties='filter_properties',
                                  version='1.4')
 
-    @mock.patch('cinder.openstack.common.rpc.cast')
-    def test_manage_existing(self, _mock_rpc_method):
+    def test_manage_existing(self):
         self._test_scheduler_api('manage_existing',
                                  rpc_method='cast',
-                                 _mock_method=_mock_rpc_method,
                                  topic='topic',
                                  volume_id='volume_id',
                                  request_spec='fake_request_spec',
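Condensed, the assertion pattern the refactored tests above rely on looks like the following self-contained sketch, which uses a bare mock in place of the real rpcapi client; the target values and method name are placeholders:

import mock

client = mock.Mock()
expected_target = {'fanout': True, 'version': '1.0'}


def fake_prepare(**kwargs):
    # Verify the kwargs passed to prepare() against the expected target.
    for key, value in kwargs.items():
        assert expected_target[key] == value
    return client

client.prepare.side_effect = fake_prepare

cctxt = client.prepare(fanout=True, version='1.0')
cctxt.cast({}, 'update_service_capabilities', service_name='fake_name')
client.cast.assert_called_once_with({}, 'update_service_capabilities',
                                    service_name='fake_name')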
index 1974ce00028e335c467a7730fbc574d863a38d35..586d3769a8bd5c7513407f6841ac43808313f4db 100644 (file)
@@ -29,7 +29,6 @@ from xml.dom.minidom import Document
 
 from cinder import exception
 from cinder import test
-from cinder import utils
 from cinder.volume import configuration as conf
 from cinder.volume.drivers.huawei import huawei_hvs
 from cinder.volume.drivers.huawei import rest_common
@@ -103,7 +102,10 @@ class FakeHVSCommon(rest_common.HVSCommon):
         return params
 
     def _change_file_mode(self, filepath):
-        utils.execute('chmod', '777', filepath)
+        # NOTE(flaper87): Changing file permissions is
+        # not needed since we're using a tempfile created
+        # within this test.
+        pass
 
     def call(self, url=False, data=None, method=None):
 
@@ -509,6 +511,7 @@ class HVSRESTiSCSIDriverTestCase(test.TestCase):
         self.configuration.append_config_values(mox.IgnoreArg())
 
         self.stubs.Set(time, 'sleep', Fake_sleep)
+        #self.stubs.Set(greenthread, 'sleep', Fake_sleep)
 
         self.driver = FakeHVSiSCSIStorage(configuration=self.configuration)
         self.driver.do_setup({})
index 03017643a99ad8577541a7ac80595da4424724cf..62a87d555b478bde9f22c37020eb8ab37fa937de 100644 (file)
@@ -26,7 +26,6 @@ from cinder import db
 from cinder.db.sqlalchemy import api as sqa_api
 from cinder.db.sqlalchemy import models as sqa_models
 from cinder import exception
-from cinder.openstack.common import rpc
 from cinder.openstack.common import timeutils
 from cinder import quota
 from cinder import test
@@ -55,12 +54,6 @@ class QuotaIntegrationTestCase(test.TestCase):
         self.context = context.RequestContext(self.user_id,
                                               self.project_id,
                                               is_admin=True)
-        orig_rpc_call = rpc.call
-
-        def rpc_call_wrapper(context, topic, msg, timeout=None):
-            return orig_rpc_call(context, topic, msg)
-
-        self.stubs.Set(rpc, 'call', rpc_call_wrapper)
 
         # Destroy the 'default' quota_class in the database to avoid
         # conflicts with the test cases here that are setting up their own
index b4c1af7abba013aed03d6d7702635ecf50195310..a2efd3b5860ae0dcae4d555414c694160ea87c1e 100644 (file)
 
 """Tests for the testing base code."""
 
-from cinder.openstack.common import rpc
+from oslo.config import cfg
+from oslo import messaging
+
+from cinder import rpc
 from cinder import test
 
 
@@ -37,7 +40,7 @@ class IsolationTestCase(test.TestCase):
             def __getattribute__(*args):
                 assert False, "I should never get called."
 
-        connection = rpc.create_connection(new=True)
-        proxy = NeverCalled()
-        connection.create_consumer('volume', proxy, fanout=False)
-        connection.consume_in_thread()
+        server = rpc.get_server(messaging.Target(topic='volume',
+                                                 server=cfg.CONF.host),
+                                endpoints=[NeverCalled()])
+        server.start()
index d70bb4766ae76ccc9cc6570a844af70ae103b0c5..631e8e966b8c098918b4cf5124d0fd40ea60195f 100644 (file)
@@ -43,14 +43,12 @@ from cinder import keymgr
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import importutils
 from cinder.openstack.common import jsonutils
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
-from cinder.openstack.common import rpc
 import cinder.policy
 from cinder import quota
 from cinder import test
 from cinder.tests.brick.fake_lvm import FakeBrickLVM
 from cinder.tests import conf_fixture
+from cinder.tests import fake_notifier
 from cinder.tests.image import fake as fake_image
 from cinder.tests.keymgr import fake as fake_keymgr
 from cinder.tests import utils as tests_utils
@@ -97,7 +95,7 @@ class BaseVolumeTestCase(test.TestCase):
             "BaseVolumeTestCase")
         vol_tmpdir = tempfile.mkdtemp()
         self.flags(volumes_dir=vol_tmpdir,
-                   notification_driver=[test_notifier.__name__])
+                   notification_driver=["test"])
         self.volume = importutils.import_object(CONF.volume_manager)
         self.context = context.get_admin_context()
         self.context.user_id = 'fake'
@@ -111,7 +109,6 @@ class BaseVolumeTestCase(test.TestCase):
                        'get_all_volume_groups',
                        self.fake_get_all_volume_groups)
         fake_image.stub_out_image_service(self.stubs)
-        test_notifier.NOTIFICATIONS = []
         self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
         self.stubs.Set(os.path, 'exists', lambda x: True)
         self.volume.driver.set_initialized()
@@ -124,7 +121,7 @@ class BaseVolumeTestCase(test.TestCase):
             shutil.rmtree(CONF.volumes_dir)
         except OSError:
             pass
-        notifier_api._reset_drivers()
+        fake_notifier.reset()
         super(BaseVolumeTestCase, self).tearDown()
 
     def fake_get_target(obj, iqn):
@@ -180,7 +177,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         volume_id = volume['id']
         self.assertIsNone(volume['encryption_key_id'])
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(exception.DriverNotInitialized,
                           self.volume.create_volume,
                           self.context, volume_id)
@@ -214,7 +211,7 @@ class VolumeTestCase(BaseVolumeTestCase):
 
         volume_id = volume['id']
         self.assertIsNone(volume['encryption_key_id'])
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.assertRaises(exception.DriverNotInitialized,
                           self.volume.delete_volume,
                           self.context, volume_id)
@@ -246,10 +243,10 @@ class VolumeTestCase(BaseVolumeTestCase):
             **self.volume_params)
         volume_id = volume['id']
         self.assertIsNone(volume['encryption_key_id'])
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.volume.create_volume(self.context, volume_id)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
-        msg = test_notifier.NOTIFICATIONS[0]
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        msg = fake_notifier.NOTIFICATIONS[0]
         self.assertEqual(msg['event_type'], 'volume.create.start')
         expected = {
             'status': 'creating',
@@ -265,7 +262,7 @@ class VolumeTestCase(BaseVolumeTestCase):
             'size': 1,
         }
         self.assertDictMatch(msg['payload'], expected)
-        msg = test_notifier.NOTIFICATIONS[1]
+        msg = fake_notifier.NOTIFICATIONS[1]
         self.assertEqual(msg['event_type'], 'volume.create.end')
         expected['status'] = 'available'
         self.assertDictMatch(msg['payload'], expected)
@@ -276,11 +273,11 @@ class VolumeTestCase(BaseVolumeTestCase):
         vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
                             volume_id)
         self.assertEqual(vol['status'], 'deleted')
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
-        msg = test_notifier.NOTIFICATIONS[2]
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
+        msg = fake_notifier.NOTIFICATIONS[2]
         self.assertEqual(msg['event_type'], 'volume.delete.start')
         self.assertDictMatch(msg['payload'], expected)
-        msg = test_notifier.NOTIFICATIONS[3]
+        msg = fake_notifier.NOTIFICATIONS[3]
         self.assertEqual(msg['event_type'], 'volume.delete.end')
         self.assertDictMatch(msg['payload'], expected)
         self.assertRaises(exception.NotFound,
@@ -1478,16 +1475,16 @@ class VolumeTestCase(BaseVolumeTestCase):
             self.context,
             availability_zone=CONF.storage_availability_zone,
             **self.volume_params)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
         self.volume.create_volume(self.context, volume['id'])
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
         snapshot_id = self._create_snapshot(volume['id'])['id']
         self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
         self.assertEqual(snapshot_id,
                          db.snapshot_get(context.get_admin_context(),
                                          snapshot_id).id)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 4)
-        msg = test_notifier.NOTIFICATIONS[2]
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
+        msg = fake_notifier.NOTIFICATIONS[2]
         self.assertEqual(msg['event_type'], 'snapshot.create.start')
         expected = {
             'created_at': 'DONTCARE',
@@ -1502,17 +1499,17 @@ class VolumeTestCase(BaseVolumeTestCase):
             'availability_zone': 'nova'
         }
         self.assertDictMatch(msg['payload'], expected)
-        msg = test_notifier.NOTIFICATIONS[3]
+        msg = fake_notifier.NOTIFICATIONS[3]
         self.assertEqual(msg['event_type'], 'snapshot.create.end')
         self.assertDictMatch(msg['payload'], expected)
 
         self.volume.delete_snapshot(self.context, snapshot_id)
-        self.assertEqual(len(test_notifier.NOTIFICATIONS), 6)
-        msg = test_notifier.NOTIFICATIONS[4]
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6)
+        msg = fake_notifier.NOTIFICATIONS[4]
         self.assertEqual(msg['event_type'], 'snapshot.delete.start')
         expected['status'] = 'available'
         self.assertDictMatch(msg['payload'], expected)
-        msg = test_notifier.NOTIFICATIONS[5]
+        msg = fake_notifier.NOTIFICATIONS[5]
         self.assertEqual(msg['event_type'], 'snapshot.delete.end')
         self.assertDictMatch(msg['payload'], expected)
 
@@ -1653,9 +1650,6 @@ class VolumeTestCase(BaseVolumeTestCase):
     def test_create_snapshot_force(self):
         """Test snapshot in use can be created forcibly."""
 
-        def fake_cast(ctxt, topic, msg):
-            pass
-        self.stubs.Set(rpc, 'cast', fake_cast)
         instance_uuid = '12345678-1234-5678-1234-567812345678'
         # create volume and attach to the instance
         volume = tests_utils.create_volume(self.context, **self.volume_params)
index 0641805e82bd8ac73f5f7c47a31364237680ffdc..8afc8527791fb750dd7c40209acd5ac5bfc3085b 100644 (file)
 """
 Unit Tests for cinder.volume.rpcapi
 """
-
+import copy
 
 from oslo.config import cfg
 
 from cinder import context
 from cinder import db
 from cinder.openstack.common import jsonutils
-from cinder.openstack.common import rpc
 from cinder import test
 from cinder.volume import rpcapi as volume_rpcapi
 
@@ -70,60 +69,74 @@ class VolumeRpcAPITestCase(test.TestCase):
         rpcapi = rpcapi_class()
         expected_retval = 'foo' if method == 'call' else None
 
-        expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
+        target = {
+            "version": kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
+        }
 
         if 'request_spec' in kwargs:
             spec = jsonutils.to_primitive(kwargs['request_spec'])
             kwargs['request_spec'] = spec
 
-        expected_msg = rpcapi.make_msg(method, **kwargs)
-        if 'volume' in expected_msg['args']:
-            volume = expected_msg['args']['volume']
-            del expected_msg['args']['volume']
-            expected_msg['args']['volume_id'] = volume['id']
-        if 'snapshot' in expected_msg['args']:
-            snapshot = expected_msg['args']['snapshot']
-            del expected_msg['args']['snapshot']
-            expected_msg['args']['snapshot_id'] = snapshot['id']
-        if 'host' in expected_msg['args']:
-            del expected_msg['args']['host']
-        if 'dest_host' in expected_msg['args']:
-            dest_host = expected_msg['args']['dest_host']
+        expected_msg = copy.deepcopy(kwargs)
+        if 'volume' in expected_msg:
+            volume = expected_msg['volume']
+            del expected_msg['volume']
+            expected_msg['volume_id'] = volume['id']
+        if 'snapshot' in expected_msg:
+            snapshot = expected_msg['snapshot']
+            del expected_msg['snapshot']
+            expected_msg['snapshot_id'] = snapshot['id']
+        if 'host' in expected_msg:
+            del expected_msg['host']
+        if 'dest_host' in expected_msg:
+            dest_host = expected_msg['dest_host']
             dest_host_dict = {'host': dest_host.host,
                               'capabilities': dest_host.capabilities}
-            del expected_msg['args']['dest_host']
-            expected_msg['args']['host'] = dest_host_dict
-        if 'new_volume' in expected_msg['args']:
-            volume = expected_msg['args']['new_volume']
-            del expected_msg['args']['new_volume']
-            expected_msg['args']['new_volume_id'] = volume['id']
-
-        expected_msg['version'] = expected_version
+            del expected_msg['dest_host']
+            expected_msg['host'] = dest_host_dict
+        if 'new_volume' in expected_msg:
+            volume = expected_msg['new_volume']
+            del expected_msg['new_volume']
+            expected_msg['new_volume_id'] = volume['id']
 
         if 'host' in kwargs:
             host = kwargs['host']
         else:
             host = kwargs['volume']['host']
-        expected_topic = '%s.%s' % (CONF.volume_topic, host)
+
+        target['server'] = host
+        target['topic'] = '%s.%s' % (CONF.volume_topic, host)
 
         self.fake_args = None
         self.fake_kwargs = None
 
+        real_prepare = rpcapi.client.prepare
+
+        def _fake_prepare_method(*args, **kwds):
+            for kwd in kwds:
+                self.assertEqual(kwds[kwd], target[kwd])
+            return rpcapi.client
+
         def _fake_rpc_method(*args, **kwargs):
             self.fake_args = args
             self.fake_kwargs = kwargs
             if expected_retval:
                 return expected_retval
 
-        self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
+        self.stubs.Set(rpcapi.client, "prepare", _fake_prepare_method)
+        self.stubs.Set(rpcapi.client, rpc_method, _fake_rpc_method)
 
         retval = getattr(rpcapi, method)(ctxt, **kwargs)
 
         self.assertEqual(retval, expected_retval)
-        expected_args = [ctxt, expected_topic, expected_msg]
+        expected_args = [ctxt, method]
+
         for arg, expected_arg in zip(self.fake_args, expected_args):
             self.assertEqual(arg, expected_arg)
 
+        for kwarg, value in self.fake_kwargs.items():
+            self.assertEqual(value, expected_msg[kwarg])
+
     def test_create_volume(self):
         self._test_volume_api('create_volume',
                               rpc_method='cast',
index 403fdbcc770b32db09717609e371ab666e7653c6..fd120013c18966dc134abb7a815404aee141d53a 100644 (file)
@@ -25,9 +25,8 @@ from cinder import db
 from cinder import exception
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.notifier import api as notifier_api
-from cinder.openstack.common.notifier import test_notifier
 from cinder import test
+from cinder.tests import fake_notifier
 from cinder import utils
 from cinder.volume import utils as volume_utils
 
@@ -47,19 +46,17 @@ class UsageInfoTestCase(test.TestCase):
 
     def setUp(self):
         super(UsageInfoTestCase, self).setUp()
-        self.flags(host='fake',
-                   notification_driver=[test_notifier.__name__])
+        self.flags(host='fake', notification_driver=["test"])
         self.volume = importutils.import_object(CONF.volume_manager)
         self.user_id = 'fake'
         self.project_id = 'fake'
         self.snapshot_id = 'fake'
         self.volume_size = 0
         self.context = context.RequestContext(self.user_id, self.project_id)
-        test_notifier.NOTIFICATIONS = []
 
     def tearDown(self):
-        notifier_api._reset_drivers()
         super(UsageInfoTestCase, self).tearDown()
+        fake_notifier.reset()
 
     def _create_volume(self, params={}):
         """Create a test volume."""
index d9f6bd754ce98cd3ddc3cee0df35ec1df101c669..2b806d9882685991f0d0ebcf29a2640dd4dcfbc4 100644 (file)
@@ -39,6 +39,7 @@ intact.
 import time
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder import compute
 from cinder import context
@@ -171,6 +172,8 @@ class VolumeManager(manager.SchedulerDependentManager):
 
     RPC_API_VERSION = '1.15'
 
+    target = messaging.Target(version=RPC_API_VERSION)
+
     def __init__(self, volume_driver=None, service_name=None,
                  *args, **kwargs):
         """Load the driver from the one specified in args, or from flags."""
index dca2f378c31bd553dbf32638bfe62fe454ae7f26..ee3a710aa049bb6d2d5840b26f91344f83d3db52 100644 (file)
@@ -17,16 +17,16 @@ Client side of the volume RPC API.
 """
 
 from oslo.config import cfg
+from oslo import messaging
 
 from cinder.openstack.common import jsonutils
-from cinder.openstack.common import rpc
-import cinder.openstack.common.rpc.proxy
+from cinder import rpc
 
 
 CONF = cfg.CONF
 
 
-class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
+class VolumeAPI(object):
     '''Client side of the volume rpc API.
 
     API version history:
@@ -55,9 +55,10 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
     BASE_RPC_API_VERSION = '1.0'
 
     def __init__(self, topic=None):
-        super(VolumeAPI, self).__init__(
-            topic=topic or CONF.volume_topic,
-            default_version=self.BASE_RPC_API_VERSION)
+        super(VolumeAPI, self).__init__()
+        target = messaging.Target(topic=CONF.volume_topic,
+                                  version=self.BASE_RPC_API_VERSION)
+        self.client = rpc.get_client(target, '1.15')
 
     def create_volume(self, ctxt, volume, host,
                       request_spec, filter_properties,
@@ -65,156 +66,105 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
                       snapshot_id=None, image_id=None,
                       source_volid=None):
 
+        cctxt = self.client.prepare(server=host, version='1.4')
         request_spec_p = jsonutils.to_primitive(request_spec)
-        self.cast(ctxt,
-                  self.make_msg('create_volume',
-                                volume_id=volume['id'],
-                                request_spec=request_spec_p,
-                                filter_properties=filter_properties,
-                                allow_reschedule=allow_reschedule,
-                                snapshot_id=snapshot_id,
-                                image_id=image_id,
-                                source_volid=source_volid),
-                  topic=rpc.queue_get_for(ctxt,
-                                          self.topic,
-                                          host),
-                  version='1.4')
+        cctxt.cast(ctxt, 'create_volume',
+                   volume_id=volume['id'],
+                   request_spec=request_spec_p,
+                   filter_properties=filter_properties,
+                   allow_reschedule=allow_reschedule,
+                   snapshot_id=snapshot_id,
+                   image_id=image_id,
+                   source_volid=source_volid)
 
     def delete_volume(self, ctxt, volume, unmanage_only=False):
-        self.cast(ctxt,
-                  self.make_msg('delete_volume',
-                                volume_id=volume['id'],
-                                unmanage_only=unmanage_only),
-                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
-                  version='1.15')
+        cctxt = self.client.prepare(server=volume['host'], version='1.15')
+        cctxt.cast(ctxt, 'delete_volume',
+                   volume_id=volume['id'],
+                   unmanage_only=unmanage_only)
 
     def create_snapshot(self, ctxt, volume, snapshot):
-        self.cast(ctxt, self.make_msg('create_snapshot',
-                                      volume_id=volume['id'],
-                                      snapshot_id=snapshot['id']),
-                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']))
+        cctxt = self.client.prepare(server=volume['host'])
+        cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'],
+                   snapshot_id=snapshot['id'])
 
     def delete_snapshot(self, ctxt, snapshot, host):
-        self.cast(ctxt, self.make_msg('delete_snapshot',
-                                      snapshot_id=snapshot['id']),
-                  topic=rpc.queue_get_for(ctxt, self.topic, host))
+        cctxt = self.client.prepare(server=host)
+        cctxt.cast(ctxt, 'delete_snapshot', snapshot_id=snapshot['id'])
 
     def attach_volume(self, ctxt, volume, instance_uuid, host_name,
                       mountpoint, mode):
-        return self.call(ctxt, self.make_msg('attach_volume',
-                                             volume_id=volume['id'],
-                                             instance_uuid=instance_uuid,
-                                             host_name=host_name,
-                                             mountpoint=mountpoint,
-                                             mode=mode),
-                         topic=rpc.queue_get_for(ctxt,
-                                                 self.topic,
-                                                 volume['host']),
-                         version='1.11')
+
+        cctxt = self.client.prepare(server=volume['host'], version='1.11')
+        return cctxt.call(ctxt, 'attach_volume',
+                          volume_id=volume['id'],
+                          instance_uuid=instance_uuid,
+                          host_name=host_name,
+                          mountpoint=mountpoint,
+                          mode=mode)
 
     def detach_volume(self, ctxt, volume):
-        return self.call(ctxt, self.make_msg('detach_volume',
-                                             volume_id=volume['id']),
-                         topic=rpc.queue_get_for(ctxt,
-                                                 self.topic,
-                                                 volume['host']))
+        cctxt = self.client.prepare(server=volume['host'])
+        return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'])
 
     def copy_volume_to_image(self, ctxt, volume, image_meta):
-        self.cast(ctxt, self.make_msg('copy_volume_to_image',
-                                      volume_id=volume['id'],
-                                      image_meta=image_meta),
-                  topic=rpc.queue_get_for(ctxt,
-                                          self.topic,
-                                          volume['host']),
-                  version='1.3')
+        cctxt = self.client.prepare(server=volume['host'], version='1.3')
+        cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
+                   image_meta=image_meta)
 
     def initialize_connection(self, ctxt, volume, connector):
-        return self.call(ctxt, self.make_msg('initialize_connection',
-                                             volume_id=volume['id'],
-                                             connector=connector),
-                         topic=rpc.queue_get_for(ctxt,
-                                                 self.topic,
-                                                 volume['host']))
+        cctxt = self.client.prepare(server=volume['host'])
+        return cctxt.call(ctxt, 'initialize_connection',
+                          volume_id=volume['id'],
+                          connector=connector)
 
     def terminate_connection(self, ctxt, volume, connector, force=False):
-        return self.call(ctxt, self.make_msg('terminate_connection',
-                                             volume_id=volume['id'],
-                                             connector=connector,
-                                             force=force),
-                         topic=rpc.queue_get_for(ctxt,
-                                                 self.topic,
-                                                 volume['host']))
+        cctxt = self.client.prepare(server=volume['host'])
+        return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
+                          connector=connector, force=force)
 
     def publish_service_capabilities(self, ctxt):
-        self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'),
-                         version='1.2')
+        cctxt = self.client.prepare(fanout=True, version='1.2')
+        cctxt.cast(ctxt, 'publish_service_capabilities')
 
     def accept_transfer(self, ctxt, volume, new_user, new_project):
-        self.cast(ctxt,
-                  self.make_msg('accept_transfer',
-                                volume_id=volume['id'],
-                                new_user=new_user,
-                                new_project=new_project),
-                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
-                  version='1.9')
+        cctxt = self.client.prepare(server=volume['host'], version='1.9')
+        cctxt.cast(ctxt, 'accept_transfer', volume_id=volume['id'],
+                   new_user=new_user, new_project=new_project)
 
     def extend_volume(self, ctxt, volume, new_size, reservations):
-        self.cast(ctxt,
-                  self.make_msg('extend_volume',
-                                volume_id=volume['id'],
-                                new_size=new_size,
-                                reservations=reservations),
-                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
-                  version='1.14')
+        cctxt = self.client.prepare(server=volume['host'], version='1.14')
+        cctxt.cast(ctxt, 'extend_volume', volume_id=volume['id'],
+                   new_size=new_size, reservations=reservations)
 
     def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
+        cctxt = self.client.prepare(server=volume['host'], version='1.8')
         host_p = {'host': dest_host.host,
                   'capabilities': dest_host.capabilities}
-        self.cast(ctxt,
-                  self.make_msg('migrate_volume',
-                                volume_id=volume['id'],
-                                host=host_p,
-                                force_host_copy=force_host_copy),
-                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
-                  version='1.8')
+        cctxt.cast(ctxt, 'migrate_volume', volume_id=volume['id'],
+                   host=host_p, force_host_copy=force_host_copy)
 
     def migrate_volume_completion(self, ctxt, volume, new_volume, error):
-        return self.call(ctxt,
-                         self.make_msg('migrate_volume_completion',
-                                       volume_id=volume['id'],
-                                       new_volume_id=new_volume['id'],
-                                       error=error),
-                         topic=rpc.queue_get_for(ctxt, self.topic,
-                                                 volume['host']),
-                         version='1.10')
+        cctxt = self.client.prepare(server=volume['host'], version='1.10')
+        return cctxt.call(ctxt, 'migrate_volume_completion',
+                          volume_id=volume['id'],
+                          new_volume_id=new_volume['id'],
+                          error=error)
 
     def retype(self, ctxt, volume, new_type_id, dest_host,
                migration_policy='never', reservations=None):
+        cctxt = self.client.prepare(server=volume['host'], version='1.12')
         host_p = {'host': dest_host.host,
                   'capabilities': dest_host.capabilities}
-        self.cast(ctxt,
-                  self.make_msg('retype',
-                                volume_id=volume['id'],
-                                new_type_id=new_type_id,
-                                host=host_p,
-                                migration_policy=migration_policy,
-                                reservations=reservations),
-                  topic=rpc.queue_get_for(ctxt, self.topic, volume['host']),
-                  version='1.12')
+        cctxt.cast(ctxt, 'retype', volume_id=volume['id'],
+                   new_type_id=new_type_id, host=host_p,
+                   migration_policy=migration_policy,
+                   reservations=reservations)
 
     def create_export(self, ctxt, volume):
-        return self.call(ctxt, self.make_msg('create_export',
-                                             volume_id=volume['id']),
-                         topic=rpc.queue_get_for(ctxt,
-                                                 self.topic,
-                                                 volume['host']),
-                         version='1.13')
+        cctxt = self.client.prepare(server=volume['host'], version='1.13')
+        return cctxt.call(ctxt, 'create_export', volume_id=volume['id'])
 
     def manage_existing(self, ctxt, volume, ref):
-        return self.cast(ctxt, self.make_msg('manage_existing',
-                                             volume_id=volume['id'],
-                                             ref=ref),
-                         topic=rpc.queue_get_for(ctxt,
-                                                 self.topic,
-                                                 volume['host']),
-                         version='1.15')
+        cctxt = self.client.prepare(server=volume['host'], version='1.15')
+        cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)
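For readers comparing the old proxy calls with the new client, the prepare()/cast() flow in isolation looks like the following sketch, wired to oslo.messaging's in-memory fake transport so it can run standalone; the topic, server and id values below are placeholders rather than anything taken from this file:

from oslo.config import cfg
from oslo import messaging

transport = messaging.get_transport(cfg.CONF, url='fake://')
target = messaging.Target(topic='cinder-volume', version='1.0')
client = messaging.RPCClient(transport, target, version_cap='1.15')

# prepare() returns a call context with per-call overrides (server, version,
# fanout); cast() then sends without waiting for a reply, call() would wait.
cctxt = client.prepare(server='fakehost', version='1.15')
cctxt.cast({}, 'delete_volume', volume_id='fake-id', unmanage_only=False)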
index d356036501269c7d1ab2f4d164997ff3ee386848..9fea56c46d0b07b18d98335b8f707b09f33271e6 100644 (file)
@@ -22,9 +22,9 @@ from oslo.config import cfg
 from cinder.brick.local_dev import lvm as brick_lvm
 from cinder import exception
 from cinder.openstack.common import log as logging
-from cinder.openstack.common.notifier import api as notifier_api
 from cinder.openstack.common import processutils
 from cinder.openstack.common import strutils
+from cinder import rpc
 from cinder import units
 from cinder import utils
 
@@ -65,9 +65,8 @@ def notify_about_volume_usage(context, volume, event_suffix,
 
     usage_info = _usage_from_volume(context, volume, **extra_usage_info)
 
-    notifier_api.notify(context, 'volume.%s' % host,
-                        'volume.%s' % event_suffix,
-                        notifier_api.INFO, usage_info)
+    rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
+                                          usage_info)
 
 
 def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info):
@@ -98,9 +97,9 @@ def notify_about_snapshot_usage(context, snapshot, event_suffix,
 
     usage_info = _usage_from_snapshot(context, snapshot, **extra_usage_info)
 
-    notifier_api.notify(context, 'snapshot.%s' % host,
-                        'snapshot.%s' % event_suffix,
-                        notifier_api.INFO, usage_info)
+    rpc.get_notifier('snapshot', host).info(context,
+                                            'snapshot.%s' % event_suffix,
+                                            usage_info)
 
 
 def _calculate_count(size_in_m, blocksize):
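The notifier side in isolation, sketched with the 'noop' notification driver so nothing is actually emitted; publisher ids and payloads below are placeholders, and whether 'noop' is available depends on the installed oslo.messaging release:

from oslo.config import cfg
from oslo import messaging

transport = messaging.get_transport(cfg.CONF, url='fake://')
notifier = messaging.Notifier(transport, publisher_id='volume.fakehost',
                              driver='noop')
notifier.info({}, 'volume.create.end', {'volume_id': 'fake-id', 'size': 1})

# prepare() derives a notifier with a different publisher id.
snapshot_notifier = notifier.prepare(publisher_id='snapshot.fakehost')
snapshot_notifier.info({}, 'snapshot.create.end', {'snapshot_id': 'fake-id'})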
index a0d4e705538c2c92ad9164843c8e585893e7e853..e075bf1bcf20d8f3382100d95816c3815481e4f8 100644 (file)
 #syslog_log_facility=LOG_USER
 
 
-#
-# Options defined in cinder.openstack.common.notifier.api
-#
-
-# Driver or drivers to handle sending notifications (multi
-# valued)
-#notification_driver=
-
-# Default notification level for outgoing notifications
-# (string value)
-#default_notification_level=INFO
-
-# Default publisher_id for outgoing notifications (string
-# value)
-#default_publisher_id=<None>
-
-
-#
-# Options defined in cinder.openstack.common.notifier.rpc_notifier
-#
-
-# AMQP topic used for OpenStack notifications (list value)
-#notification_topics=notifications
-
-
 #
 # Options defined in cinder.openstack.common.periodic_task
 #
 #run_external_periodic_tasks=true
 
 
-#
-# Options defined in cinder.openstack.common.rpc
-#
-
-# The messaging module to use, defaults to kombu. (string
-# value)
-#rpc_backend=cinder.openstack.common.rpc.impl_kombu
-
-# Size of RPC thread pool (integer value)
-#rpc_thread_pool_size=64
-
-# Size of RPC connection pool (integer value)
-#rpc_conn_pool_size=30
-
-# Seconds to wait for a response from call or multicall
-# (integer value)
-#rpc_response_timeout=60
-
-# Seconds to wait before a cast expires (TTL). Only supported
-# by impl_zmq. (integer value)
-#rpc_cast_timeout=30
-
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call. (list value)
-#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions
-
-# If passed, use a fake RabbitMQ provider (boolean value)
-#fake_rabbit=false
-
-# AMQP exchange to connect to if using RabbitMQ or Qpid
-# (string value)
-#control_exchange=openstack
-
-
-#
-# Options defined in cinder.openstack.common.rpc.amqp
-#
-
-# Use durable queues in amqp. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-#amqp_durable_queues=false
-
-# Auto-delete queues in amqp. (boolean value)
-#amqp_auto_delete=false
-
-
-#
-# Options defined in cinder.openstack.common.rpc.impl_kombu
-#
-
-# SSL version to use (valid only if SSL enabled). valid values
-# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
-# distributions (string value)
-#kombu_ssl_version=
-
-# SSL key file (valid only if SSL enabled) (string value)
-#kombu_ssl_keyfile=
-
-# SSL cert file (valid only if SSL enabled) (string value)
-#kombu_ssl_certfile=
-
-# SSL certification authority file (valid only if SSL enabled)
-# (string value)
-#kombu_ssl_ca_certs=
-
-# The RabbitMQ broker address where a single node is used
-# (string value)
-#rabbit_host=localhost
-
-# The RabbitMQ broker port where a single node is used
-# (integer value)
-#rabbit_port=5672
-
-# RabbitMQ HA cluster host:port pairs (list value)
-#rabbit_hosts=$rabbit_host:$rabbit_port
-
-# connect over SSL for RabbitMQ (boolean value)
-#rabbit_use_ssl=false
-
-# the RabbitMQ userid (string value)
-#rabbit_userid=guest
-
-# the RabbitMQ password (string value)
-#rabbit_password=guest
-
-# the RabbitMQ virtual host (string value)
-#rabbit_virtual_host=/
-
-# how frequently to retry connecting with RabbitMQ (integer
-# value)
-#rabbit_retry_interval=1
-
-# how long to backoff for between retries when connecting to
-# RabbitMQ (integer value)
-#rabbit_retry_backoff=2
-
-# maximum retries with trying to connect to RabbitMQ (the
-# default of 0 implies an infinite retry count) (integer
-# value)
-#rabbit_max_retries=0
-
-# use H/A queues in RabbitMQ (x-ha-policy: all).You need to
-# wipe RabbitMQ database when changing this option. (boolean
-# value)
-#rabbit_ha_queues=false
-
-
-#
-# Options defined in cinder.openstack.common.rpc.impl_qpid
-#
-
-# Qpid broker hostname (string value)
-#qpid_hostname=localhost
-
-# Qpid broker port (integer value)
-#qpid_port=5672
-
-# Qpid HA cluster host:port pairs (list value)
-#qpid_hosts=$qpid_hostname:$qpid_port
-
-# Username for qpid connection (string value)
-#qpid_username=
-
-# Password for qpid connection (string value)
-#qpid_password=
-
-# Space separated list of SASL mechanisms to use for auth
-# (string value)
-#qpid_sasl_mechanisms=
-
-# Seconds between connection keepalive heartbeats (integer
-# value)
-#qpid_heartbeat=60
-
-# Transport to use, either 'tcp' or 'ssl' (string value)
-#qpid_protocol=tcp
-
-# Disable Nagle algorithm (boolean value)
-#qpid_tcp_nodelay=true
-
-# The qpid topology version to use.  Version 1 is what was
-# originally used by impl_qpid.  Version 2 includes some
-# backwards-incompatible changes that allow broker federation
-# to work.  Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break.
-# (integer value)
-#qpid_topology_version=1
-
-
-#
-# Options defined in cinder.openstack.common.rpc.impl_zmq
-#
-
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet
-# interface, or IP. The "host" option should point or resolve
-# to this address. (string value)
-#rpc_zmq_bind_address=*
-
-# MatchMaker driver (string value)
-#rpc_zmq_matchmaker=cinder.openstack.common.rpc.matchmaker.MatchMakerLocalhost
-
-# ZeroMQ receiver listening port (integer value)
-#rpc_zmq_port=9501
-
-# Number of ZeroMQ contexts, defaults to 1 (integer value)
-#rpc_zmq_contexts=1
-
-# Maximum number of ingress messages to locally buffer per
-# topic. Default is unlimited. (integer value)
-#rpc_zmq_topic_backlog=<None>
-
-# Directory for holding IPC sockets (string value)
-#rpc_zmq_ipc_dir=/var/run/openstack
-
-# Name of this node. Must be a valid hostname, FQDN, or IP
-# address. Must match "host" option, if running Nova. (string
-# value)
-#rpc_zmq_host=cinder
-
-
-#
-# Options defined in cinder.openstack.common.rpc.matchmaker
-#
-
-# Heartbeat frequency (integer value)
-#matchmaker_heartbeat_freq=300
-
-# Heartbeat time-to-live. (integer value)
-#matchmaker_heartbeat_ttl=600
-
-
 #
 # Options defined in cinder.scheduler.driver
 #
 #enforce_token_bind=permissive
 
 
-[matchmaker_redis]
-
-#
-# Options defined in cinder.openstack.common.rpc.matchmaker_redis
-#
-
-# Host to locate redis (string value)
-#host=127.0.0.1
-
-# Use this port to connect to redis host. (integer value)
-#port=6379
-
-# Password for Redis server. (optional) (string value)
-#password=<None>
-
-
-[matchmaker_ring]
-
-#
-# Options defined in cinder.openstack.common.rpc.matchmaker_ring
-#
-
-# Matchmaker ring file (JSON) (string value)
-# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
-#ringfile=/etc/oslo/matchmaker_ring.json
-
-
-[rpc_notifier2]
-
-#
-# Options defined in cinder.openstack.common.notifier.rpc_notifier2
-#
-
-# AMQP topic(s) used for OpenStack notifications (list value)
-#topics=notifications
-
-
 [ssl]
 
 #
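
The transport and notifier options deleted from the sample above are not lost to operators: once a transport is created, oslo.messaging registers equivalent options itself. A minimal sketch, assuming the oslo.messaging >= 1.3 API (imported through the oslo namespace package used at the time) and a placeholder broker URL that is not taken from this change:

    from oslo.config import cfg
    from oslo import messaging

    conf = cfg.ConfigOpts()
    conf([], project='cinder')

    # Settings in the style of the rabbit_host/rabbit_port/rabbit_userid/
    # rabbit_password options removed above, expressed as a transport URL
    # handled by oslo.messaging's rabbit driver; the URL is illustrative.
    transport = messaging.get_transport(
        conf, url='rabbit://guest:guest@localhost:5672/')
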
index 44e5a96e2c3e4384eb3b877edabb61d1e7cada2e..46de164265ffcb68dc8386ad3e6d9a4a127a04e3 100644
@@ -19,13 +19,11 @@ module=log
 module=log_handler
 module=middleware
 module=network_utils
-module=notifier
 module=periodic_task
 module=policy
 module=processutils
 module=request_utils
 module=rootwrap
-module=rpc
 module=scheduler
 module=scheduler.filters
 module=scheduler.weights
index c21d847f53346db4c066d09392d7ec31c3818d9e..1d7ca66076f36c9081bebf284e4c6b2e6e95d683 100644
@@ -10,6 +10,7 @@ kombu>=2.4.8
 lxml>=2.3
 netaddr>=0.7.6
 oslo.config>=1.2.0
+oslo.messaging>=1.3.0a4
 oslo.rootwrap
 paramiko>=1.9.0
 Paste
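
With oslo.messaging>=1.3.0a4 added above, RPC calls flow through the library's Target/RPCClient/RPC-server API rather than the removed cinder.openstack.common.rpc code. A minimal sketch under that assumption, using the in-process 'fake' driver and a made-up topic and endpoint purely for illustration:

    from oslo.config import cfg
    from oslo import messaging

    conf = cfg.ConfigOpts()
    conf([], project='cinder')

    # In-process fake driver, so the sketch runs without a broker.
    transport = messaging.get_transport(conf, url='fake://')
    target = messaging.Target(topic='demo-topic', version='1.0')

    class DemoEndpoint(object):
        # Method name and argument are illustrative, not a Cinder RPC API.
        def ping(self, ctxt, arg):
            return 'pong: %s' % arg

    server = messaging.get_rpc_server(transport, target, [DemoEndpoint()],
                                      executor='blocking')
    client = messaging.RPCClient(transport, target)
    # Once server.start() is running (a real service would pick a suitable
    # executor), client.call({}, 'ping', arg='x') would dispatch to
    # DemoEndpoint.ping() and return its result.
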
index e9ccac6637f4d49322687248685abbca355ed141..eb8ecc854e0546966ce7df997ec4357275a52037 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -50,6 +50,13 @@ cinder.scheduler.weights =
     ChanceWeigher = cinder.scheduler.weights.chance:ChanceWeigher
 console_scripts =
     cinder-rootwrap = oslo.rootwrap.cmd:main
+# These are for backwards compat with Havana notification_driver configuration values
+oslo.messaging.notify.drivers =
+    cinder.openstack.common.notifier.log_notifier = oslo.messaging.notify._impl_log:LogDriver
+    cinder.openstack.common.notifier.no_op_notifier = oslo.messaging.notify._impl_noop:NoOpDriver
+    cinder.openstack.common.notifier.rpc_notifier2 = oslo.messaging.notify._impl_messaging:MessagingV2Driver
+    cinder.openstack.common.notifier.rpc_notifier = oslo.messaging.notify._impl_messaging:MessagingDriver
+    cinder.openstack.common.notifier.test_notifier = oslo.messaging.notify._impl_test:TestDriver
 
 [build_sphinx]
 all_files = 1
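
The oslo.messaging.notify.drivers entry points added in the setup.cfg hunk above let a notification_driver value carried over from Havana keep resolving to a real oslo.messaging driver. A minimal sketch of that mapping, assuming oslo.messaging >= 1.3 and an installed cinder that ships these aliases; the publisher_id and payload are made up:

    from oslo.config import cfg
    from oslo import messaging

    conf = cfg.ConfigOpts()
    conf([], project='cinder')
    transport = messaging.get_transport(conf, url='fake://')

    # The legacy name maps to oslo.messaging's LogDriver through the
    # oslo.messaging.notify.drivers aliases declared above; without the
    # aliases the canonical driver name 'log' would be used instead.
    notifier = messaging.Notifier(
        transport,
        publisher_id='volume.localhost',
        driver='cinder.openstack.common.notifier.log_notifier')

    notifier.info({}, 'volume.create.start', {'volume_id': 'demo'})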