from oslo.db import exception as db_exc
from oslo.db import options
from oslo.db.sqlalchemy import session as db_session
+import osprofiler.sqlalchemy
+import sqlalchemy
from sqlalchemy import or_
from sqlalchemy.orm import joinedload, joinedload_all
from sqlalchemy.orm import RelationshipProperty
CONF = cfg.CONF
+CONF.import_group("profiler", "cinder.service")
LOG = logging.getLogger(__name__)
options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite')
CONF.database.connection,
**dict(CONF.database.iteritems())
)
+
+ if CONF.profiler.profiler_enabled:
+ if CONF.profiler.trace_sqlalchemy:
+ osprofiler.sqlalchemy.add_tracing(sqlalchemy,
+ _FACADE.get_engine(),
+ "db")
+
return _FACADE
from oslo.config import cfg
from oslo import messaging
+from osprofiler import profiler
import cinder.context
import cinder.exception
return self._base.deserialize_entity(context, entity)
def serialize_context(self, context):
-        return context.to_dict()
+        _context = context.to_dict()
+        # If an osprofiler profiler is active in this thread, piggyback
+        # its trace parameters onto the serialized context so the remote
+        # service can continue the same trace (see deserialize_context).
+        prof = profiler.get()
+        if prof:
+            trace_info = {
+                "hmac_key": prof.hmac_key,
+                "base_id": prof.get_base_id(),
+                "parent_id": prof.get_id()
+            }
+            _context.update({"trace_info": trace_info})
+        return _context
def deserialize_context(self, context):
+        # Pop the trace info the sender may have attached (it is not part
+        # of RequestContext) and initialize a local profiler so this
+        # service continues the caller's trace.
+        trace_info = context.pop("trace_info", None)
+        if trace_info:
+            profiler.init(**trace_info)
+
        return cinder.context.RequestContext.from_dict(context)
from oslo.config import cfg
from oslo import messaging
+import osprofiler.notifier
+from osprofiler import profiler
+import osprofiler.web
from cinder import context
from cinder import db
help='Number of workers for OpenStack Volume API service. '
'The default is equal to the number of CPUs available.'), ]
+profiler_opts = [
+ cfg.BoolOpt("profiler_enabled", default=False,
+ help=_('If False fully disable profiling feature.')),
+ cfg.BoolOpt("trace_sqlalchemy", default=False,
+ help=_("If False doesn't trace SQL requests."))
+]
+
CONF = cfg.CONF
CONF.register_opts(service_opts)
+CONF.register_opts(profiler_opts, group="profiler")
+
+
+def setup_profiler(binary, host):
+    """Set up OSProfiler for this service, or disable it.
+
+    When [profiler]/profiler_enabled is set, install a Messaging-based
+    osprofiler notifier (traces are sent over the existing RPC transport)
+    and warn the operator about the security implications; otherwise
+    disable the osprofiler WSGI middleware entirely.
+
+    :param binary: service binary name, e.g. 'cinder-volume'
+    :param host: host name the service runs on
+    """
+    if CONF.profiler.profiler_enabled:
+        _notifier = osprofiler.notifier.create(
+            "Messaging", messaging, context.get_admin_context().to_dict(),
+            rpc.TRANSPORT, "cinder", binary, host)
+        osprofiler.notifier.set(_notifier)
+        # NOTE: the disable instruction below must name the real option,
+        # profiler_enabled (registered above), not "enabled".
+        LOG.warning("OSProfiler is enabled.\nIt means that person who knows "
+                    "any of hmac_keys that are specified in "
+                    "/etc/cinder/api-paste.ini can trace his requests. \n"
+                    "In real life only operator can read this file so there "
+                    "is no security issue. Note that even if person can "
+                    "trigger profiler, only admin user can retrieve trace "
+                    "information.\n"
+                    "To disable OSProfiler set in cinder.conf:\n"
+                    "[profiler]\nprofiler_enabled=false")
+    else:
+        osprofiler.web.disable()
class Service(service.Service):
self.topic = topic
self.manager_class_name = manager
manager_class = importutils.import_class(self.manager_class_name)
+ manager_class = profiler.trace_cls("rpc")(manager_class)
+
self.manager = manager_class(host=self.host,
service_name=service_name,
*args, **kwargs)
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
+ setup_profiler(binary, host)
+
def start(self):
version_string = version.version_string()
LOG.info(_('Starting %(topic)s node (version %(version_string)s)'),
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = getattr(CONF, '%s_workers' % name,
processutils.get_worker_count())
+ setup_profiler(name, self.host)
+
if self.workers < 1:
LOG.warn(_("Value of config option %(name)s_workers must be "
"integer greater than 1. Input value ignored.") %
class FakeNotifier(object):
-    def __init__(self, transport, publisher_id, serializer=None):
+    def __init__(self, transport, publisher_id, serializer=None, driver=None,
+                 topic=None, retry=None):
+        # Accept the same keyword arguments as the real
+        # oslo.messaging Notifier (driver/topic/retry), so callers that
+        # pass them (e.g. osprofiler's Messaging notifier) work against
+        # this fake.  `driver` is accepted but intentionally ignored.
        self.transport = transport
        self.publisher_id = publisher_id
        for priority in ['debug', 'info', 'warn', 'error', 'critical']:
            setattr(self, priority,
                    functools.partial(self._notify, priority.upper()))
        self._serializer = serializer or messaging.serializer.NoOpSerializer()
+        self._topic = topic
+        self.retry = retry
def prepare(self, publisher_id=None):
if publisher_id is None:
super(BaseBackupTest, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volumes_dir=vol_tmpdir)
- self.backup_mgr = \
- importutils.import_object(CONF.backup_manager)
+ with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
+ side_effect = lambda value: value
+ mock_decorator = mock.MagicMock(side_effect=side_effect)
+ mock_trace_cls.return_value = mock_decorator
+ self.backup_mgr = \
+ importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.ctxt = context.get_admin_context()
self.backup_mgr.driver.set_initialized()
self.flags(volumes_dir=vol_tmpdir,
notification_driver=["test"])
self.addCleanup(self._cleanup)
- self.volume = importutils.import_object(CONF.volume_manager)
+ with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
+ side_effect = lambda value: value
+ mock_decorator = mock.MagicMock(side_effect=side_effect)
+ mock_trace_cls.return_value = mock_decorator
+ self.volume = importutils.import_object(CONF.volume_manager)
self.context = context.get_admin_context()
self.context.user_id = 'fake'
self.context.project_id = 'fake'
from oslo.config import cfg
from oslo import messaging
+from osprofiler import profiler
from cinder import compute
from cinder import context
db=self.db,
host=self.host)
+ self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
-noauth = request_id faultwrap sizelimit noauth apiv1
-keystone = request_id faultwrap sizelimit authtoken keystonecontext apiv1
-keystone_nolimit = request_id faultwrap sizelimit authtoken keystonecontext apiv1
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
-noauth = request_id faultwrap sizelimit noauth apiv2
-keystone = request_id faultwrap sizelimit authtoken keystonecontext apiv2
-keystone_nolimit = request_id faultwrap sizelimit authtoken keystonecontext apiv2
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
[filter:request_id]
paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
#ringfile=/etc/oslo/matchmaker_ring.json
+[profiler]
+
+#
+# Options defined in cinder.service
+#
+
+# If False fully disable profiling feature. (boolean value)
+#profiler_enabled=false
+
+# If False doesn't trace SQL requests. (boolean value)
+#trace_sqlalchemy=false
+
+
[ssl]
#
oslo.db>=0.2.0
oslo.messaging>=1.3.0
oslo.rootwrap>=1.3.0.0a1
+osprofiler>=0.3.0
paramiko>=1.13.0
Paste
PasteDeploy>=1.5.0