Importing rpc.common requires the use of the global cfg.CONF; in fact,
most of the common code requires this global. This patch removes all
object-specific access to config options and accesses the global
directly, in preparation for the rpc openstack-common import.
Change-Id: I5989a436964c199df0dc38dbb191dc3d867e5ce7
Signed-off-by: Steven Dake <sdake@redhat.com>
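
For context, a minimal sketch of the before/after pattern this patch applies
across the services (illustrative only, not part of the diff; the helpers
named here, register_api_opts() and rpc.configure(), are the ones introduced
or updated below):

Before, each service built its own config object and threaded it through:

    from heat.common import config
    from heat import rpc

    conf = config.HeatConfigOpts()
    conf(project='heat', prog='heat-api')
    config.FLAGS = conf          # stash the object in a module global
    rpc.configure(conf)          # every consumer needs the object passed in

After, everything reads the single global cfg.CONF and services only
register the option groups they need:

    from heat.common import config
    from heat.openstack.common import cfg
    from heat import rpc

    cfg.CONF(project='heat', prog='heat-api')   # parse CLI args and config files
    config.setup_logging()
    config.register_api_opts()                  # registers bind_opts + rpc_opts on cfg.CONF
    rpc.configure()                             # backend looked up via cfg.CONF.rpc_backend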
gettext.install('heat', unicode=1)
-from heat import rpc
from heat.common import config
from heat.common import wsgi
from paste import httpserver
+from heat.openstack.common import cfg
from heat.openstack.common import log as logging
+from heat import rpc
+
LOG = logging.getLogger('heat.api')
if __name__ == '__main__':
try:
- conf = config.HeatConfigOpts()
- conf(project='heat', prog='heat-api')
- config.FLAGS = conf
- rpc.configure(conf)
+ cfg.CONF(project='heat', prog='heat-api')
+ config.setup_logging()
+ config.register_api_opts()
+ rpc.configure()
- app = config.load_paste_app(conf)
+ app = config.load_paste_app()
- port = conf.bind_port
- host = conf.bind_host
+ port = cfg.CONF.bind_port
+ host = cfg.CONF.bind_host
LOG.info(('Starting Heat API on %s:%s') % (host, port))
httpserver.serve(app, host=host, port=port)
except RuntimeError, e:
import os
import sys
-from heat.openstack.common import log as logging
# If ../heat/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
gettext.install('heat', unicode=1)
-from heat import rpc
+from heat.openstack.common import log as logging
+from heat.openstack.common import cfg
from heat import service
from heat.common import config
from heat.common import utils
from heat.db import api as db_api
+from heat import rpc
+
logger = logging.getLogger('heat.engine')
if __name__ == '__main__':
default_manager = 'heat.engine.manager.EngineManager'
- conf = config.HeatEngineConfigOpts()
- conf(project='heat', prog='heat-engine')
- config.FLAGS = conf
+ cfg.CONF(project='heat', prog='heat-engine')
- config.setup_logging(conf)
- rpc.configure(conf)
- db_api.configure(conf)
+ config.setup_logging()
+ config.register_engine_opts()
+ db_api.configure()
+ rpc.configure()
#utils.monkey_patch()
server = service.Service.create(binary='heat-engine',
topic='engine',
manager=default_manager,
- config=conf)
+ config=cfg.CONF)
service.serve(server)
service.wait()
from paste import httpserver
from heat.openstack.common import log as logging
+from heat.openstack.common import cfg
LOG = logging.getLogger('heat.metadata')
if __name__ == '__main__':
try:
- conf = config.HeatMetadataConfigOpts()
- conf(project='heat', prog='heat-metadata')
- config.FLAGS = conf
- rpc.configure(conf)
+ cfg.CONF(project='heat', prog='heat-metadata')
- app = config.load_paste_app(conf)
+ config.setup_logging()
+ config.register_metadata_opts()
+ rpc.configure()
- port = conf.bind_port
- host = conf.bind_host
+ app = config.load_paste_app()
+
+ port = cfg.CONF.bind_port
+ host = cfg.CONF.bind_host
send_address_to_engine(host, port)
LOG.info(('Starting Heat Metadata on %s:%s') % (host, port))
httpserver.serve(app, host=host, port=port)
+
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
cfg.StrOpt('config_file'),
]
-FLAGS = None
+
+bind_opts = [cfg.IntOpt('bind_port', default=8000),
+ cfg.StrOpt('bind_host', default='127.0.0.1')]
rpc_opts = [
cfg.StrOpt('rpc_backend',
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
- ]
-
-
-class HeatConfigOpts(cfg.CommonConfigOpts):
- def __init__(self):
- super(HeatConfigOpts, self).__init__()
- opts = [cfg.IntOpt('bind_port', default=8000),
- cfg.StrOpt('bind_host', default='127.0.0.1')]
- opts.extend(rpc_opts)
- self.register_cli_opts(opts)
-
-
-class HeatMetadataConfigOpts(cfg.CommonConfigOpts):
- def __init__(self):
- super(HeatMetadataConfigOpts, self).__init__()
- opts = [cfg.IntOpt('bind_port', default=8000),
- cfg.StrOpt('bind_host', default='127.0.0.1')]
- opts.extend(rpc_opts)
- self.register_cli_opts(opts)
-
-
-class HeatEngineConfigOpts(cfg.CommonConfigOpts):
-
- service_opts = [
- cfg.IntOpt('report_interval',
- default=10,
- help='seconds between nodes reporting state to datastore'),
- cfg.IntOpt('periodic_interval',
- default=60,
- help='seconds between running periodic tasks'),
- cfg.StrOpt('ec2_listen',
- default="0.0.0.0",
- help='IP address for EC2 API to listen'),
- cfg.IntOpt('ec2_listen_port',
- default=8773,
- help='port for ec2 api to listen'),
- cfg.StrOpt('osapi_compute_listen',
- default="0.0.0.0",
- help='IP address for OpenStack API to listen'),
- cfg.IntOpt('osapi_compute_listen_port',
- default=8774,
- help='list port for osapi compute'),
- cfg.StrOpt('metadata_manager',
- default='nova.api.manager.MetadataManager',
- help='OpenStack metadata service manager'),
- cfg.StrOpt('metadata_listen',
- default="0.0.0.0",
- help='IP address for metadata api to listen'),
- cfg.IntOpt('metadata_listen_port',
- default=8775,
- help='port for metadata api to listen'),
- cfg.StrOpt('osapi_volume_listen',
- default="0.0.0.0",
- help='IP address for OpenStack Volume API to listen'),
- cfg.IntOpt('osapi_volume_listen_port',
- default=8776,
- help='port for os volume api to listen'),
- cfg.StrOpt('heat_metadata_server_url',
- default="",
- help='URL of the Heat metadata server'),
- ]
- db_opts = [
- cfg.StrOpt('sql_connection',
- default='mysql://heat:heat@localhost/heat',
- help='The SQLAlchemy connection string used to connect to the '
- 'database'),
- cfg.IntOpt('sql_idle_timeout',
- default=3600,
- help='timeout before idle sql connections are reaped'),
- ]
- engine_opts = [
- cfg.StrOpt('host',
- default=socket.gethostname(),
- help='Name of this node. This can be an opaque identifier. '
- 'It is not necessarily a hostname, FQDN, or IP address.'),
- cfg.StrOpt('instance_driver',
- default='heat.engine.nova',
- help='Driver to use for controlling instances'),
- ]
-
- def __init__(self):
- super(HeatEngineConfigOpts, self).__init__()
- self.register_cli_opts(self.engine_opts)
- self.register_cli_opts(self.db_opts)
- self.register_cli_opts(self.service_opts)
- self.register_cli_opts(rpc_opts)
-
-
-def setup_logging(conf):
+]
+
+service_opts = [
+cfg.IntOpt('report_interval',
+ default=10,
+ help='seconds between nodes reporting state to datastore'),
+cfg.IntOpt('periodic_interval',
+ default=60,
+ help='seconds between running periodic tasks'),
+cfg.StrOpt('ec2_listen',
+ default="0.0.0.0",
+ help='IP address for EC2 API to listen'),
+cfg.IntOpt('ec2_listen_port',
+ default=8773,
+ help='port for ec2 api to listen'),
+cfg.StrOpt('osapi_compute_listen',
+ default="0.0.0.0",
+ help='IP address for OpenStack API to listen'),
+cfg.IntOpt('osapi_compute_listen_port',
+ default=8774,
+              help='listen port for osapi compute'),
+cfg.StrOpt('metadata_manager',
+ default='nova.api.manager.MetadataManager',
+ help='OpenStack metadata service manager'),
+cfg.StrOpt('metadata_listen',
+ default="0.0.0.0",
+ help='IP address for metadata api to listen'),
+cfg.IntOpt('metadata_listen_port',
+ default=8775,
+ help='port for metadata api to listen'),
+cfg.StrOpt('osapi_volume_listen',
+ default="0.0.0.0",
+ help='IP address for OpenStack Volume API to listen'),
+cfg.IntOpt('osapi_volume_listen_port',
+ default=8776,
+ help='port for os volume api to listen'),
+cfg.StrOpt('heat_metadata_server_url',
+ default="",
+ help='URL of the Heat metadata server'),
+]
+db_opts = [
+cfg.StrOpt('sql_connection',
+ default='mysql://heat:heat@localhost/heat',
+ help='The SQLAlchemy connection string used to connect to the '
+ 'database'),
+cfg.IntOpt('sql_idle_timeout',
+ default=3600,
+ help='timeout before idle sql connections are reaped'),
+]
+engine_opts = [
+cfg.StrOpt('host',
+ default=socket.gethostname(),
+ help='Name of this node. This can be an opaque identifier. '
+ 'It is not necessarily a hostname, FQDN, or IP address.'),
+cfg.StrOpt('instance_driver',
+ default='heat.engine.nova',
+ help='Driver to use for controlling instances'),
+]
+
+
+def register_metadata_opts():
+ cfg.CONF.register_opts(service_opts)
+ cfg.CONF.register_opts(bind_opts)
+ cfg.CONF.register_opts(rpc_opts)
+
+
+def register_api_opts():
+ cfg.CONF.register_opts(bind_opts)
+ cfg.CONF.register_opts(rpc_opts)
+
+
+def register_engine_opts():
+ cfg.CONF.register_opts(engine_opts)
+ cfg.CONF.register_opts(db_opts)
+ cfg.CONF.register_opts(service_opts)
+ cfg.CONF.register_opts(rpc_opts)
+
+
+def setup_logging():
"""
Sets up the logging options for a log with supplied name
-
- :param conf: a cfg.ConfOpts object
"""
- if conf.log_config:
+ if cfg.CONF.log_config:
# Use a logging configuration file for all settings...
- if os.path.exists(conf.log_config):
- logging.config.fileConfig(conf.log_config)
+ if os.path.exists(cfg.CONF.log_config):
+ logging.config.fileConfig(cfg.CONF.log_config)
return
else:
raise RuntimeError("Unable to locate specified logging "
- "config file: %s" % conf.log_config)
+ "config file: %s" % cfg.CONF.log_config)
root_logger = logging.root
- if conf.debug:
+ if cfg.CONF.debug:
root_logger.setLevel(logging.DEBUG)
- elif conf.verbose:
+ elif cfg.CONF.verbose:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
# quiet down the qpid logging
root_logger.getChild('qpid.messaging').setLevel(logging.INFO)
- formatter = logging.Formatter(conf.log_format, conf.log_date_format)
+ formatter = logging.Formatter(cfg.CONF.log_format,
+ cfg.CONF.log_date_format)
- if conf.use_syslog:
+ if cfg.CONF.use_syslog:
try:
facility = getattr(logging.handlers.SysLogHandler,
- conf.syslog_log_facility)
+ cfg.CONF.syslog_log_facility)
except AttributeError:
raise ValueError(_("Invalid syslog facility"))
handler = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
- elif conf.log_file:
- logfile = conf.log_file
- if conf.log_dir:
- logfile = os.path.join(conf.log_dir, logfile)
+ elif cfg.CONF.log_file:
+ logfile = cfg.CONF.log_file
+ if cfg.CONF.log_dir:
+ logfile = os.path.join(cfg.CONF.log_dir, logfile)
handler = logging.handlers.WatchedFileHandler(logfile)
else:
handler = logging.StreamHandler(sys.stdout)
root_logger.addHandler(handler)
-def _register_paste_deploy_opts(conf):
+def _register_paste_deploy_opts():
"""
Idempotent registration of paste_deploy option group
-
- :param conf: a cfg.ConfigOpts object
"""
- conf.register_group(paste_deploy_group)
- conf.register_opts(paste_deploy_opts, group=paste_deploy_group)
+ cfg.CONF.register_group(paste_deploy_group)
+ cfg.CONF.register_opts(paste_deploy_opts, group=paste_deploy_group)
-def _get_deployment_flavor(conf):
+def _get_deployment_flavor():
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
-
- :param conf: a cfg.ConfigOpts object
"""
- _register_paste_deploy_opts(conf)
- flavor = conf.paste_deploy.flavor
+ _register_paste_deploy_opts()
+ flavor = cfg.CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
-def _get_deployment_config_file(conf):
+def _get_deployment_config_file():
"""
Retrieve the deployment_config_file config item, formatted as an
absolute pathname.
-
- :param conf: a cfg.ConfigOpts object
"""
- _register_paste_deploy_opts(conf)
- config_file = conf.paste_deploy.config_file
+ _register_paste_deploy_opts()
+ config_file = cfg.CONF.paste_deploy.config_file
if not config_file:
- if conf.config_file:
+ if cfg.CONF.config_file:
# Assume paste config is in a paste.ini file corresponding
# to the last config file
- path = os.path.splitext(conf.config_file[-1])[0] + "-paste.ini"
+ path = os.path.splitext(cfg.CONF.config_file[-1])[0] + "-paste.ini"
else:
return None
else:
return os.path.abspath(path)
-def load_paste_app(conf, app_name=None):
+def load_paste_app(app_name=None):
"""
Builds and returns a WSGI app from a paste config file.
We assume the last config file specified in the supplied ConfigOpts
object is the paste config file.
- :param conf: a cfg.ConfigOpts object
:param app_name: name of the application to load
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
if app_name is None:
- app_name = conf.prog
+ app_name = cfg.CONF.prog
# append the deployment flavor to the application name,
# in order to identify the appropriate paste pipeline
- app_name += _get_deployment_flavor(conf)
+ app_name += _get_deployment_flavor()
- conf_file = _get_deployment_config_file(conf)
+ conf_file = _get_deployment_config_file()
if conf_file is None:
raise RuntimeError("Unable to locate config file")
try:
# Setup logging early
- setup_logging(conf)
+ setup_logging()
- app = wsgi.paste_deploy_app(conf_file, app_name, conf)
+ app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)
# Log the options used when starting if we're in debug mode...
- if conf.debug:
- conf.log_opt_values(logging.getLogger(app_name), logging.DEBUG)
+ if cfg.CONF.debug:
+ cfg.CONF.log_opt_values(logging.getLogger(app_name), logging.DEBUG)
return app
except (LookupError, ImportError), e:
import heat.utils
from heat.openstack.common import utils
from heat.openstack.common import cfg
-#from heat.common import config
import heat.utils
SQL_CONNECTION = 'sqlite:///heat-test.db/'
sqlalchemy='heat.db.sqlalchemy.api')
-def configure(conf):
+def configure():
global SQL_CONNECTION
global SQL_IDLE_TIMEOUT
- SQL_CONNECTION = conf.sql_connection
- SQL_IDLE_TIMEOUT = conf.sql_idle_timeout
+ SQL_CONNECTION = cfg.CONF.sql_connection
+ SQL_IDLE_TIMEOUT = cfg.CONF.sql_idle_timeout
def raw_template_get(context, template_id):
from heat.engine import watchrule
from heat.engine import auth
+from heat.openstack.common import cfg
from heat.openstack.common import timeutils
from heat.openstack.common import log as logging
for resource in stack if resource.id is not None]
def metadata_register_address(self, context, url):
- config.FLAGS.heat_metadata_server_url = url
+ cfg.CONF.heat_metadata_server_url = url
def metadata_list_stacks(self, context):
"""
from heat.engine import auth
from heat.openstack.common import log as logging
+from heat.openstack.common import cfg
logger = logging.getLogger('heat.engine.resources')
None if no server is registered.
'''
try:
- return config.FLAGS.heat_metadata_server_url
+ return cfg.CONF.heat_metadata_server_url
except AttributeError:
return None
"""
from heat import version
-from heat.common import config
from heat.openstack.common import log as logging
+from heat.openstack.common import cfg
-FLAGS = config.FLAGS
LOG = logging.getLogger(__name__)
def __init__(self, host=None, db_driver=None):
if not host:
- host = FLAGS.host
+ host = cfg.CONF.host
self.host = host
super(Manager, self).__init__(db_driver)
def service_config(self, context):
config = {}
- for key in FLAGS:
- config[key] = FLAGS.get(key, None)
+ for key in cfg.CONF:
+ config[key] = cfg.CONF.get(key, None)
return config
_RPCIMPL = None
-def configure(conf):
+def configure():
"""Delay import of rpc_backend until FLAGS are loaded."""
- LOG.debug(_("Configuring RPC %s") % conf.rpc_backend)
+ LOG.debug(_("Configuring RPC %s") % cfg.CONF.rpc_backend)
global _RPCIMPL
- _RPCIMPL = importutils.import_module(conf.rpc_backend)
+ _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
def _get_impl():
from heat.common import exception
from heat.common import config
from heat.openstack.common import local
+from heat.openstack.common import cfg
import heat.rpc.common as rpc_common
LOG = logging.getLogger(__name__)
-FLAGS = config.FLAGS
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, *args, **kwargs):
self.connection_cls = kwargs.pop("connection_cls", None)
- kwargs.setdefault("max_size", FLAGS.rpc_conn_pool_size)
+ kwargs.setdefault("max_size", cfg.CONF.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
def __init__(self, proxy, connection_pool):
self.proxy = proxy
- self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
+ self.pool = greenpool.GreenPool(cfg.CONF.rpc_thread_pool_size)
self.connection_pool = connection_pool
def __call__(self, message_data):
def __init__(self, connection, timeout):
self._connection = connection
self._iterator = connection.iterconsume(
- timeout=timeout or FLAGS.rpc_response_timeout)
+ timeout=timeout or
+ cfg.CONF.rpc_response_timeout)
self._result = None
self._done = False
self._got_ending = False
"""
super(TopicConsumer, self).__init__(session, callback,
- "%s/%s" % (config.FLAGS.control_exchange, topic), {},
+ "%s/%s" % (cfg.CONF.control_exchange, topic), {},
topic, {})
"""init a 'topic' publisher.
"""
super(TopicPublisher, self).__init__(session,
- "%s/%s" % (config.FLAGS.control_exchange, topic))
+ "%s/%s" % (cfg.CONF.control_exchange, topic))
class FanoutPublisher(Publisher):
"""init a 'topic' publisher.
"""
super(NotifyPublisher, self).__init__(session,
- "%s/%s" % (config.FLAGS.control_exchange, topic),
+ "%s/%s" % (cfg.CONF.control_exchange, topic),
{"durable": True})
if server_params is None:
server_params = {}
- default_params = dict(hostname=config.FLAGS.qpid_hostname,
- port=config.FLAGS.qpid_port,
- username=config.FLAGS.qpid_username,
- password=config.FLAGS.qpid_password)
+ default_params = dict(hostname=cfg.CONF.qpid_hostname,
+ port=cfg.CONF.qpid_port,
+ username=cfg.CONF.qpid_username,
+ password=cfg.CONF.qpid_password)
params = server_params
for key in default_params.keys():
# before we call open
self.connection.username = params['username']
self.connection.password = params['password']
- self.connection.sasl_mechanisms = config.FLAGS.qpid_sasl_mechanisms
- self.connection.reconnect = config.FLAGS.qpid_reconnect
- if config.FLAGS.qpid_reconnect_timeout:
+ self.connection.sasl_mechanisms = cfg.CONF.qpid_sasl_mechanisms
+ self.connection.reconnect = cfg.CONF.qpid_reconnect
+ if cfg.CONF.qpid_reconnect_timeout:
self.connection.reconnect_timeout = \
- config.FLAGS.qpid_reconnect_timeout
- if config.FLAGS.qpid_reconnect_limit:
- self.connection.reconnect_limit = config.FLAGS.qpid_reconnect_limit
- if config.FLAGS.qpid_reconnect_interval_max:
+ cfg.CONF.qpid_reconnect_timeout
+ if cfg.CONF.qpid_reconnect_limit:
+ self.connection.reconnect_limit = cfg.CONF.qpid_reconnect_limit
+ if cfg.CONF.qpid_reconnect_interval_max:
self.connection.reconnect_interval_max = (
- config.FLAGS.qpid_reconnect_interval_max)
- if config.FLAGS.qpid_reconnect_interval_min:
+ cfg.CONF.qpid_reconnect_interval_max)
+ if cfg.CONF.qpid_reconnect_interval_min:
self.connection.reconnect_interval_min = (
- config.FLAGS.qpid_reconnect_interval_min)
- if config.FLAGS.qpid_reconnect_interval:
+ cfg.CONF.qpid_reconnect_interval_min)
+ if cfg.CONF.qpid_reconnect_interval:
self.connection.reconnect_interval = \
- config.FLAGS.qpid_reconnect_interval
- self.connection.hearbeat = config.FLAGS.qpid_heartbeat
- self.connection.protocol = config.FLAGS.qpid_protocol
- self.connection.tcp_nodelay = config.FLAGS.qpid_tcp_nodelay
+ cfg.CONF.qpid_reconnect_interval
+        self.connection.heartbeat = cfg.CONF.qpid_heartbeat
+ self.connection.protocol = cfg.CONF.qpid_protocol
+ self.connection.tcp_nodelay = cfg.CONF.qpid_tcp_nodelay
# Open is part of reconnect -
# NOTE(WGH) not sure we need this with the reconnect flags
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
LOG.error(_('Unable to connect to AMQP server: %s ') % e)
- time.sleep(config.FLAGS.qpid_reconnect_interval or 1)
+ time.sleep(cfg.CONF.qpid_reconnect_interval or 1)
else:
break
from heat.openstack.common import cfg
from heat.openstack.common import importutils
from heat.openstack.common import log as logging
+from heat import rpc
from heat.common import utils as heat_utils
from heat.common import exception
from heat.common import context
-from heat import rpc
from heat import version
LOG = logging.getLogger(__name__)
periodic_interval=None, config=None):
"""Instantiates class and passes back application object.
- :param host: defaults to FLAGS.host
+ :param host: defaults to cfg.CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'heat-' part
- :param manager: defaults to FLAGS.<topic>_manager
- :param periodic_interval: defaults to FLAGS.periodic_interval
+ :param manager: defaults to cfg.CONF.<topic>_manager
+ :param periodic_interval: defaults to cfg.CONF.periodic_interval
"""
- global FLAGS
- FLAGS = config
if not host:
- host = FLAGS.host
+ host = cfg.CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary.rpartition('heat-')[2]
if not manager:
- manager = FLAGS.get('%s_manager' % topic, None)
+ manager = cfg.CONF.get('%s_manager' % topic, None)
if not periodic_interval:
- periodic_interval = FLAGS.periodic_interval
+ periodic_interval = cfg.CONF.periodic_interval
service_obj = cls(host, binary, topic, manager,
periodic_interval)
def wait():
- LOG.debug(_('Full set of FLAGS:'))
- for flag in FLAGS:
- flag_get = FLAGS.get(flag, None)
+ LOG.debug(_('Full set of CONF:'))
+ for flag in cfg.CONF:
+ flag_get = cfg.CONF.get(flag, None)
# hide flag contents from log if contains a password
# should use secret flag when switch over to openstack-common
if ("_password" in flag or "_key" in flag or
import unittest
from nose.plugins.attrib import attr
-from heat.common.config import HeatConfigOpts
+from heat.common import config
+from heat.openstack.common import cfg
+import heat.common.config
import heat.api.v1.stacks as stacks
def setUp(self):
# Create WSGI controller instance
- options = HeatConfigOpts()
- self.controller = stacks.StackController(options)
+ config.register_api_opts()
+ self.controller = stacks.StackController(cfg.CONF)
print "setup complete"
def tearDown(self):