"""
try:
- import oslo.i18n
+ import oslo_i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
- _translators = oslo.i18n.TranslatorFactory(domain='neutron')
+ _translators = oslo_i18n.TranslatorFactory(domain='neutron')
# The primary translation function using the well-known name "_"
_ = _translators.primary
_LC = _translators.log_critical
except ImportError:
# NOTE(dims): Support for cases where a project wants to use
- # code from neutron-incubator, but is not ready to be internationalized
+ # code from oslo-incubator, but is not ready to be internationalized
# (like tempest)
_ = _LI = _LW = _LE = _LC = lambda x: x
from __future__ import print_function
+import copy
import errno
import gc
+import logging
import os
import pprint
import socket
import sys
import traceback
-import eventlet
import eventlet.backdoor
import greenlet
-from oslo.config import cfg
+from oslo_config import cfg
from neutron.openstack.common._i18n import _LI
-from neutron.openstack.common import log as logging
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
LOG = logging.getLogger(__name__)
+def list_opts():
+ """Entry point for oslo-config-generator.
+ """
+ return [(None, copy.deepcopy(eventlet_backdoor_opts))]
+
+
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
import contextlib
import errno
+import logging
import os
+import stat
import tempfile
-from oslo.utils import excutils
-
-from neutron.openstack.common import log as logging
+from oslo_utils import excutils
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
+DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
-def ensure_tree(path):
+def ensure_tree(path, mode=DEFAULT_MODE):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
+ :param mode: Directory creation permissions
"""
try:
- os.makedirs(path)
+ os.makedirs(path, mode)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import sys
import time
from eventlet import greenthread
from neutron.openstack.common._i18n import _LE, _LW
-from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
break
delay = end - start - interval
if delay > 0:
- LOG.warn(_LW('task %(func_name)s run outlasted '
+ LOG.warn(_LW('task %(func_name)r run outlasted '
'interval by %(delay).2f sec'),
- {'func_name': repr(self.f), 'delay': delay})
+ {'func_name': self.f, 'delay': delay})
greenthread.sleep(-delay if delay < 0 else 0)
except LoopingCallDone as e:
self.stop()
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
- LOG.debug('Dynamic looping call %(func_name)s sleeping '
+ LOG.debug('Dynamic looping call %(func_name)r sleeping '
'for %(idle).02f seconds',
- {'func_name': repr(self.f), 'idle': idle})
+ {'func_name': self.f, 'idle': idle})
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
# under the License.
import copy
+import logging
import random
import time
-from oslo.config import cfg
+from oslo_config import cfg
import six
from neutron.openstack.common._i18n import _, _LE, _LI
-from neutron.openstack.common import log as logging
periodic_opts = [
def list_opts():
- """Entry point for oslo.config-generator."""
+ """Entry point for oslo-config-generator."""
return [(None, copy.deepcopy(periodic_opts))]
interval of 60 seconds.
2. With arguments:
- @periodic_task(spacing=N [, run_immediately=[True|False]])
+ @periodic_task(spacing=N [, run_immediately=[True|False]]
+                   [, name=[None|"string"]])
this will be run on approximately every N seconds. If this number is
negative the periodic task will be disabled. If the run_immediately
argument is provided and has a value of 'True', the first run of the
task will be shortly after task scheduler starts. If
run_immediately is omitted or set to 'False', the first time the
task runs will be approximately N seconds after the task scheduler
- starts.
+ starts. If name is not provided, __name__ of function is used.
"""
def decorator(f):
# Test for old style invocation
f._periodic_enabled = False
else:
f._periodic_enabled = kwargs.pop('enabled', True)
+ f._periodic_name = kwargs.pop('name', f.__name__)
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
class _PeriodicTasksMeta(type):
+ def _add_periodic_task(cls, task):
+ """Add a periodic task to the list of periodic tasks.
+
+ The task should already be decorated by @periodic_task.
+
+ :return: whether task was actually enabled
+ """
+ name = task._periodic_name
+
+ if task._periodic_spacing < 0:
+ LOG.info(_LI('Skipping periodic task %(task)s because '
+ 'its interval is negative'),
+ {'task': name})
+ return False
+ if not task._periodic_enabled:
+ LOG.info(_LI('Skipping periodic task %(task)s because '
+ 'it is disabled'),
+ {'task': name})
+ return False
+
+ # A periodic spacing of zero indicates that this task should
+ # be run on the default interval to avoid running too
+ # frequently.
+ if task._periodic_spacing == 0:
+ task._periodic_spacing = DEFAULT_INTERVAL
+
+ cls._periodic_tasks.append((name, task))
+ cls._periodic_spacing[name] = task._periodic_spacing
+ return True
+
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
- task = value
- name = task.__name__
-
- if task._periodic_spacing < 0:
- LOG.info(_LI('Skipping periodic task %(task)s because '
- 'its interval is negative'),
- {'task': name})
- continue
- if not task._periodic_enabled:
- LOG.info(_LI('Skipping periodic task %(task)s because '
- 'it is disabled'),
- {'task': name})
- continue
-
- # A periodic spacing of zero indicates that this task should
- # be run on the default interval to avoid running too
- # frequently.
- if task._periodic_spacing == 0:
- task._periodic_spacing = DEFAULT_INTERVAL
-
- cls._periodic_tasks.append((name, task))
- cls._periodic_spacing[name] = task._periodic_spacing
+ cls._add_periodic_task(value)
def _nearest_boundary(last_run, spacing):
for name, task in self._periodic_tasks:
self._periodic_last_run[name] = task._periodic_last_run
+ def add_periodic_task(self, task):
+ """Add a periodic task to the list of periodic tasks.
+
+ The task should already be decorated by @periodic_task.
+ """
+ if self.__class__._add_periodic_task(task):
+ self._periodic_last_run[task._periodic_name] = (
+ task._periodic_last_run)
+
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
+# -*- coding: utf-8 -*-
+#
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
-combined as with an "or" conjunction. This is the original way of
-expressing policies, but there now exists a new way: the policy
-language.
-
-In the policy language, each check is specified the same way as in the
-list-of-lists representation: a simple "a:b" pair that is matched to
-the correct code to perform that check. However, conjunction
-operators are available, allowing for more expressiveness in crafting
-policies.
-
-As an example, take the following rule, expressed in the list-of-lists
-representation::
+combined as with an "or" conjunction. As an example, take the following
+rule, expressed in the list-of-lists representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
-In the policy language, this becomes::
+This is the original way of expressing policies, but there now exists a
+new way: the policy language.
+
+In the policy language, each check is specified the same way as in the
+list-of-lists representation: a simple "a:b" pair that is matched to
+the correct class to perform that check::
+
+ +===========================================================================+
+ | TYPE | SYNTAX |
+ +===========================================================================+
+ |User's Role | role:admin |
+ +---------------------------------------------------------------------------+
+ |Rules already defined on policy | rule:admin_required |
+ +---------------------------------------------------------------------------+
+ |Against URL's¹ | http://my-url.org/check |
+ +---------------------------------------------------------------------------+
+ |User attributes² | project_id:%(target.project.id)s |
+ +---------------------------------------------------------------------------+
+ |Strings | <variable>:'xpto2035abc' |
+ | | 'myproject':<variable> |
+ +---------------------------------------------------------------------------+
+ | | project_id:xpto2035abc |
+ |Literals | domain_id:20 |
+ | | True:%(user.enabled)s |
+ +===========================================================================+
+
+¹URL checking must return 'True' to be valid
+²User attributes (obtained through the token): user_id, domain_id or project_id
+
+Conjunction operators are available, allowing for more expressiveness
+in crafting policies. So, in the policy language, the previous check in
+list-of-lists becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
project_id:%(project_id)s and not role:dunce
-It is possible to perform policy checks on the following user
-attributes (obtained through the token): user_id, domain_id or
-project_id::
-
- domain_id:<some_value>
-
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
- <some_value>:user.id
+ <some_value>:%(user.id)s
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
- <some_value>:target.role.name
-
-All these attributes (related to users, API calls, and context) can be
-checked against each other or against constants, be it literals (True,
-<a_number>) or strings.
+ <some_value>:%(target.role.name)s
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
import abc
import ast
+import copy
+import logging
import os
import re
-from oslo.config import cfg
-from oslo.serialization import jsonutils
+from oslo_config import cfg
+from oslo_serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from neutron.openstack.common import fileutils
-from neutron.openstack.common._i18n import _, _LE, _LW
-from neutron.openstack.common import log as logging
+from neutron.openstack.common._i18n import _, _LE
policy_opts = [
cfg.MultiStrOpt('policy_dirs',
default=['policy.d'],
help=_('Directories where policy configuration files are '
- 'stored.')),
+ 'stored. They can be relative to any directory '
+ 'in the search path defined by the config_dir '
+ 'option, or absolute paths. The file defined by '
+ 'policy_file must exist for these directories to '
+ 'be searched. Missing or empty directories are '
+ 'ignored.')),
]
CONF = cfg.CONF
_checks = {}
+def list_opts():
+ """Entry point for oslo-config-generator."""
+ return [(None, copy.deepcopy(policy_opts))]
+
+
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
+ :param overwrite: Whether to overwrite existing rules when reload rules
+ from config file.
"""
def __init__(self, policy_file=None, rules=None,
- default_rule=None, use_conf=True):
+ default_rule=None, use_conf=True, overwrite=True):
self.default_rule = default_rule or CONF.policy_default_rule
self.rules = Rules(rules, self.default_rule)
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
+ self.overwrite = overwrite
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
Policy file is cached and will be reloaded if modified.
- :param force_reload: Whether to overwrite current rules.
+ :param force_reload: Whether to reload rules from config file.
"""
if force_reload:
if not self.policy_path:
self.policy_path = self._get_policy_path(self.policy_file)
- self._load_policy_file(self.policy_path, force_reload)
+ self._load_policy_file(self.policy_path, force_reload,
+ overwrite=self.overwrite)
for path in CONF.policy_dirs:
try:
path = self._get_policy_path(path)
except cfg.ConfigFilesNotFoundError:
- LOG.warn(_LW("Can not find policy directory: %s"), path)
continue
self._walk_through_policy_directory(path,
self._load_policy_file,
force_reload, False)
- def _walk_through_policy_directory(self, path, func, *args):
+ @staticmethod
+ def _walk_through_policy_directory(path, func, *args):
# We do not iterate over sub-directories.
policy_files = next(os.walk(path))[2]
policy_files.sort()
def _load_policy_file(self, path, force_reload, overwrite=True):
reloaded, data = fileutils.read_cached_file(
path, force_reload=force_reload)
- if reloaded or not self.rules:
+ if reloaded or not self.rules or not overwrite:
rules = Rules.load_json(data, self.default_rule)
- self.set_rules(rules, overwrite)
- LOG.debug("Rules successfully reloaded")
+ self.set_rules(rules, overwrite=overwrite, use_conf=True)
+ LOG.debug("Reloaded policy file: %(path)s",
+ {'path': path})
def _get_policy_path(self, path):
"""Locate the policy json data file/path.
"""
url = ('http:' + self.match) % target
- data = {'target': jsonutils.dumps(target),
+
+ # Convert instances of object() in target temporarily to
+ # empty dict to avoid circular reference detection
+ # errors in jsonutils.dumps().
+ temp_target = copy.deepcopy(target)
+ for key in target.keys():
+ element = target.get(key)
+ if type(element) is object:
+ temp_target[key] = {}
+
+ data = {'target': jsonutils.dumps(temp_target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
"""Generic Node base class for all workers that run on hosts."""
import errno
-import logging as std_logging
+import logging
import os
import random
import signal
import eventlet
from eventlet import event
-from oslo.config import cfg
+from oslo_config import cfg
from neutron.openstack.common import eventlet_backdoor
from neutron.openstack.common._i18n import _LE, _LI, _LW
-from neutron.openstack.common import log as logging
from neutron.openstack.common import systemd
from neutron.openstack.common import threadgroup
signo = 0
LOG.debug('Full set of CONF:')
- CONF.log_opt_values(LOG, std_logging.DEBUG)
+ CONF.log_opt_values(LOG, logging.DEBUG)
try:
if ready_callback:
systemd.notify_once()
LOG.debug('Full set of CONF:')
- CONF.log_opt_values(LOG, std_logging.DEBUG)
+ CONF.log_opt_values(LOG, logging.DEBUG)
try:
while True:
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
- LOG.info(_LI("Wait called after thread killed. Cleaning up."))
+ LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def start(self):
pass
- def stop(self):
- self.tg.stop()
+ def stop(self, graceful=False):
+ self.tg.stop(graceful)
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
Helper module for systemd service readiness notification.
"""
+import logging
import os
import socket
import sys
-from neutron.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import threading
import eventlet
from eventlet import greenpool
-from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
continue
try:
x.stop()
+ except eventlet.greenlet.GreenletExit:
+ pass
except Exception as ex:
LOG.exception(ex)
import functools
import inspect
+import logging
+from oslo_config import cfg
import pkg_resources
import six
from neutron.openstack.common._i18n import _
-from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+opts = [
+ cfg.BoolOpt('fatal_deprecations',
+ default=False,
+ help='Enables or disables fatal status of deprecations.'),
+]
class deprecated(object):
@six.wraps(func_or_cls)
def wrapped(*args, **kwargs):
- LOG.deprecated(msg, details)
+ report_deprecated_feature(LOG, msg, details)
return func_or_cls(*args, **kwargs)
return wrapped
elif inspect.isclass(func_or_cls):
# TODO(tsufiev): change `functools` module to `six` as
# soon as six 1.7.4 (with fix for passing `assigned`
# argument to underlying `functools.wraps`) is released
- # and added to the neutron-incubator requrements
+    # and added to the oslo-incubator requirements
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
def new_init(self, *args, **kwargs):
- LOG.deprecated(msg, details)
+ report_deprecated_feature(LOG, msg, details)
orig_init(self, *args, **kwargs)
func_or_cls.__init__ = new_init
return func_or_cls
return False
return current_parts >= requested_parts
+
+
+# Track the messages we have sent already. See
+# report_deprecated_feature().
+_deprecated_messages_sent = {}
+
+
+def report_deprecated_feature(logger, msg, *args, **kwargs):
+ """Call this function when a deprecated feature is used.
+
+ If the system is configured for fatal deprecations then the message
+ is logged at the 'critical' level and :class:`DeprecatedConfig` will
+ be raised.
+
+ Otherwise, the message will be logged (once) at the 'warn' level.
+
+ :raises: :class:`DeprecatedConfig` if the system is configured for
+ fatal deprecations.
+ """
+ stdmsg = _("Deprecated: %s") % msg
+ CONF.register_opts(opts)
+ if CONF.fatal_deprecations:
+ logger.critical(stdmsg, *args, **kwargs)
+ raise DeprecatedConfig(msg=stdmsg)
+
+ # Using a list because a tuple with dict can't be stored in a set.
+ sent_args = _deprecated_messages_sent.setdefault(msg, list())
+
+ if args in sent_args:
+ # Already logged this message, so don't log it again.
+ return
+
+ sent_args.append(args)
+ logger.warn(stdmsg, *args, **kwargs)
+
+
+class DeprecatedConfig(Exception):
+ message = _("Fatal call to deprecated config: %(msg)s")
+
+ def __init__(self, msg):
+ super(Exception, self).__init__(self.message % dict(msg=msg))