--- /dev/null
+#
+# Copyright 2013 Mirantis, Inc.
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import fixtures
+from oslo.config import cfg
+import six
+
+
+class Config(fixtures.Fixture):
+ """Override some configuration values.
+
+ The keyword arguments are the names of configuration options to
+ override and their values.
+
+ If a group argument is supplied, the overrides are applied to
+ the specified configuration option group.
+
+ All overrides are automatically cleared at the end of the current
+ test by the reset() method, which is registered by addCleanup().
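+
+    A minimal usage sketch from inside a test case (the option name 'foo'
+    and group 'mygroup' are illustrative and assumed to already be
+    registered on the conf object)::
+
+        fixture = self.useFixture(Config())
+        fixture.config(foo='bar')
+        fixture.config(foo='baz', group='mygroup')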
+ """
+
+ def __init__(self, conf=cfg.CONF):
+ self.conf = conf
+
+ def setUp(self):
+ super(Config, self).setUp()
+ self.addCleanup(self.conf.reset)
+
+ def config(self, **kw):
+ group = kw.pop('group', None)
+ for k, v in six.iteritems(kw):
+ self.conf.set_override(k, v, group)
--- /dev/null
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from neutron.openstack.common import lockutils
+
+
+class LockFixture(fixtures.Fixture):
+ """External locking fixture.
+
+    This fixture is an alternative to the synchronized decorator with the
+    external flag; it ensures that tearDowns and addCleanups are included
+    in the lock context when locking between tests. It is recommended that
+    the fixture be the first line of a test method, like so::
+
+        def test_method(self):
+            self.useFixture(LockFixture('lock_name'))
+            ...
+
+ or the first line in setUp if all the test methods in the class are
+ required to be serialized. Something like::
+
+        class TestCase(testtools.TestCase):
+            def setUp(self):
+                self.useFixture(LockFixture('lock_name'))
+ super(TestCase, self).setUp()
+ ...
+
+    This is because addCleanups are put on a LIFO queue that gets run after
+    the test method exits (either by completing or raising an exception).
+ """
+ def __init__(self, name, lock_file_prefix=None):
+ self.mgr = lockutils.lock(name, lock_file_prefix, True)
+
+ def setUp(self):
+ super(LockFixture, self).setUp()
+ self.addCleanup(self.mgr.__exit__, None, None, None)
+ self.mgr.__enter__()
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# under the License.
+import contextlib
import errno
import functools
import os
import shutil
+import subprocess
+import sys
import tempfile
+import threading
import time
import weakref
-from eventlet import semaphore
from oslo.config import cfg
from neutron.openstack.common import fileutils
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
- help=('Directory to use for lock files. Default to a '
- 'temp directory'))
+ default=os.environ.get("NEUTRON_LOCK_PATH"),
+ help=('Directory to use for lock files.'))
]
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
+_semaphores_lock = threading.Lock()
+
+
+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+    """Context-based lock.
+
+    This function yields a `threading.Semaphore` instance (an
+    `eventlet.semaphore.Semaphore` instance if eventlet.monkey_patch() has
+    been called) unless external is True, in which case it yields an
+    InterProcessLock instance.
+
+ :param lock_file_prefix: The lock_file_prefix argument is used to provide
+ lock files on disk with a meaningful prefix.
+
+ :param external: The external keyword argument denotes whether this lock
+ should work across multiple processes. This means that if two different
+    workers both run a method decorated with @synchronized('mylock',
+ external=True), only one of them will execute at a time.
+
+ :param lock_path: The lock_path keyword argument is used to specify a
+ special location for external lock files to live. If nothing is set, then
+ CONF.lock_path is used as a default.
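+
+    A minimal usage sketch (the lock name, prefix, and do_something() are
+    illustrative; external locking needs lock_path or CONF.lock_path set)::
+
+        with lock('mylock', lock_file_prefix='neutron-', external=True):
+            # only one process sharing the same lock_path can be in this
+            # block at a time
+            do_something()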
+ """
+ with _semaphores_lock:
+ try:
+ sem = _semaphores[name]
+ except KeyError:
+ sem = threading.Semaphore()
+ _semaphores[name] = sem
+ with sem:
+ LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
+
+        # NOTE(mikal): I know this looks odd; it keeps a per-thread record
+        # of which lock names are currently held.
+ if not hasattr(local.strong_store, 'locks_held'):
+ local.strong_store.locks_held = []
+ local.strong_store.locks_held.append(name)
+
+ try:
+ if external and not CONF.disable_process_locking:
+ LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+ {'lock': name})
+
+ # We need a copy of lock_path because it is non-local
+ local_lock_path = lock_path or CONF.lock_path
+ if not local_lock_path:
+ raise cfg.RequiredOptError('lock_path')
+
+ if not os.path.exists(local_lock_path):
+ fileutils.ensure_tree(local_lock_path)
+ LOG.info(_('Created lock path: %s'), local_lock_path)
+
+ def add_prefix(name, prefix):
+ if not prefix:
+ return name
+ sep = '' if prefix.endswith('-') else '-'
+ return '%s%s%s' % (prefix, sep, name)
+
+ # NOTE(mikal): the lock name cannot contain directory
+ # separators
+ lock_file_name = add_prefix(name.replace(os.sep, '_'),
+ lock_file_prefix)
+
+ lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+ try:
+ lock = InterProcessLock(lock_file_path)
+ with lock as lock:
+ LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+ yield lock
+ finally:
+ LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+ else:
+ yield sem
-def synchronized(name, lock_file_prefix, external=False, lock_path=None):
+ finally:
+ local.strong_store.locks_held.remove(name)
+
+
+def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
...
This way only one of either foo or bar can be executing at a time.
-
- :param lock_file_prefix: The lock_file_prefix argument is used to provide
- lock files on disk with a meaningful prefix. The prefix should end with a
- hyphen ('-') if specified.
-
- :param external: The external keyword argument denotes whether this lock
- should work across multiple processes. This means that if two different
- workers both run a a method decorated with @synchronized('mylock',
- external=True), only one of them will execute at a time.
-
- :param lock_path: The lock_path keyword argument is used to specify a
- special location for external lock files to live. If nothing is set, then
- CONF.lock_path is used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
- # NOTE(soren): If we ever go natively threaded, this will be racy.
- # See http://stackoverflow.com/questions/5390569/dyn
- # amically-allocating-and-destroying-mutexes
- sem = _semaphores.get(name, semaphore.Semaphore())
- if name not in _semaphores:
- # this check is not racy - we're already holding ref locally
- # so GC won't remove the item and there was no IO switch
- # (only valid in greenthreads)
- _semaphores[name] = sem
-
- with sem:
- LOG.debug(_('Got semaphore "%(lock)s" for method '
- '"%(method)s"...'), {'lock': name,
- 'method': f.__name__})
-
- # NOTE(mikal): I know this looks odd
- if not hasattr(local.strong_store, 'locks_held'):
- local.strong_store.locks_held = []
- local.strong_store.locks_held.append(name)
-
- try:
- if external and not CONF.disable_process_locking:
- LOG.debug(_('Attempting to grab file lock "%(lock)s" '
- 'for method "%(method)s"...'),
- {'lock': name, 'method': f.__name__})
- cleanup_dir = False
-
- # We need a copy of lock_path because it is non-local
- local_lock_path = lock_path
- if not local_lock_path:
- local_lock_path = CONF.lock_path
-
- if not local_lock_path:
- cleanup_dir = True
- local_lock_path = tempfile.mkdtemp()
-
- if not os.path.exists(local_lock_path):
- fileutils.ensure_tree(local_lock_path)
-
- # NOTE(mikal): the lock name cannot contain directory
- # separators
- safe_name = name.replace(os.sep, '_')
- lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
- lock_file_path = os.path.join(local_lock_path,
- lock_file_name)
-
- try:
- lock = InterProcessLock(lock_file_path)
- with lock:
- LOG.debug(_('Got file lock "%(lock)s" at '
- '%(path)s for method '
- '"%(method)s"...'),
- {'lock': name,
- 'path': lock_file_path,
- 'method': f.__name__})
- retval = f(*args, **kwargs)
- finally:
- LOG.debug(_('Released file lock "%(lock)s" at '
- '%(path)s for method "%(method)s"...'),
- {'lock': name,
- 'path': lock_file_path,
- 'method': f.__name__})
- # NOTE(vish): This removes the tempdir if we needed
- # to create one. This is used to
- # cleanup the locks left behind by unit
- # tests.
- if cleanup_dir:
- shutil.rmtree(local_lock_path)
- else:
- retval = f(*args, **kwargs)
-
- finally:
- local.strong_store.locks_held.remove(name)
-
- return retval
+ try:
+ with lock(name, lock_file_prefix, external, lock_path):
+ LOG.debug(_('Got semaphore / lock "%(function)s"'),
+ {'function': f.__name__})
+ return f(*args, **kwargs)
+ finally:
+ LOG.debug(_('Semaphore / lock released "%(function)s"'),
+ {'function': f.__name__})
return inner
return wrap
...
The lock_file_prefix argument is used to provide lock files on disk with a
- meaningful prefix. The prefix should end with a hyphen ('-') if specified.
+ meaningful prefix.
"""
return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
+
+
+def main(argv):
+    """Create a directory for lock files and pass it to the given command.
+
+    If you run this::
+
+        python -m neutron.openstack.common.lockutils python setup.py testr <etc>
+
+    a temporary directory will be created for all your locks and passed to
+    all your tests in an environment variable. The temporary directory will
+    be deleted afterwards and the command's return value preserved.
+ """
+
+ lock_dir = tempfile.mkdtemp()
+ os.environ["NEUTRON_LOCK_PATH"] = lock_dir
+ try:
+ ret_val = subprocess.call(argv[1:])
+ finally:
+ shutil.rmtree(lock_dir, ignore_errors=True)
+ return ret_val
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))