# Import full module names so as not to pollute the namespace and cause
# possible collisions with uses of 'from cinder.backup import <foo>' elsewhere.
-import cinder.flags
+
+from oslo.config import cfg
+
import cinder.openstack.common.importutils
-API = cinder.openstack.common.importutils.import_class(
- cinder.flags.FLAGS.backup_api_class)
+
+CONF = cfg.CONF
+
+API = cinder.openstack.common.importutils.import_class(CONF.backup_api_class)
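Every hunk in this series follows the same mechanical recipe: drop the
cinder.flags indirection and bind a module-level CONF to oslo.config's
global registry. A minimal, self-contained sketch of the pattern (the
option name and default below are illustrative, not taken from this diff):

    from oslo.config import cfg

    CONF = cfg.CONF

    # Options must be registered before they are read; CONF then exposes
    # them as attributes, exactly as FLAGS did. Reading an option that was
    # never registered raises cfg.NoSuchOptError.
    CONF.register_opts([cfg.StrOpt('backup_api_class',
                                   default='cinder.backup.api.API',
                                   help='The full class name of the '
                                        'backup API class')])

    api_class_path = CONF.backup_api_class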
from cinder.backup import rpcapi as backup_rpcapi
from cinder.db import base
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
import cinder.policy
import cinder.volume
-FLAGS = flags.FLAGS
-
LOG = logging.getLogger(__name__)
from cinder import context
from cinder import exception
-from cinder import flags
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
+
LOG = logging.getLogger(__name__)
backup_manager_opts = [
help='Service to use for backups.'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(backup_manager_opts)
+CONF = cfg.CONF
+CONF.register_opts(backup_manager_opts)
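The option declaration itself is elided from this hunk; given the
CONF.backup_service reads further down, it is presumably a StrOpt along
these lines (only the help text appears above; the default shown here is
an assumption):

    backup_manager_opts = [
        cfg.StrOpt('backup_service',
                   default='cinder.backup.services.swift',
                   help='Service to use for backups.'),
    ]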
class BackupManager(manager.SchedulerDependentManager):
RPC_API_VERSION = '1.0'
def __init__(self, service_name=None, *args, **kwargs):
- self.service = importutils.import_module(FLAGS.backup_service)
- self.az = FLAGS.storage_availability_zone
- self.volume_manager = importutils.import_object(FLAGS.volume_manager)
+ self.service = importutils.import_module(CONF.backup_service)
+ self.az = CONF.storage_availability_zone
+ self.volume_manager = importutils.import_object(
+ CONF.volume_manager)
self.driver = self.volume_manager.driver
super(BackupManager, self).__init__(service_name='backup',
*args, **kwargs)
'volume: %(volume_id)s') % locals())
self.db.backup_update(context, backup_id, {'host': self.host,
'service':
- FLAGS.backup_service})
+ CONF.backup_service})
expected_status = 'backing-up'
actual_status = volume['status']
backup['id'], backup['size'])
backup_service = backup['service']
- configured_service = FLAGS.backup_service
+ configured_service = CONF.backup_service
if backup_service != configured_service:
err = _('restore_backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
backup_service = backup['service']
if backup_service is not None:
- configured_service = FLAGS.backup_service
+ configured_service = CONF.backup_service
if backup_service != configured_service:
err = _('delete_backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
Client side of the volume backup RPC API.
"""
-from cinder import flags
+
+from oslo.config import cfg
+
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
import cinder.openstack.common.rpc.proxy
-LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
-FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
def __init__(self):
super(BackupAPI, self).__init__(
- topic=FLAGS.backup_topic,
+ topic=CONF.backup_topic,
default_version=self.BASE_RPC_API_VERSION)
def create_backup(self, ctxt, host, backup_id, volume_id):
from cinder.db import base
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from swiftclient import client as swift
+
LOG = logging.getLogger(__name__)
swiftbackup_service_opts = [
help='Compression algorithm (None to disable)'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(swiftbackup_service_opts)
+CONF = cfg.CONF
+CONF.register_opts(swiftbackup_service_opts)
class SwiftBackupService(base.Base):
def __init__(self, context, db_driver=None):
self.context = context
- self.swift_url = '%s%s' % (FLAGS.backup_swift_url,
+ self.swift_url = '%s%s' % (CONF.backup_swift_url,
self.context.project_id)
- self.az = FLAGS.storage_availability_zone
- self.data_block_size_bytes = FLAGS.backup_swift_object_size
- self.swift_attempts = FLAGS.backup_swift_retry_attempts
- self.swift_backoff = FLAGS.backup_swift_retry_backoff
+ self.az = CONF.storage_availability_zone
+ self.data_block_size_bytes = CONF.backup_swift_object_size
+ self.swift_attempts = CONF.backup_swift_retry_attempts
+ self.swift_backoff = CONF.backup_swift_retry_backoff
self.compressor = \
- self._get_compressor(FLAGS.backup_compression_algorithm)
+ self._get_compressor(CONF.backup_compression_algorithm)
self.conn = swift.Connection(None, None, None,
retries=self.swift_attempts,
preauthurl=self.swift_url,
LOG.debug(_('_create_container started, container: %(container)s,'
' backup: %(backup_id)s') % locals())
if container is None:
- container = FLAGS.backup_swift_container
+ container = CONF.backup_swift_container
self.db.backup_update(context, backup_id, {'container': container})
if not self._check_container_exists(container):
self.conn.put_container(container)
break
LOG.debug(_('reading chunk of data from volume'))
if self.compressor is not None:
- algorithm = FLAGS.backup_compression_algorithm.lower()
+ algorithm = CONF.backup_compression_algorithm.lower()
obj[object_name]['compression'] = algorithm
data_size_bytes = len(data)
data = self.compressor.compress(data)
Helper code for the iSCSI volume driver.
"""
+
import os
import re
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import utils as volume_utils
+
LOG = logging.getLogger(__name__)
iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
)
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(iscsi_helper_opt)
-FLAGS.import_opt('volume_name_template', 'cinder.db')
+CONF = cfg.CONF
+CONF.register_opts(iscsi_helper_opt)
+CONF.import_opt('volume_name_template', 'cinder.db')
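import_opt makes a cross-module option dependency explicit: it imports the
named module so the option registered there is guaranteed to be on the
shared CONF before this module reads it. A sketch of the contract, using a
hypothetical package layout:

    # mypkg/db.py: defines and registers the option.
    from oslo.config import cfg

    CONF = cfg.CONF
    CONF.register_opts([cfg.StrOpt('volume_name_template',
                                   default='volume-%s')])

    # mypkg/consumer.py: declares the dependency instead of relying on
    # mypkg.db having been imported somewhere else first.
    from oslo.config import cfg

    CONF = cfg.CONF
    CONF.import_opt('volume_name_template', 'mypkg.db')
    name = CONF.volume_name_template % 'some-id'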
class TargetAdmin(object):
# Note(jdg) tid and lun aren't used by TgtAdm but remain for
# compatibility
- fileutils.ensure_tree(FLAGS.volumes_dir)
+ fileutils.ensure_tree(CONF.volumes_dir)
vol_id = name.split(':')[1]
if chap_auth is None:
""" % (name, path, chap_auth)
LOG.info(_('Creating iscsi_target for: %s') % vol_id)
- volumes_dir = FLAGS.volumes_dir
+ volumes_dir = CONF.volumes_dir
volume_path = os.path.join(volumes_dir, vol_id)
f = open(volume_path, 'w+')
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
- iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
+ iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.error(_("Failed to create iscsi target for volume "
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
LOG.info(_('Removing iscsi_target for: %s') % vol_id)
- vol_uuid_file = FLAGS.volume_name_template % vol_id
- volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
+ vol_uuid_file = CONF.volume_name_template % vol_id
+ volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
if os.path.isfile(volume_path):
- iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
+ iqn = '%s%s' % (CONF.iscsi_target_prefix,
vol_uuid_file)
else:
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
super(IetAdm, self).__init__('ietadm', execute)
def _iotype(self, path):
- if FLAGS.iscsi_iotype == 'auto':
+ if CONF.iscsi_iotype == 'auto':
return 'blockio' if volume_utils.is_block(path) else 'fileio'
else:
- return FLAGS.iscsi_iotype
+ return CONF.iscsi_iotype
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
(type, username, password) = chap_auth.split()
self._new_auth(tid, type, username, password, **kwargs)
- conf_file = FLAGS.iet_conf
+ conf_file = CONF.iet_conf
if os.path.exists(conf_file):
try:
volume_conf = """
LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
self._delete_logicalunit(tid, lun, **kwargs)
self._delete_target(tid, **kwargs)
- vol_uuid_file = FLAGS.volume_name_template % vol_id
- conf_file = FLAGS.iet_conf
+ vol_uuid_file = CONF.volume_name_template % vol_id
+ conf_file = CONF.iet_conf
if os.path.exists(conf_file):
with utils.temporary_chown(conf_file):
try:
(chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:]
extra_args = []
- if FLAGS.lio_initiator_iqns:
- extra_args.append(FLAGS.lio_initiator_iqns)
+ if CONF.lio_initiator_iqns:
+ extra_args.append(CONF.lio_initiator_iqns)
try:
command_args = ['rtstool',
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
- iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
+ iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.error(_("Failed to create iscsi target for volume "
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
LOG.info(_('Removing iscsi_target: %s') % vol_id)
vol_uuid_name = 'volume-%s' % vol_id
- iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_uuid_name)
+ iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_uuid_name)
try:
self._execute('rtstool',
def get_target_admin():
- if FLAGS.iscsi_helper == 'tgtadm':
+ if CONF.iscsi_helper == 'tgtadm':
return TgtAdm()
- elif FLAGS.iscsi_helper == 'fake':
+ elif CONF.iscsi_helper == 'fake':
return FakeIscsiHelper()
- elif FLAGS.iscsi_helper == 'lioadm':
+ elif CONF.iscsi_helper == 'lioadm':
return LioAdm()
else:
return IetAdm()
from oslo.config import cfg
import webob.exc
-from cinder import flags
from cinder.openstack.common import exception as com_exception
from cinder.openstack.common import log as logging
+
LOG = logging.getLogger(__name__)
exc_log_opts = [
help='make exception message format errors fatal'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(exc_log_opts)
+CONF = cfg.CONF
+CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
- if FLAGS.fatal_exception_format_errors:
+ if CONF.fatal_exception_format_errors:
raise e
else:
# at least get the core message out if something happened
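The guarded interpolation above is worth a note: a broken format mapping is
logged, and unless CONF.fatal_exception_format_errors is set the raw
template is kept so the exception still carries its core message. A
standalone sketch of that pattern (the names here are stand-ins):

    def safe_format(template, fatal=False, **kwargs):
        # Interpolate, but never let a broken mapping mask the original
        # error unless format errors are configured to be fatal.
        try:
            return template % kwargs
        except (KeyError, TypeError, ValueError):
            if fatal:
                raise
            return template  # at least get the core message out

    assert safe_format('volume %(id)s', id=42) == 'volume 42'
    assert safe_format('volume %(id)s') == 'volume %(id)s'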
"""Implementation of an image service that uses Glance as the backend"""
+
from __future__ import absolute_import
import copy
import glanceclient
import glanceclient.exc
+from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
+CONF = cfg.CONF
+
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def _parse_image_ref(image_href):
def _create_glance_client(context, netloc, use_ssl,
- version=FLAGS.glance_api_version):
+ version=CONF.glance_api_version):
"""Instantiate a new glanceclient.Client object."""
if version is None:
- version = FLAGS.glance_api_version
+ version = CONF.glance_api_version
params = {}
if use_ssl:
scheme = 'https'
# https specific params
- params['insecure'] = FLAGS.glance_api_insecure
- params['ssl_compression'] = FLAGS.glance_api_ssl_compression
+ params['insecure'] = CONF.glance_api_insecure
+ params['ssl_compression'] = CONF.glance_api_ssl_compression
else:
scheme = 'http'
- if FLAGS.auth_strategy == 'keystone':
+ if CONF.auth_strategy == 'keystone':
params['token'] = context.auth_token
endpoint = '%s://%s' % (scheme, netloc)
return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers():
"""Return Iterable over shuffled api servers.
- Shuffle a list of FLAGS.glance_api_servers and return an iterator
+ Shuffle a list of CONF.glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary.
"""
api_servers = []
- for api_server in FLAGS.glance_api_servers:
+ for api_server in CONF.glance_api_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
url = urlparse.urlparse(api_server)
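The docstring above promises shuffle-once-then-cycle semantics. Stripped of
the URL normalization, a self-contained sketch of that iterator (server
addresses here are illustrative):

    import itertools
    import random

    def cycle_shuffled(servers):
        """Yield servers forever, in an order randomized once up front."""
        servers = list(servers)     # copy; don't mutate the caller's list
        random.shuffle(servers)     # shuffle a single time
        return itertools.cycle(servers)  # then wrap around indefinitely

    api_servers = cycle_shuffled(['10.0.0.1:9292', '10.0.0.2:9292'])
    host_port = next(api_servers)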
"""Call a glance client method.
If we get a connection error,
- retry the request according to FLAGS.glance_num_retries.
+ retry the request according to CONF.glance_num_retries.
"""
version = self.version
if version in kwargs:
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
- num_attempts = 1 + FLAGS.glance_num_retries
+ num_attempts = 1 + CONF.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
we should look at maybe pushing this up to OSLO
"""
+
import os
import re
import tempfile
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
+
LOG = logging.getLogger(__name__)
image_helper_opt = [cfg.StrOpt('image_conversion_dir',
default='/tmp',
help='parent dir for tempdir used for image conversion'), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(image_helper_opt)
+CONF = cfg.CONF
+CONF.register_opts(image_helper_opt)
class QemuImgInfo(object):
def fetch_to_raw(context, image_service,
image_id, dest,
user_id=None, project_id=None):
- if (FLAGS.image_conversion_dir and not
- os.path.exists(FLAGS.image_conversion_dir)):
- os.makedirs(FLAGS.image_conversion_dir)
+ if (CONF.image_conversion_dir and not
+ os.path.exists(CONF.image_conversion_dir)):
+ os.makedirs(CONF.image_conversion_dir)
# NOTE(avishay): I'm not crazy about creating temp files which may be
# large and cause disk full errors which would confuse users.
# Unfortunately it seems that you can't pipe to 'qemu-img convert' because
# it seeks. Maybe we can think of something for a future version.
- fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir)
+ fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
os.close(fd)
with fileutils.remove_path_on_error(tmp):
fetch(context, image_service, image_id, tmp, user_id, project_id)
image_service.update(context, image_id, {}, image_file)
return
- if (FLAGS.image_conversion_dir and not
- os.path.exists(FLAGS.image_conversion_dir)):
- os.makedirs(FLAGS.image_conversion_dir)
+ if (CONF.image_conversion_dir and not
+ os.path.exists(CONF.image_conversion_dir)):
+ os.makedirs(CONF.image_conversion_dir)
- fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir)
+ fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
os.close(fd)
with fileutils.remove_path_on_error(tmp):
LOG.debug("%s was raw, converting to %s" %
"""
+
+from oslo.config import cfg
+
from cinder.db import base
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher
from cinder import version
-FLAGS = flags.FLAGS
-
-
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def __init__(self, host=None, db_driver=None):
if not host:
- host = FLAGS.host
+ host = CONF.host
self.host = host
super(Manager, self).__init__(db_driver)
def service_config(self, context):
config = {}
- for key in FLAGS:
- config[key] = FLAGS.get(key, None)
+ for key in CONF:
+ config[key] = CONF.get(key, None)
return config
"""Policy Engine For Cinder"""
+
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import policy
from cinder import utils
+
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
default='default',
help=_('Rule checked when requested rule is not found')), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(policy_opts)
+CONF = cfg.CONF
+CONF.register_opts(policy_opts)
_POLICY_PATH = None
_POLICY_CACHE = {}
global _POLICY_PATH
global _POLICY_CACHE
if not _POLICY_PATH:
- _POLICY_PATH = utils.find_config(FLAGS.policy_file)
+ _POLICY_PATH = utils.find_config(CONF.policy_file)
utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
reload_func=_set_brain)
def _set_brain(data):
- default_rule = FLAGS.policy_default_rule
+ default_rule = CONF.policy_default_rule
policy.set_brain(policy.Brain.load_json(data, default_rule))
"""Quotas for volumes."""
+
import datetime
from oslo.config import cfg
from cinder import db
from cinder import exception
-from cinder import flags
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
+
LOG = logging.getLogger(__name__)
quota_opts = [
default='cinder.quota.DbQuotaDriver',
help='default driver to use for quota checks'), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(quota_opts)
+CONF = cfg.CONF
+CONF.register_opts(quota_opts)
class DbQuotaDriver(object):
# Set up the reservation expiration
if expire is None:
- expire = FLAGS.reservation_expire
+ expire = CONF.reservation_expire
if isinstance(expire, (int, long)):
expire = datetime.timedelta(seconds=expire)
if isinstance(expire, datetime.timedelta):
# session isn't available outside the DBAPI, we
# have to do the work there.
return db.quota_reserve(context, resources, quotas, deltas, expire,
- FLAGS.until_refresh, FLAGS.max_age,
+ CONF.until_refresh, CONF.max_age,
project_id=project_id)
def commit(self, context, reservations, project_id=None):
def default(self):
"""Return the default value of the quota."""
- return FLAGS[self.flag] if self.flag else -1
+ return CONF[self.flag] if self.flag else -1
class ReservableResource(BaseResource):
"""Initialize a Quota object."""
if not quota_driver_class:
- quota_driver_class = FLAGS.quota_driver
+ quota_driver_class = CONF.quota_driver
if isinstance(quota_driver_class, basestring):
quota_driver_class = importutils.import_object(quota_driver_class)
(_junk, vol_gigs) = db.volume_data_get_for_project(context,
project_id,
session=session)
- if FLAGS.no_snapshot_gb_quota:
+ if CONF.no_snapshot_gb_quota:
return {'gigabytes': vol_gigs}
(_junk, snap_gigs) = db.snapshot_data_get_for_project(context,
"""Generic Node base class for all workers that run on hosts."""
+
import errno
import inspect
import os
from cinder import context
from cinder import db
from cinder import exception
-from cinder import flags
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
from cinder import version
from cinder import wsgi
+
LOG = logging.getLogger(__name__)
service_opts = [
default=8776,
help='port for os volume api to listen'), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(service_opts)
+CONF = cfg.CONF
+CONF.register_opts(service_opts)
class SignalExit(SystemExit):
self.timers.append(periodic)
def _create_service_ref(self, context):
- zone = FLAGS.storage_availability_zone
+ zone = CONF.storage_availability_zone
service_ref = db.service_create(context,
{'host': self.host,
'binary': self.binary,
periodic_fuzzy_delay=None, service_name=None):
"""Instantiates class and passes back application object.
- :param host: defaults to FLAGS.host
+ :param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'cinder-' part
- :param manager: defaults to FLAGS.<topic>_manager
- :param report_interval: defaults to FLAGS.report_interval
- :param periodic_interval: defaults to FLAGS.periodic_interval
- :param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay
+ :param manager: defaults to CONF.<topic>_manager
+ :param report_interval: defaults to CONF.report_interval
+ :param periodic_interval: defaults to CONF.periodic_interval
+ :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
"""
if not host:
- host = FLAGS.host
+ host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary
if not manager:
subtopic = topic.rpartition('cinder-')[2]
- manager = FLAGS.get('%s_manager' % subtopic, None)
+ manager = CONF.get('%s_manager' % subtopic, None)
if report_interval is None:
- report_interval = FLAGS.report_interval
+ report_interval = CONF.report_interval
if periodic_interval is None:
- periodic_interval = FLAGS.periodic_interval
+ periodic_interval = CONF.periodic_interval
if periodic_fuzzy_delay is None:
- periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay
+ periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_interval=periodic_interval,
def report_state(self):
"""Update the state of this service in the datastore."""
ctxt = context.get_admin_context()
- zone = FLAGS.storage_availability_zone
+ zone = CONF.storage_availability_zone
state_catalog = {}
try:
try:
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
- self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
- self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
+ self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
+ self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.server = wsgi.Server(name,
self.app,
host=self.host,
"""
fl = '%s_manager' % self.name
- if fl not in FLAGS:
+ if fl not in CONF:
return None
- manager_class_name = FLAGS.get(fl, None)
+ manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
def wait():
- LOG.debug(_('Full set of FLAGS:'))
- for flag in FLAGS:
- flag_get = FLAGS.get(flag, None)
+ LOG.debug(_('Full set of CONF:'))
+ for flag in CONF:
+ flag_get = CONF.get(flag, None)
# hide flag contents from log if it contains a password
# should use secret flag when we switch over to openstack-common
if ("_password" in flag or "_key" in flag or
Handles all requests relating to transferring ownership of volumes.
"""
+
import datetime
import hashlib
import hmac
from cinder.db import base
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder import quota
from cinder.volume import api as volume_api
+
volume_transfer_opts = [
cfg.IntOpt('volume_transfer_salt_length', default=8,
help='The number of characters in the salt.'),
help='The number of characters in the '
'autogenerated auth key.'), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_transfer_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_transfer_opts)
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
raise exception.InvalidVolume(reason=_("status must be available"))
# The salt is just a short random string.
- salt = self._get_random_string(FLAGS.volume_transfer_salt_length)
- auth_key = self._get_random_string(FLAGS.volume_transfer_key_length)
+ salt = self._get_random_string(CONF.volume_transfer_salt_length)
+ auth_key = self._get_random_string(CONF.volume_transfer_key_length)
crypt_hash = self._get_crypt_hash(salt, auth_key)
# TODO(ollie): Transfer expiry needs to be implemented.
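_get_crypt_hash's body sits outside this excerpt; given the hashlib and
hmac imports above, a plausible reading (an assumption, not necessarily
the actual implementation) is an HMAC of the auth key, keyed with the
salt, so only the derived digest ever needs to be stored:

    import hashlib
    import hmac

    def _get_crypt_hash(salt, auth_key):
        # Hypothetical helper: derive the stored hash from salt + key.
        return hmac.new(salt, auth_key, hashlib.sha1).hexdigest()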
"""Utilities and helper functions."""
+
import contextlib
import datetime
import errno
import functools
import hashlib
import inspect
-import itertools
import os
import paramiko
import pyclbr
import shlex
import shutil
import signal
-import socket
-import struct
import sys
import tempfile
import time
-import types
-import warnings
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from eventlet import greenthread
from eventlet import pools
+from oslo.config import cfg
+
from cinder import exception
-from cinder import flags
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import lockutils
from cinder.openstack.common import timeutils
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
-FLAGS = flags.FLAGS
synchronized = lockutils.synchronized_with_prefix('cinder-')
"""
possible_locations = [
config_path,
- os.path.join(FLAGS.state_path, "etc", "cinder", config_path),
- os.path.join(FLAGS.state_path, "etc", config_path),
- os.path.join(FLAGS.state_path, config_path),
+ os.path.join(CONF.state_path, "etc", "cinder", config_path),
+ os.path.join(CONF.state_path, "etc", config_path),
+ os.path.join(CONF.state_path, config_path),
"/etc/cinder/%s" % config_path,
]
:param attempts: How many times to retry cmd.
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
- in the root_helper FLAG.
+ in the root_helper config option.
:raises exception.Error: on receiving unknown arguments
:raises exception.ProcessExecutionError:
if run_as_root:
- if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
+ if CONF.rootwrap_config is None or CONF.root_helper != 'sudo':
LOG.deprecated(_('The root_helper option (which lets you specify '
'a root wrapper different from cinder-rootwrap, '
'and defaults to using sudo) is now deprecated. '
'You should use the rootwrap_config option '
'instead.'))
- if (FLAGS.rootwrap_config is not None):
+ if (CONF.rootwrap_config is not None):
cmd = ['sudo', 'cinder-rootwrap',
- FLAGS.rootwrap_config] + list(cmd)
+ CONF.rootwrap_config] + list(cmd)
else:
- cmd = shlex.split(FLAGS.root_helper) + list(cmd)
+ cmd = shlex.split(CONF.root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
The begin timestamp of this audit period is the same as the
end of the previous."""
if not unit:
- unit = FLAGS.volume_usage_audit_period
+ unit = CONF.volume_usage_audit_period
offset = 0
if '@' in unit:
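The '@' syntax lets the audit unit carry an offset, e.g. 'month@15' to
anchor the monthly period on the 15th. The parsing step alone, sketched
apart from the surrounding date arithmetic (the helper name is
illustrative):

    def split_audit_unit(unit):
        """Split 'month@15' into ('month', 15); no '@' means offset 0."""
        offset = 0
        if '@' in unit:
            unit, raw_offset = unit.split('@', 1)
            offset = int(raw_offset)
        return unit, offset

    assert split_audit_unit('month@15') == ('month', 15)
    assert split_audit_unit('day') == ('day', 0)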
def __get_backend(self):
if not self.__backend:
- backend_name = FLAGS[self.__pivot]
+ backend_name = CONF[self.__pivot]
if backend_name not in self.__backends:
raise exception.Error(_('Invalid backend: %s') % backend_name)
def monkey_patch():
- """ If the Flags.monkey_patch set as True,
+ """ If the CONF.monkey_patch set as True,
this function applies a decorator
to all functions in the specified modules.
You can set decorators for each module
- using FLAGS.monkey_patch_modules.
+ using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'cinder.api.ec2.cloud:' \
cinder.openstack.common.notifier.api.notify_decorator'
name - name of the function
function - object of the function
"""
- # If FLAGS.monkey_patch is not True, this function do nothing.
- if not FLAGS.monkey_patch:
+ # If CONF.monkey_patch is not True, this function do nothing.
+ if not CONF.monkey_patch:
return
# Get list of modules and decorators
- for module_and_decorator in FLAGS.monkey_patch_modules:
+ for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
"""Generate the URL to glance."""
# TODO(jk0): This will eventually need to take SSL into consideration
# when supported in glance.
- return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
+ return "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
@contextlib.contextmanager
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
- return abs(elapsed) <= FLAGS.service_down_time
+ return abs(elapsed) <= CONF.service_down_time
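The liveness check reads clearly in isolation; a rough standalone
equivalent (plain datetime in place of cinder's timeutils helpers, and an
inline down_time instead of CONF.service_down_time):

    import datetime

    def service_is_up(last_heartbeat, now, down_time=60):
        """A service is up if it heartbeated within down_time seconds.

        abs() tolerates clocks that disagree slightly across hosts.
        """
        elapsed = (now - last_heartbeat).total_seconds()
        return abs(elapsed) <= down_time

    now = datetime.datetime.utcnow()
    assert service_is_up(now - datetime.timedelta(seconds=10), now)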
def generate_mac_address():
"""Utility methods for working with WSGI servers."""
+
import errno
import os
import socket
import webob.exc
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder import utils
+
socket_opts = [
cfg.IntOpt('backlog',
default=4096,
CONF = cfg.CONF
CONF.register_opts(socket_opts)
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
:returns: None
"""
- config_path = config_path or FLAGS.api_paste_config
+ config_path = config_path or CONF.api_paste_config
self.config_path = utils.find_config(config_path)
def load_app(self, name):