]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Replace FLAGS with cfg.CONF in other modules, except tests
authorSergey Vilgelm <svilgelm@mirantis.com>
Thu, 13 Jun 2013 07:25:37 +0000 (11:25 +0400)
committerSergey Vilgelm <svilgelm@mirantis.com>
Thu, 13 Jun 2013 09:11:36 +0000 (13:11 +0400)
Replace all the FLAGS with cfg.CONF in cinder/
The large commit was split into several parts.

Change-Id: Iacd645997a0c50aa47079c856e1b4e33e3001243
Fixes: bug #1182037
16 files changed:
cinder/backup/__init__.py
cinder/backup/api.py
cinder/backup/manager.py
cinder/backup/rpcapi.py
cinder/backup/services/swift.py
cinder/brick/iscsi/iscsi.py
cinder/exception.py
cinder/image/glance.py
cinder/image/image_utils.py
cinder/manager.py
cinder/policy.py
cinder/quota.py
cinder/service.py
cinder/transfer/api.py
cinder/utils.py
cinder/wsgi.py

index 193b7a5f02affdfa2809ea12a64891b9cc0ce74b..1d59acc022ca0848720dcdb5a92201cb09650b6a 100644 (file)
 # Importing full names to not pollute the namespace and cause possible
 # collisions with use of 'from cinder.backup import <foo>' elsewhere.
 
-import cinder.flags
+
+from oslo.config import cfg
+
 import cinder.openstack.common.importutils
 
-API = cinder.openstack.common.importutils.import_class(
-    cinder.flags.FLAGS.backup_api_class)
+
+CONF = cfg.CONF
+
+API = cinder.openstack.common.importutils.import_class(CONF.backup_api_class)
index 725c7f854baa959159b5ea53d051f4e771467e15..188812056bb0feb5b3bb8221cc413f044735ec76 100644 (file)
@@ -22,14 +22,11 @@ from eventlet import greenthread
 from cinder.backup import rpcapi as backup_rpcapi
 from cinder.db import base
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 import cinder.policy
 import cinder.volume
 
 
-FLAGS = flags.FLAGS
-
 LOG = logging.getLogger(__name__)
 
 
index b4a972327b3a4b327395c76c5661968bf41f2965..4146694302491bd8b38750df67fe2c9deaba8cb7 100755 (executable)
@@ -36,12 +36,12 @@ from oslo.config import cfg
 
 from cinder import context
 from cinder import exception
-from cinder import flags
 from cinder import manager
 from cinder.openstack.common import excutils
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 
+
 LOG = logging.getLogger(__name__)
 
 backup_manager_opts = [
@@ -50,8 +50,8 @@ backup_manager_opts = [
                help='Service to use for backups.'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(backup_manager_opts)
+CONF = cfg.CONF
+CONF.register_opts(backup_manager_opts)
 
 
 class BackupManager(manager.SchedulerDependentManager):
@@ -60,9 +60,10 @@ class BackupManager(manager.SchedulerDependentManager):
     RPC_API_VERSION = '1.0'
 
     def __init__(self, service_name=None, *args, **kwargs):
-        self.service = importutils.import_module(FLAGS.backup_service)
-        self.az = FLAGS.storage_availability_zone
-        self.volume_manager = importutils.import_object(FLAGS.volume_manager)
+        self.service = importutils.import_module(CONF.backup_service)
+        self.az = CONF.storage_availability_zone
+        self.volume_manager = importutils.import_object(
+            CONF.volume_manager)
         self.driver = self.volume_manager.driver
         super(BackupManager, self).__init__(service_name='backup',
                                             *args, **kwargs)
@@ -120,7 +121,7 @@ class BackupManager(manager.SchedulerDependentManager):
                    'volume: %(volume_id)s') % locals())
         self.db.backup_update(context, backup_id, {'host': self.host,
                                                    'service':
-                                                   FLAGS.backup_service})
+                                                   CONF.backup_service})
 
         expected_status = 'backing-up'
         actual_status = volume['status']
@@ -194,7 +195,7 @@ class BackupManager(manager.SchedulerDependentManager):
                      backup['id'], backup['size'])
 
         backup_service = backup['service']
-        configured_service = FLAGS.backup_service
+        configured_service = CONF.backup_service
         if backup_service != configured_service:
             err = _('restore_backup aborted, the backup service currently'
                     ' configured [%(configured_service)s] is not the'
@@ -239,7 +240,7 @@ class BackupManager(manager.SchedulerDependentManager):
 
         backup_service = backup['service']
         if backup_service is not None:
-            configured_service = FLAGS.backup_service
+            configured_service = CONF.backup_service
             if backup_service != configured_service:
                 err = _('delete_backup aborted, the backup service currently'
                         ' configured [%(configured_service)s] is not the'
index a0b8771bcaec2396db3ce38d111b2829d75b1d50..cc6c2493fec05b183f10671c61b6b94cace1c230 100644 (file)
 Client side of the volume backup RPC API.
 """
 
-from cinder import flags
+
+from oslo.config import cfg
+
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import rpc
 import cinder.openstack.common.rpc.proxy
 
 
-LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
 
-FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
 
 
 class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
@@ -42,7 +44,7 @@ class BackupAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
 
     def __init__(self):
         super(BackupAPI, self).__init__(
-            topic=FLAGS.backup_topic,
+            topic=CONF.backup_topic,
             default_version=self.BASE_RPC_API_VERSION)
 
     def create_backup(self, ctxt, host, backup_id, volume_id):
index 43cbd1ff2d21c92b53a5563819ed09225c7996a1..24a989e9aafc4c9ea235b9adb93e579d3bee3a1e 100644 (file)
@@ -42,11 +42,11 @@ from oslo.config import cfg
 
 from cinder.db import base
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 from swiftclient import client as swift
 
+
 LOG = logging.getLogger(__name__)
 
 swiftbackup_service_opts = [
@@ -70,8 +70,8 @@ swiftbackup_service_opts = [
                help='Compression algorithm (None to disable)'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(swiftbackup_service_opts)
+CONF = cfg.CONF
+CONF.register_opts(swiftbackup_service_opts)
 
 
 class SwiftBackupService(base.Base):
@@ -98,14 +98,14 @@ class SwiftBackupService(base.Base):
 
     def __init__(self, context, db_driver=None):
         self.context = context
-        self.swift_url = '%s%s' % (FLAGS.backup_swift_url,
+        self.swift_url = '%s%s' % (CONF.backup_swift_url,
                                    self.context.project_id)
-        self.az = FLAGS.storage_availability_zone
-        self.data_block_size_bytes = FLAGS.backup_swift_object_size
-        self.swift_attempts = FLAGS.backup_swift_retry_attempts
-        self.swift_backoff = FLAGS.backup_swift_retry_backoff
+        self.az = CONF.storage_availability_zone
+        self.data_block_size_bytes = CONF.backup_swift_object_size
+        self.swift_attempts = CONF.backup_swift_retry_attempts
+        self.swift_backoff = CONF.backup_swift_retry_backoff
         self.compressor = \
-            self._get_compressor(FLAGS.backup_compression_algorithm)
+            self._get_compressor(CONF.backup_compression_algorithm)
         self.conn = swift.Connection(None, None, None,
                                      retries=self.swift_attempts,
                                      preauthurl=self.swift_url,
@@ -133,7 +133,7 @@ class SwiftBackupService(base.Base):
         LOG.debug(_('_create_container started, container: %(container)s,'
                     'backup: %(backup_id)s') % locals())
         if container is None:
-            container = FLAGS.backup_swift_container
+            container = CONF.backup_swift_container
             self.db.backup_update(context, backup_id, {'container': container})
         if not self._check_container_exists(container):
             self.conn.put_container(container)
@@ -236,7 +236,7 @@ class SwiftBackupService(base.Base):
                 break
             LOG.debug(_('reading chunk of data from volume'))
             if self.compressor is not None:
-                algorithm = FLAGS.backup_compression_algorithm.lower()
+                algorithm = CONF.backup_compression_algorithm.lower()
                 obj[object_name]['compression'] = algorithm
                 data_size_bytes = len(data)
                 data = self.compressor.compress(data)
index 4d3c9018767c1c77bdb653c26163a86049251795..4ddaebf89ed70ef845c1c13a6beb4593eadf99f5 100644 (file)
 Helper code for the iSCSI volume driver.
 
 """
+
+
 import os
 import re
 
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume import utils as volume_utils
 
+
 LOG = logging.getLogger(__name__)
 
 iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
@@ -59,9 +61,9 @@ iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
                                )
                     ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(iscsi_helper_opt)
-FLAGS.import_opt('volume_name_template', 'cinder.db')
+CONF = cfg.CONF
+CONF.register_opts(iscsi_helper_opt)
+CONF.import_opt('volume_name_template', 'cinder.db')
 
 
 class TargetAdmin(object):
@@ -133,7 +135,7 @@ class TgtAdm(TargetAdmin):
         # Note(jdg) tid and lun aren't used by TgtAdm but remain for
         # compatibility
 
-        fileutils.ensure_tree(FLAGS.volumes_dir)
+        fileutils.ensure_tree(CONF.volumes_dir)
 
         vol_id = name.split(':')[1]
         if chap_auth is None:
@@ -151,7 +153,7 @@ class TgtAdm(TargetAdmin):
             """ % (name, path, chap_auth)
 
         LOG.info(_('Creating iscsi_target for: %s') % vol_id)
-        volumes_dir = FLAGS.volumes_dir
+        volumes_dir = CONF.volumes_dir
         volume_path = os.path.join(volumes_dir, vol_id)
 
         f = open(volume_path, 'w+')
@@ -177,7 +179,7 @@ class TgtAdm(TargetAdmin):
             os.unlink(volume_path)
             raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
 
-        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
+        iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
         tid = self._get_target(iqn)
         if tid is None:
             LOG.error(_("Failed to create iscsi target for volume "
@@ -192,10 +194,10 @@ class TgtAdm(TargetAdmin):
 
     def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
         LOG.info(_('Removing iscsi_target for: %s') % vol_id)
-        vol_uuid_file = FLAGS.volume_name_template % vol_id
-        volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
+        vol_uuid_file = CONF.volume_name_template % vol_id
+        volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
         if os.path.isfile(volume_path):
-            iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
+            iqn = '%s%s' % (CONF.iscsi_target_prefix,
                             vol_uuid_file)
         else:
             raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
@@ -232,10 +234,10 @@ class IetAdm(TargetAdmin):
         super(IetAdm, self).__init__('ietadm', execute)
 
     def _iotype(self, path):
-        if FLAGS.iscsi_iotype == 'auto':
+        if CONF.iscsi_iotype == 'auto':
             return 'blockio' if volume_utils.is_block(path) else 'fileio'
         else:
-            return FLAGS.iscsi_iotype
+            return CONF.iscsi_iotype
 
     def create_iscsi_target(self, name, tid, lun, path,
                             chap_auth=None, **kwargs):
@@ -249,7 +251,7 @@ class IetAdm(TargetAdmin):
             (type, username, password) = chap_auth.split()
             self._new_auth(tid, type, username, password, **kwargs)
 
-        conf_file = FLAGS.iet_conf
+        conf_file = CONF.iet_conf
         if os.path.exists(conf_file):
             try:
                 volume_conf = """
@@ -274,8 +276,8 @@ class IetAdm(TargetAdmin):
         LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
         self._delete_logicalunit(tid, lun, **kwargs)
         self._delete_target(tid, **kwargs)
-        vol_uuid_file = FLAGS.volume_name_template % vol_id
-        conf_file = FLAGS.iet_conf
+        vol_uuid_file = CONF.volume_name_template % vol_id
+        conf_file = CONF.iet_conf
         if os.path.exists(conf_file):
             with utils.temporary_chown(conf_file):
                 try:
@@ -387,8 +389,8 @@ class LioAdm(TargetAdmin):
             (chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:]
 
         extra_args = []
-        if FLAGS.lio_initiator_iqns:
-            extra_args.append(FLAGS.lio_initiator_iqns)
+        if CONF.lio_initiator_iqns:
+            extra_args.append(CONF.lio_initiator_iqns)
 
         try:
             command_args = ['rtstool',
@@ -407,7 +409,7 @@ class LioAdm(TargetAdmin):
 
                 raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
 
-        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
+        iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
         tid = self._get_target(iqn)
         if tid is None:
             LOG.error(_("Failed to create iscsi target for volume "
@@ -419,7 +421,7 @@ class LioAdm(TargetAdmin):
     def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
         LOG.info(_('Removing iscsi_target: %s') % vol_id)
         vol_uuid_name = 'volume-%s' % vol_id
-        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_uuid_name)
+        iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_uuid_name)
 
         try:
             self._execute('rtstool',
@@ -462,11 +464,11 @@ class LioAdm(TargetAdmin):
 
 
 def get_target_admin():
-    if FLAGS.iscsi_helper == 'tgtadm':
+    if CONF.iscsi_helper == 'tgtadm':
         return TgtAdm()
-    elif FLAGS.iscsi_helper == 'fake':
+    elif CONF.iscsi_helper == 'fake':
         return FakeIscsiHelper()
-    elif FLAGS.iscsi_helper == 'lioadm':
+    elif CONF.iscsi_helper == 'lioadm':
         return LioAdm()
     else:
         return IetAdm()
index d0811b5712189ae60ded3f0918c351e80f776b2c..c668e140407e8b312b4b8a9a8eabea8d5d62a1f2 100644 (file)
@@ -27,10 +27,10 @@ SHOULD include dedicated exception logging.
 from oslo.config import cfg
 import webob.exc
 
-from cinder import flags
 from cinder.openstack.common import exception as com_exception
 from cinder.openstack.common import log as logging
 
+
 LOG = logging.getLogger(__name__)
 
 exc_log_opts = [
@@ -39,8 +39,8 @@ exc_log_opts = [
                 help='make exception message format errors fatal'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(exc_log_opts)
+CONF = cfg.CONF
+CONF.register_opts(exc_log_opts)
 
 
 class ConvertedException(webob.exc.WSGIHTTPException):
@@ -105,7 +105,7 @@ class CinderException(Exception):
                 LOG.exception(_('Exception in string format operation'))
                 for name, value in kwargs.iteritems():
                     LOG.error("%s: %s" % (name, value))
-                if FLAGS.fatal_exception_format_errors:
+                if CONF.fatal_exception_format_errors:
                     raise e
                 else:
                     # at least get the core message out if something happened
index 34c0fd2cc9f5255e4b9bbfe870e9087633a97c93..80b23911ec586e71fca0913ea56a2b04a67e05f8 100644 (file)
@@ -17,6 +17,7 @@
 
 """Implementation of an image service that uses Glance as the backend"""
 
+
 from __future__ import absolute_import
 
 import copy
@@ -28,16 +29,17 @@ import urlparse
 
 import glanceclient
 import glanceclient.exc
+from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import jsonutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 
 
+CONF = cfg.CONF
+
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
 
 
 def _parse_image_ref(image_href):
@@ -56,19 +58,19 @@ def _parse_image_ref(image_href):
 
 
 def _create_glance_client(context, netloc, use_ssl,
-                          version=FLAGS.glance_api_version):
+                          version=CONF.glance_api_version):
     """Instantiate a new glanceclient.Client object."""
     if version is None:
-        version = FLAGS.glance_api_version
+        version = CONF.glance_api_version
     params = {}
     if use_ssl:
         scheme = 'https'
         # https specific params
-        params['insecure'] = FLAGS.glance_api_insecure
-        params['ssl_compression'] = FLAGS.glance_api_ssl_compression
+        params['insecure'] = CONF.glance_api_insecure
+        params['ssl_compression'] = CONF.glance_api_ssl_compression
     else:
         scheme = 'http'
-    if FLAGS.auth_strategy == 'keystone':
+    if CONF.auth_strategy == 'keystone':
         params['token'] = context.auth_token
     endpoint = '%s://%s' % (scheme, netloc)
     return glanceclient.Client(str(version), endpoint, **params)
@@ -77,12 +79,12 @@ def _create_glance_client(context, netloc, use_ssl,
 def get_api_servers():
     """Return Iterable over shuffled api servers.
 
-    Shuffle a list of FLAGS.glance_api_servers and return an iterator
+    Shuffle a list of CONF.glance_api_servers and return an iterator
     that will cycle through the list, looping around to the beginning
     if necessary.
     """
     api_servers = []
-    for api_server in FLAGS.glance_api_servers:
+    for api_server in CONF.glance_api_servers:
         if '//' not in api_server:
             api_server = 'http://' + api_server
         url = urlparse.urlparse(api_server)
@@ -129,7 +131,7 @@ class GlanceClientWrapper(object):
         """Call a glance client method.
 
         If we get a connection error,
-        retry the request according to FLAGS.glance_num_retries.
+        retry the request according to CONF.glance_num_retries.
         """
         version = self.version
         if version in kwargs:
@@ -138,7 +140,7 @@ class GlanceClientWrapper(object):
         retry_excs = (glanceclient.exc.ServiceUnavailable,
                       glanceclient.exc.InvalidEndpoint,
                       glanceclient.exc.CommunicationError)
-        num_attempts = 1 + FLAGS.glance_num_retries
+        num_attempts = 1 + CONF.glance_num_retries
 
         for attempt in xrange(1, num_attempts + 1):
             client = self.client or self._create_onetime_client(context,
index 62587e2e7abf1cd0741e3afa29e1f5bc4a7869f0..2c5450e92200ee5577141bb4d1183db026d67e5c 100644 (file)
@@ -25,6 +25,7 @@ Some slight modifications, but at some point
 we should look at maybe pushign this up to OSLO
 """
 
+
 import os
 import re
 import tempfile
@@ -32,19 +33,19 @@ import tempfile
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import fileutils
 from cinder.openstack.common import log as logging
 from cinder import utils
 
+
 LOG = logging.getLogger(__name__)
 
 image_helper_opt = [cfg.StrOpt('image_conversion_dir',
                     default='/tmp',
                     help='parent dir for tempdir used for image conversion'), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(image_helper_opt)
+CONF = cfg.CONF
+CONF.register_opts(image_helper_opt)
 
 
 class QemuImgInfo(object):
@@ -211,15 +212,15 @@ def fetch(context, image_service, image_id, path, _user_id, _project_id):
 def fetch_to_raw(context, image_service,
                  image_id, dest,
                  user_id=None, project_id=None):
-    if (FLAGS.image_conversion_dir and not
-            os.path.exists(FLAGS.image_conversion_dir)):
-        os.makedirs(FLAGS.image_conversion_dir)
+    if (CONF.image_conversion_dir and not
+            os.path.exists(CONF.image_conversion_dir)):
+        os.makedirs(CONF.image_conversion_dir)
 
     # NOTE(avishay): I'm not crazy about creating temp files which may be
     # large and cause disk full errors which would confuse users.
     # Unfortunately it seems that you can't pipe to 'qemu-img convert' because
     # it seeks. Maybe we can think of something for a future version.
-    fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir)
+    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
     os.close(fd)
     with fileutils.remove_path_on_error(tmp):
         fetch(context, image_service, image_id, tmp, user_id, project_id)
@@ -267,11 +268,11 @@ def upload_volume(context, image_service, image_meta, volume_path):
                 image_service.update(context, image_id, {}, image_file)
         return
 
-    if (FLAGS.image_conversion_dir and not
-            os.path.exists(FLAGS.image_conversion_dir)):
-        os.makedirs(FLAGS.image_conversion_dir)
+    if (CONF.image_conversion_dir and not
+            os.path.exists(CONF.image_conversion_dir)):
+        os.makedirs(CONF.image_conversion_dir)
 
-    fd, tmp = tempfile.mkstemp(dir=FLAGS.image_conversion_dir)
+    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
     os.close(fd)
     with fileutils.remove_path_on_error(tmp):
         LOG.debug("%s was raw, converting to %s" %
index 96270e530a939681b8d86741a57db501b4ca130f..d7cf41e2bc01d24fdb0796a8f75d60b0a7e35593 100644 (file)
@@ -53,8 +53,10 @@ This module provides Manager, a base class for managers.
 
 """
 
+
+from oslo.config import cfg
+
 from cinder.db import base
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import periodic_task
 from cinder.openstack.common.rpc import dispatcher as rpc_dispatcher
@@ -62,9 +64,7 @@ from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
 
 
-FLAGS = flags.FLAGS
-
-
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
@@ -74,7 +74,7 @@ class Manager(base.Base, periodic_task.PeriodicTasks):
 
     def __init__(self, host=None, db_driver=None):
         if not host:
-            host = FLAGS.host
+            host = CONF.host
         self.host = host
         super(Manager, self).__init__(db_driver)
 
@@ -103,8 +103,8 @@ class Manager(base.Base, periodic_task.PeriodicTasks):
 
     def service_config(self, context):
         config = {}
-        for key in FLAGS:
-            config[key] = FLAGS.get(key, None)
+        for key in CONF:
+            config[key] = CONF.get(key, None)
         return config
 
 
index fcf1649ff39f412d7aea6e5dfeda349716be5ab2..390fc6ac09446dce2f881ed4cbb2bb5e82f4b60c 100644 (file)
 
 """Policy Engine For Cinder"""
 
+
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import policy
 from cinder import utils
 
+
 policy_opts = [
     cfg.StrOpt('policy_file',
                default='policy.json',
@@ -32,8 +33,8 @@ policy_opts = [
                default='default',
                help=_('Rule checked when requested rule is not found')), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(policy_opts)
+CONF = cfg.CONF
+CONF.register_opts(policy_opts)
 
 _POLICY_PATH = None
 _POLICY_CACHE = {}
@@ -51,13 +52,13 @@ def init():
     global _POLICY_PATH
     global _POLICY_CACHE
     if not _POLICY_PATH:
-        _POLICY_PATH = utils.find_config(FLAGS.policy_file)
+        _POLICY_PATH = utils.find_config(CONF.policy_file)
     utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
                            reload_func=_set_brain)
 
 
 def _set_brain(data):
-    default_rule = FLAGS.policy_default_rule
+    default_rule = CONF.policy_default_rule
     policy.set_brain(policy.Brain.load_json(data, default_rule))
 
 
index 59868d33d2ee9e39d05f5c7658c782c20ce06717..3f9b174ecd3851a2cdb0656cd173a4b1d1fc661d 100644 (file)
 
 """Quotas for volumes."""
 
+
 import datetime
 
 from oslo.config import cfg
 
 from cinder import db
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 
+
 LOG = logging.getLogger(__name__)
 
 quota_opts = [
@@ -55,8 +56,8 @@ quota_opts = [
                default='cinder.quota.DbQuotaDriver',
                help='default driver to use for quota checks'), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(quota_opts)
+CONF = cfg.CONF
+CONF.register_opts(quota_opts)
 
 
 class DbQuotaDriver(object):
@@ -296,7 +297,7 @@ class DbQuotaDriver(object):
 
         # Set up the reservation expiration
         if expire is None:
-            expire = FLAGS.reservation_expire
+            expire = CONF.reservation_expire
         if isinstance(expire, (int, long)):
             expire = datetime.timedelta(seconds=expire)
         if isinstance(expire, datetime.timedelta):
@@ -321,7 +322,7 @@ class DbQuotaDriver(object):
         #            session isn't available outside the DBAPI, we
         #            have to do the work there.
         return db.quota_reserve(context, resources, quotas, deltas, expire,
-                                FLAGS.until_refresh, FLAGS.max_age,
+                                CONF.until_refresh, CONF.max_age,
                                 project_id=project_id)
 
     def commit(self, context, reservations, project_id=None):
@@ -446,7 +447,7 @@ class BaseResource(object):
     def default(self):
         """Return the default value of the quota."""
 
-        return FLAGS[self.flag] if self.flag else -1
+        return CONF[self.flag] if self.flag else -1
 
 
 class ReservableResource(BaseResource):
@@ -538,7 +539,7 @@ class QuotaEngine(object):
         """Initialize a Quota object."""
 
         if not quota_driver_class:
-            quota_driver_class = FLAGS.quota_driver
+            quota_driver_class = CONF.quota_driver
 
         if isinstance(quota_driver_class, basestring):
             quota_driver_class = importutils.import_object(quota_driver_class)
@@ -792,7 +793,7 @@ def _sync_gigabytes(context, project_id, session):
     (_junk, vol_gigs) = db.volume_data_get_for_project(context,
                                                        project_id,
                                                        session=session)
-    if FLAGS.no_snapshot_gb_quota:
+    if CONF.no_snapshot_gb_quota:
         return {'gigabytes': vol_gigs}
 
     (_junk, snap_gigs) = db.snapshot_data_get_for_project(context,
index 4e7c549770d177de36f1254d214ab16df45bc182..3ffd7f8924624d2b0e14683c0e8e5708f619d4a2 100644 (file)
@@ -19,6 +19,7 @@
 
 """Generic Node base class for all workers that run on hosts."""
 
+
 import errno
 import inspect
 import os
@@ -34,7 +35,6 @@ from oslo.config import cfg
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import rpc
@@ -42,6 +42,7 @@ from cinder import utils
 from cinder import version
 from cinder import wsgi
 
+
 LOG = logging.getLogger(__name__)
 
 service_opts = [
@@ -63,8 +64,8 @@ service_opts = [
                default=8776,
                help='port for os volume api to listen'), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(service_opts)
+CONF = cfg.CONF
+CONF.register_opts(service_opts)
 
 
 class SignalExit(SystemExit):
@@ -398,7 +399,7 @@ class Service(object):
             self.timers.append(periodic)
 
     def _create_service_ref(self, context):
-        zone = FLAGS.storage_availability_zone
+        zone = CONF.storage_availability_zone
         service_ref = db.service_create(context,
                                         {'host': self.host,
                                          'binary': self.binary,
@@ -417,30 +418,30 @@ class Service(object):
                periodic_fuzzy_delay=None, service_name=None):
         """Instantiates class and passes back application object.
 
-        :param host: defaults to FLAGS.host
+        :param host: defaults to CONF.host
         :param binary: defaults to basename of executable
         :param topic: defaults to bin_name - 'cinder-' part
-        :param manager: defaults to FLAGS.<topic>_manager
-        :param report_interval: defaults to FLAGS.report_interval
-        :param periodic_interval: defaults to FLAGS.periodic_interval
-        :param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay
+        :param manager: defaults to CONF.<topic>_manager
+        :param report_interval: defaults to CONF.report_interval
+        :param periodic_interval: defaults to CONF.periodic_interval
+        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
 
         """
         if not host:
-            host = FLAGS.host
+            host = CONF.host
         if not binary:
             binary = os.path.basename(inspect.stack()[-1][1])
         if not topic:
             topic = binary
         if not manager:
             subtopic = topic.rpartition('cinder-')[2]
-            manager = FLAGS.get('%s_manager' % subtopic, None)
+            manager = CONF.get('%s_manager' % subtopic, None)
         if report_interval is None:
-            report_interval = FLAGS.report_interval
+            report_interval = CONF.report_interval
         if periodic_interval is None:
-            periodic_interval = FLAGS.periodic_interval
+            periodic_interval = CONF.periodic_interval
         if periodic_fuzzy_delay is None:
-            periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay
+            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
         service_obj = cls(host, binary, topic, manager,
                           report_interval=report_interval,
                           periodic_interval=periodic_interval,
@@ -486,7 +487,7 @@ class Service(object):
     def report_state(self):
         """Update the state of this service in the datastore."""
         ctxt = context.get_admin_context()
-        zone = FLAGS.storage_availability_zone
+        zone = CONF.storage_availability_zone
         state_catalog = {}
         try:
             try:
@@ -531,8 +532,8 @@ class WSGIService(object):
         self.manager = self._get_manager()
         self.loader = loader or wsgi.Loader()
         self.app = self.loader.load_app(name)
-        self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
-        self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
+        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
+        self.port = getattr(CONF, '%s_listen_port' % name, 0)
         self.server = wsgi.Server(name,
                                   self.app,
                                   host=self.host,
@@ -549,10 +550,10 @@ class WSGIService(object):
 
         """
         fl = '%s_manager' % self.name
-        if fl not in FLAGS:
+        if fl not in CONF:
             return None
 
-        manager_class_name = FLAGS.get(fl, None)
+        manager_class_name = CONF.get(fl, None)
         if not manager_class_name:
             return None
 
@@ -605,9 +606,9 @@ def serve(*servers):
 
 
 def wait():
-    LOG.debug(_('Full set of FLAGS:'))
-    for flag in FLAGS:
-        flag_get = FLAGS.get(flag, None)
+    LOG.debug(_('Full set of CONF:'))
+    for flag in CONF:
+        flag_get = CONF.get(flag, None)
         # hide flag contents from log if contains a password
         # should use secret flag when switch over to openstack-common
         if ("_password" in flag or "_key" in flag or
index db53468d99559c9598decc9ff5128f031d2568b1..c8cd7cf870875ab94adec83857c84774afdb5d70 100644 (file)
@@ -17,6 +17,7 @@
 Handles all requests relating to transferring ownership of volumes.
 """
 
+
 import datetime
 import hashlib
 import hmac
@@ -26,11 +27,11 @@ from oslo.config import cfg
 
 from cinder.db import base
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder import quota
 from cinder.volume import api as volume_api
 
+
 volume_transfer_opts = [
     cfg.IntOpt('volume_transfer_salt_length', default=8,
                help='The number of characters in the salt.'),
@@ -38,8 +39,8 @@ volume_transfer_opts = [
                help='The number of characters in the '
                'autogenerated auth key.'), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_transfer_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_transfer_opts)
 
 LOG = logging.getLogger(__name__)
 QUOTAS = quota.QUOTAS
@@ -102,8 +103,8 @@ class API(base.Base):
             raise exception.InvalidVolume(reason=_("status must be available"))
 
         # The salt is just a short random string.
-        salt = self._get_random_string(FLAGS.volume_transfer_salt_length)
-        auth_key = self._get_random_string(FLAGS.volume_transfer_key_length)
+        salt = self._get_random_string(CONF.volume_transfer_salt_length)
+        auth_key = self._get_random_string(CONF.volume_transfer_key_length)
         crypt_hash = self._get_crypt_hash(salt, auth_key)
 
         # TODO(ollie): Transfer expiry needs to be implemented.
index 51f4fb9f165e492292c8ac22784caba54106596d..98c172dbf30db85f8489aaa7e69792a9956da116 100644 (file)
 
 """Utilities and helper functions."""
 
+
 import contextlib
 import datetime
 import errno
 import functools
 import hashlib
 import inspect
-import itertools
 import os
 import paramiko
 import pyclbr
@@ -34,13 +34,9 @@ import re
 import shlex
 import shutil
 import signal
-import socket
-import struct
 import sys
 import tempfile
 import time
-import types
-import warnings
 from xml.dom import minidom
 from xml.parsers import expat
 from xml import sax
@@ -52,8 +48,9 @@ from eventlet.green import subprocess
 from eventlet import greenthread
 from eventlet import pools
 
+from oslo.config import cfg
+
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import excutils
 from cinder.openstack.common import importutils
 from cinder.openstack.common import lockutils
@@ -61,10 +58,10 @@ from cinder.openstack.common import log as logging
 from cinder.openstack.common import timeutils
 
 
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
 PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
-FLAGS = flags.FLAGS
 
 synchronized = lockutils.synchronized_with_prefix('cinder-')
 
@@ -79,9 +76,9 @@ def find_config(config_path):
     """
     possible_locations = [
         config_path,
-        os.path.join(FLAGS.state_path, "etc", "cinder", config_path),
-        os.path.join(FLAGS.state_path, "etc", config_path),
-        os.path.join(FLAGS.state_path, config_path),
+        os.path.join(CONF.state_path, "etc", "cinder", config_path),
+        os.path.join(CONF.state_path, "etc", config_path),
+        os.path.join(CONF.state_path, config_path),
         "/etc/cinder/%s" % config_path,
     ]
 
@@ -121,7 +118,7 @@ def execute(*cmd, **kwargs):
     :param attempts:           How many times to retry cmd.
     :param run_as_root:        True | False. Defaults to False. If set to True,
                                the command is prefixed by the command specified
-                               in the root_helper FLAG.
+                               in the root_helper CONF.
 
     :raises exception.Error: on receiving unknown arguments
     :raises exception.ProcessExecutionError:
@@ -149,18 +146,18 @@ def execute(*cmd, **kwargs):
 
     if run_as_root:
 
-        if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
+        if CONF.rootwrap_config is None or CONF.root_helper != 'sudo':
             LOG.deprecated(_('The root_helper option (which lets you specify '
                              'a root wrapper different from cinder-rootwrap, '
                              'and defaults to using sudo) is now deprecated. '
                              'You should use the rootwrap_config option '
                              'instead.'))
 
-        if (FLAGS.rootwrap_config is not None):
+        if (CONF.rootwrap_config is not None):
             cmd = ['sudo', 'cinder-rootwrap',
-                   FLAGS.rootwrap_config] + list(cmd)
+                   CONF.rootwrap_config] + list(cmd)
         else:
-            cmd = shlex.split(FLAGS.root_helper) + list(cmd)
+            cmd = shlex.split(CONF.root_helper) + list(cmd)
     cmd = map(str, cmd)
 
     while attempts > 0:
@@ -410,7 +407,7 @@ def last_completed_audit_period(unit=None):
               The begin timestamp of this audit period is the same as the
               end of the previous."""
     if not unit:
-        unit = FLAGS.volume_usage_audit_period
+        unit = CONF.volume_usage_audit_period
 
     offset = 0
     if '@' in unit:
@@ -564,7 +561,7 @@ class LazyPluggable(object):
 
     def __get_backend(self):
         if not self.__backend:
-            backend_name = FLAGS[self.__pivot]
+            backend_name = CONF[self.__pivot]
             if backend_name not in self.__backends:
                 raise exception.Error(_('Invalid backend: %s') % backend_name)
 
@@ -829,11 +826,11 @@ def is_valid_ipv4(address):
 
 
 def monkey_patch():
-    """  If the Flags.monkey_patch set as True,
+    """  If the CONF.monkey_patch set as True,
     this function patches a decorator
     for all functions in specified modules.
     You can set decorators for each modules
-    using FLAGS.monkey_patch_modules.
+    using CONF.monkey_patch_modules.
     The format is "Module path:Decorator function".
     Example: 'cinder.api.ec2.cloud:' \
      cinder.openstack.common.notifier.api.notify_decorator'
@@ -844,11 +841,11 @@ def monkey_patch():
     name - name of the function
     function - object of the function
     """
-    # If FLAGS.monkey_patch is not True, this function do nothing.
-    if not FLAGS.monkey_patch:
+    # If CONF.monkey_patch is not True, this function do nothing.
+    if not CONF.monkey_patch:
         return
     # Get list of modules and decorators
-    for module_and_decorator in FLAGS.monkey_patch_modules:
+    for module_and_decorator in CONF.monkey_patch_modules:
         module, decorator_name = module_and_decorator.split(':')
         # import decorator function
         decorator = importutils.import_class(decorator_name)
@@ -897,7 +894,7 @@ def generate_glance_url():
     """Generate the URL to glance."""
     # TODO(jk0): This will eventually need to take SSL into consideration
     # when supported in glance.
-    return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
+    return "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
 
 
 @contextlib.contextmanager
@@ -1010,7 +1007,7 @@ def service_is_up(service):
     last_heartbeat = service['updated_at'] or service['created_at']
     # Timestamps in DB are UTC.
     elapsed = total_seconds(timeutils.utcnow() - last_heartbeat)
-    return abs(elapsed) <= FLAGS.service_down_time
+    return abs(elapsed) <= CONF.service_down_time
 
 
 def generate_mac_address():
index bcd63006af27a360c4b99e56610554232ad252c4..9b589abcfb737deea33dc587fe81c3d352c93e7f 100644 (file)
@@ -19,6 +19,7 @@
 
 """Utility methods for working with WSGI servers."""
 
+
 import errno
 import os
 import socket
@@ -36,10 +37,10 @@ import webob.dec
 import webob.exc
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder import utils
 
+
 socket_opts = [
     cfg.IntOpt('backlog',
                default=4096,
@@ -65,7 +66,6 @@ socket_opts = [
 CONF = cfg.CONF
 CONF.register_opts(socket_opts)
 
-FLAGS = flags.FLAGS
 LOG = logging.getLogger(__name__)
 
 
@@ -475,7 +475,7 @@ class Loader(object):
         :returns: None
 
         """
-        config_path = config_path or FLAGS.api_paste_config
+        config_path = config_path or CONF.api_paste_config
         self.config_path = utils.find_config(config_path)
 
     def load_app(self, name):