review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Replace FLAGS with cfg.CONF in volume
author Sergey Vilgelm <svilgelm@mirantis.com>
Thu, 13 Jun 2013 09:06:25 +0000 (13:06 +0400)
committer Sergey Vilgelm <svilgelm@mirantis.com>
Thu, 13 Jun 2013 09:28:55 +0000 (13:28 +0400)
Replace all uses of FLAGS with cfg.CONF in cinder/volume.
The original large commit was split into several parts.

Change-Id: I435fe91f414fda027531076bdbcb6660a3c635af
Fixes: bug #1182037
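
For reviewers unfamiliar with the pattern, the whole change reduces to the
sketch below (it reuses the option names from this commit, but is not an
excerpt of any single file):

    from oslo.config import cfg

    # Before this commit:
    #   from cinder import flags
    #   FLAGS = flags.FLAGS
    #   FLAGS.register_opt(volume_host_opt)
    #   flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')

    volume_host_opt = cfg.BoolOpt('snapshot_same_host',
                                  default=True,
                                  help='Create volume from snapshot at the '
                                       'host where snapshot resides')

    # After: options are registered directly on the global oslo.config object.
    CONF = cfg.CONF
    CONF.register_opt(volume_host_opt)

    # import_opt() replaces flags.DECLARE(): importing the named module
    # registers 'storage_availability_zone' on CONF as a side effect.
    CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')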
cinder/volume/api.py
cinder/volume/configuration.py
cinder/volume/drivers/hds/hds.py
cinder/volume/drivers/windows.py
cinder/volume/drivers/zadara.py
cinder/volume/rpcapi.py
cinder/volume/utils.py
cinder/volume/volume_types.py

index 17274297a3aa264c4f67149a2c8c037ea496282e..7d1510e4088c9e408dd2f75059c9b7aa1927c9f2 100644 (file)
@@ -20,6 +20,7 @@
 Handles all requests relating to volumes.
 """
 
+
 import functools
 
 from oslo.config import cfg
@@ -27,7 +28,6 @@ from oslo.config import cfg
 from cinder import context
 from cinder.db import base
 from cinder import exception
-from cinder import flags
 from cinder.image import glance
 from cinder.openstack.common import excutils
 from cinder.openstack.common import log as logging
@@ -35,20 +35,22 @@ from cinder.openstack.common import timeutils
 import cinder.policy
 from cinder import quota
 from cinder.scheduler import rpcapi as scheduler_rpcapi
+from cinder import units
 from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import volume_types
 
+
 volume_host_opt = cfg.BoolOpt('snapshot_same_host',
                               default=True,
                               help='Create volume from snapshot at the host '
                                    'where snapshot resides')
 
-FLAGS = flags.FLAGS
-FLAGS.register_opt(volume_host_opt)
-flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')
+CONF = cfg.CONF
+CONF.register_opt(volume_host_opt)
+CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
 
 LOG = logging.getLogger(__name__)
-GB = 1048576 * 1024
+GB = units.GiB
 QUOTAS = quota.QUOTAS
 
 
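A side fix folded into this hunk: the hand-rolled constant and the new one
are the same value, assuming cinder.units defines GiB as 2**30 (quick check):

    >>> 1048576 * 1024 == 2 ** 30
    True
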
@@ -185,7 +187,7 @@ class API(base.Base):
                 raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
 
         if availability_zone is None:
-            availability_zone = FLAGS.storage_availability_zone
+            availability_zone = CONF.storage_availability_zone
         else:
             self._check_availabilty_zone(availability_zone)
 
@@ -249,7 +251,7 @@ class API(base.Base):
         snapshot_id = request_spec['snapshot_id']
         image_id = request_spec['image_id']
 
-        if snapshot_id and FLAGS.snapshot_same_host:
+        if snapshot_id and CONF.snapshot_same_host:
             snapshot_ref = self.db.snapshot_get(context, snapshot_id)
             source_volume_ref = self.db.volume_get(context,
                                                    snapshot_ref['volume_id'])
@@ -288,7 +290,7 @@ class API(base.Base):
         else:
             self.scheduler_rpcapi.create_volume(
                 context,
-                FLAGS.volume_topic,
+                CONF.volume_topic,
                 volume_id,
                 snapshot_id,
                 image_id,
@@ -300,7 +302,7 @@ class API(base.Base):
             return
 
         ctxt = context.get_admin_context()
-        topic = FLAGS.volume_topic
+        topic = CONF.volume_topic
         volume_services = self.db.service_get_all_by_topic(ctxt, topic)
 
         # NOTE(haomai): In case of volume services isn't init or
@@ -556,7 +558,7 @@ class API(base.Base):
             raise exception.InvalidVolume(reason=msg)
 
         try:
-            if FLAGS.no_snapshot_gb_quota:
+            if CONF.no_snapshot_gb_quota:
                 reservations = QUOTAS.reserve(context, snapshots=1)
             else:
                 reservations = QUOTAS.reserve(context, snapshots=1,
index 13935be6764f42eefc7f245c1c48fb161aff60ce..893f4638a3935fef6a1470db3eda4038e72be44e 100644 (file)
@@ -42,13 +42,13 @@ option group. This is due to the way cfg works. All cfg options must be defined
 and registered in the group in which they are used.
 """
 
+
 from oslo.config import cfg
 
-from cinder import flags
 from cinder.openstack.common import log as logging
 
 
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
@@ -62,13 +62,13 @@ class Configuration(object):
         # set the local conf so that __call__'s know what to use
         if self.config_group:
             self._ensure_config_values(volume_opts)
-            self.local_conf = FLAGS._get(self.config_group)
+            self.local_conf = CONF._get(self.config_group)
         else:
-            self.local_conf = FLAGS
+            self.local_conf = CONF
 
     def _ensure_config_values(self, volume_opts):
-            FLAGS.register_opts(volume_opts,
-                                group=self.config_group)
+            CONF.register_opts(volume_opts,
+                               group=self.config_group)
 
     def append_config_values(self, volume_opts):
         self._ensure_config_values(volume_opts)
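
The rule the docstring above spells out (every option must be registered in
the group it is read from) can be illustrated in a few lines; the 'backend1'
group and its option below are hypothetical, not part of this commit:

    from oslo.config import cfg

    CONF = cfg.CONF
    opts = [cfg.StrOpt('volume_backend_name',
                       help='Backend name reported to the scheduler')]

    # Reading CONF.backend1.volume_backend_name before this call would
    # raise cfg.NoSuchOptError: the option exists only inside its group.
    CONF.register_opts(opts, group='backend1')

    CONF([], project='example')  # parse no CLI args so values resolve
    print(CONF.backend1.volume_backend_name)  # None (the default)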
index 45c944bbd945ac346058f6687ebef522dcbc025f..a85fd1c6d6668544db80f5baa2a5b62b210cea66 100644 (file)
 iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS) platform.
 """
 
+
 from oslo.config import cfg
 from xml.etree import ElementTree as ETree
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume import driver
-
 from cinder.volume.drivers.hds.hus_backend import HusBackend
 
 
@@ -38,8 +37,8 @@ HUS_OPTS = [
                default='/opt/hds/hus/cinder_hus_conf.xml',
                help='configuration file for HDS cinder plugin for HUS'), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(HUS_OPTS)
+CONF = cfg.CONF
+CONF.register_opts(HUS_OPTS)
 
 HI_IQN = 'iqn.1994-04.jp.co.hitachi:'  # fixed string, for now.
 
index a0113b15350a6b698acc9fda4d0a7fdc61f403c9..87672bb504d4c970e15d370c0aea408afce8defc 100644 (file)
@@ -20,13 +20,13 @@ Volume driver for Windows Server 2012
 This driver requires ISCSI target role installed
 
 """
+
+
 import os
-import sys
 
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 
@@ -37,15 +37,14 @@ if os.name == 'nt':
 
 LOG = logging.getLogger(__name__)
 
-FLAGS = flags.FLAGS
-
 windows_opts = [
     cfg.StrOpt('windows_iscsi_lun_path',
                default='C:\iSCSIVirtualDisks',
                help='Path to store VHD backed volumes'),
 ]
 
-FLAGS.register_opts(windows_opts)
+CONF = cfg.CONF
+CONF.register_opts(windows_opts)
 
 
 class WindowsDriver(driver.ISCSIDriver):
@@ -137,7 +136,7 @@ class WindowsDriver(driver.ISCSIDriver):
                      SizeInMB=volume['size'] * 1024)
 
     def _get_vhd_path(self, volume):
-        base_vhd_folder = FLAGS.windows_iscsi_lun_path
+        base_vhd_folder = CONF.windows_iscsi_lun_path
         if not os.path.exists(base_vhd_folder):
                 LOG.debug(_('Creating folder %s '), base_vhd_folder)
                 os.makedirs(base_vhd_folder)
@@ -194,7 +193,7 @@ class WindowsDriver(driver.ISCSIDriver):
             resources
         :return: iscsiadm-formatted provider location string
         """
-        target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+        target_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
         #ISCSI target creation
         try:
             cl = self._conn_wmi.__getattr__("WT_Host")
@@ -230,7 +229,7 @@ class WindowsDriver(driver.ISCSIDriver):
     def remove_export(self, context, volume):
         """Driver exntry point to remove an export for a volume.
         """
-        target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+        target_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
 
         #Get ISCSI target
         wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0]
index 988ac647afeab600e617beac96b163188d7ea254..138c8ad1ec2dc65101d22873ef65987dd5ef9a3f 100644 (file)
@@ -21,17 +21,17 @@ Volume driver for Zadara Virtual Private Storage Array (VPSA).
 This driver requires VPSA with API ver.12.06 or higher.
 """
 
+
 import httplib
 
 from lxml import etree
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
-from cinder import utils
 from cinder.volume import driver
 
+
 LOG = logging.getLogger("cinder.volume.driver")
 
 zadara_opts = [
@@ -78,8 +78,8 @@ zadara_opts = [
                 default=True,
                 help="Don't halt on deletion of non-existing volumes"), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(zadara_opts)
+CONF = cfg.CONF
+CONF.register_opts(zadara_opts)
 
 
 class ZadaraVPSAConnection(object):
@@ -117,12 +117,12 @@ class ZadaraVPSAConnection(object):
                               '/api/volumes.xml',
                               {'display_name': kwargs.get('name'),
                                'virtual_capacity': kwargs.get('size'),
-                               'raid_group_name[]': FLAGS.zadara_vpsa_poolname,
+                               'raid_group_name[]': CONF.zadara_vpsa_poolname,
                                'quantity': 1,
-                               'cache': FLAGS.zadara_default_cache_policy,
-                               'crypt': FLAGS.zadara_default_encryption,
-                               'mode': FLAGS.zadara_default_striping_mode,
-                               'stripesize': FLAGS.zadara_default_stripesize,
+                               'cache': CONF.zadara_default_cache_policy,
+                               'crypt': CONF.zadara_default_encryption,
+                               'mode': CONF.zadara_default_striping_mode,
+                               'stripesize': CONF.zadara_default_stripesize,
                                'force': 'NO'}),
             'delete_volume': ('DELETE',
                               '/api/volumes/%s.xml' % kwargs.get('vpsa_vol'),
@@ -252,11 +252,11 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         Any initialization the volume driver does while starting.
         Establishes initial connection with VPSA and retrieves access_key.
         """
-        self.vpsa = ZadaraVPSAConnection(FLAGS.zadara_vpsa_ip,
-                                         FLAGS.zadara_vpsa_port,
-                                         FLAGS.zadara_vpsa_use_ssl,
-                                         FLAGS.zadara_user,
-                                         FLAGS.zadara_password)
+        self.vpsa = ZadaraVPSAConnection(CONF.zadara_vpsa_ip,
+                                         CONF.zadara_vpsa_port,
+                                         CONF.zadara_vpsa_use_ssl,
+                                         CONF.zadara_user,
+                                         CONF.zadara_password)
 
     def check_for_setup_error(self):
         """Returns an error (exception) if prerequisites aren't met."""
@@ -334,7 +334,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         """Create volume."""
         self.vpsa.send_cmd(
             'create_volume',
-            name=FLAGS.zadara_vol_name_template % volume['name'],
+            name=CONF.zadara_vol_name_template % volume['name'],
             size=volume['size'])
 
     def delete_volume(self, volume):
@@ -344,13 +344,13 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         Return OK if it doesn't exist. Auto-detach from all servers.
         """
         # Get volume name
-        name = FLAGS.zadara_vol_name_template % volume['name']
+        name = CONF.zadara_vol_name_template % volume['name']
         vpsa_vol = self._get_vpsa_volume_name(name)
         if not vpsa_vol:
             msg = _('Volume %(name)s could not be found. '
                     'It might be already deleted') % locals()
             LOG.warning(msg)
-            if FLAGS.zadara_vpsa_allow_nonexistent_delete:
+            if CONF.zadara_vpsa_allow_nonexistent_delete:
                 return
             else:
                 raise exception.VolumeNotFound(volume_id=name)
@@ -361,7 +361,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
         servers = self._xml_parse_helper(xml_tree, 'servers',
                                          ('iqn', None), first=False)
         if servers:
-            if not FLAGS.zadara_vpsa_auto_detach_on_delete:
+            if not CONF.zadara_vpsa_auto_detach_on_delete:
                 raise exception.VolumeAttached(volume_id=name)
 
             for server in servers:
@@ -404,7 +404,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
             raise exception.ZadaraServerCreateFailure(name=initiator_name)
 
         # Get volume name
-        name = FLAGS.zadara_vol_name_template % volume['name']
+        name = CONF.zadara_vol_name_template % volume['name']
         vpsa_vol = self._get_vpsa_volume_name(name)
         if not vpsa_vol:
             raise exception.VolumeNotFound(volume_id=name)
@@ -459,7 +459,7 @@ class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
             raise exception.ZadaraServerNotFound(name=initiator_name)
 
         # Get volume name
-        name = FLAGS.zadara_vol_name_template % volume['name']
+        name = CONF.zadara_vol_name_template % volume['name']
         vpsa_vol = self._get_vpsa_volume_name(name)
         if not vpsa_vol:
             raise exception.VolumeNotFound(volume_id=name)
index adb9e4d0fc8209bc259c6ff66cb198ad79221c07..e3ab89e00336fcd28353508c31bb6f50a1124608 100644 (file)
 Client side of the volume RPC API.
 """
 
-from cinder import exception
-from cinder import flags
+
+from oslo.config import cfg
+
 from cinder.openstack.common import rpc
 import cinder.openstack.common.rpc.proxy
 
 
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
 
 
 class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
@@ -45,7 +46,7 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
 
     def __init__(self, topic=None):
         super(VolumeAPI, self).__init__(
-            topic=topic or FLAGS.volume_topic,
+            topic=topic or CONF.volume_topic,
             default_version=self.BASE_RPC_API_VERSION)
 
     def create_volume(self, ctxt, volume, host,
index 9ddd309c18d3daa2efa3e82c89cfca427deb63bb..3970ea4cd6486f6ec7b01bd18e0c436487c209d8 100644 (file)
 
 """Volume-related Utilities and helpers."""
 
+
 import os
 import stat
 
-from cinder import flags
+from oslo.config import cfg
+
 from cinder.openstack.common import log as logging
 from cinder.openstack.common.notifier import api as notifier_api
 from cinder.openstack.common import timeutils
 from cinder import utils
 
 
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
@@ -81,7 +83,7 @@ def _usage_from_volume(context, volume_ref, **kw):
 def notify_about_volume_usage(context, volume, event_suffix,
                               extra_usage_info=None, host=None):
     if not host:
-        host = FLAGS.host
+        host = CONF.host
 
     if not extra_usage_info:
         extra_usage_info = {}
@@ -114,7 +116,7 @@ def _usage_from_snapshot(context, snapshot_ref, **extra_usage_info):
 def notify_about_snapshot_usage(context, snapshot, event_suffix,
                                 extra_usage_info=None, host=None):
     if not host:
-        host = FLAGS.host
+        host = CONF.host
 
     if not extra_usage_info:
         extra_usage_info = {}
index 4228820bac3646dd4d563f54c7ad71acd00aa2df..7c6f09709072a559ba2e0748cc2c5c4fc012c67d 100644 (file)
 
 """Built-in volume type properties."""
 
+
+from oslo.config import cfg
+
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common.db import exception as db_exc
 from cinder.openstack.common import log as logging
 
-FLAGS = flags.FLAGS
+
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
@@ -115,7 +118,7 @@ def get_volume_type_by_name(context, name):
 
 def get_default_volume_type():
     """Get the default volume type."""
-    name = FLAGS.default_volume_type
+    name = CONF.default_volume_type
     vol_type = {}
 
     if name is not None: