]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix config registration in cinder volume drivers.
authorJohn Griffith <john.griffith@solidfire.com>
Thu, 30 May 2013 22:05:45 +0000 (16:05 -0600)
committerJohn Griffith <john.griffith@solidfire.com>
Wed, 5 Jun 2013 03:51:17 +0000 (21:51 -0600)
The config documentation relies on options being registered
on a module's import.  Our need to move the drivers to using
self.configuration for multi-backend support means that options
wouldn't be loaded until object initialization which breaks
documentation.

This patch puts a dummy CONF init/load back in the drivers. While putting
this change together I came across a number of drivers still using FLAGS,
and even worse a number of drivers using a mixture of FLAGS and CONF and
self.configuration.  So most of those are cleaned up here as well.

Note there are two drivers that were not updated at all here:
  1. windows.py
  2. zadara.py

The zadara folks have indicated that they're in the process of updating and
releasing a new version of their driver so I left that as is.

The windows driver needs a bit of work to switch over.

Fixes bug: 1179159

Change-Id: I90165299bf080da17741d027e36e361540da0ff8

25 files changed:
cinder/tests/test_coraid.py
cinder/tests/test_glusterfs.py
cinder/tests/test_scality.py
cinder/tests/test_windows.py
cinder/tests/test_xenapi_sm.py
cinder/volume/drivers/coraid.py
cinder/volume/drivers/emc/emc_smis_common.py
cinder/volume/drivers/emc/emc_smis_iscsi.py
cinder/volume/drivers/glusterfs.py
cinder/volume/drivers/huawei/huawei_iscsi.py
cinder/volume/drivers/lvm.py
cinder/volume/drivers/netapp/iscsi.py
cinder/volume/drivers/netapp/nfs.py
cinder/volume/drivers/nexenta/volume.py
cinder/volume/drivers/nfs.py
cinder/volume/drivers/rbd.py
cinder/volume/drivers/san/hp/hp_3par_common.py
cinder/volume/drivers/san/san.py
cinder/volume/drivers/san/solaris.py
cinder/volume/drivers/scality.py
cinder/volume/drivers/sheepdog.py
cinder/volume/drivers/solidfire.py
cinder/volume/drivers/storwize_svc.py
cinder/volume/drivers/xenapi/sm.py
cinder/volume/drivers/xiv.py

index cda121b2832a8b8cdb61455dcaa3e0ed15e9074e..3edd3f16c02bbcf0c88e2ce3009c548a453c5d05 100644 (file)
@@ -120,6 +120,8 @@ class TestCoraidDriver(test.TestCase):
         configuration.coraid_user = fake_esm_username
         configuration.coraid_group = fake_esm_group
         configuration.coraid_password = fake_esm_password
+        configuration.volume_name_template = "volume-%s"
+        configuration.snapshot_name_template = "snapshot-%s"
 
         self.drv = CoraidDriver(configuration=configuration)
         self.drv.do_setup({})
index cb1802275b5b3ab22922a69d19c8f8b6f1e08db6..d908403a909b0ec05b82fc2ef2bc97a1b21e63a1 100644 (file)
@@ -87,7 +87,7 @@ class GlusterFsDriverTestCase(test.TestCase):
 
     def test_local_path(self):
         """local_path common use case."""
-        glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
+        glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
         drv = self._driver
 
         volume = DumbVolume()
@@ -188,7 +188,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         """_get_mount_point_for_share should calculate correct value."""
         drv = self._driver
 
-        glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
+        glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
 
         self.assertEqual('/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc',
                          drv._get_mount_point_for_share(
@@ -206,7 +206,7 @@ class GlusterFsDriverTestCase(test.TestCase):
                   (df_total_size, df_avail)
         df_output = df_head + df_data
 
-        setattr(glusterfs.FLAGS, 'glusterfs_disk_util', 'df')
+        setattr(glusterfs.CONF, 'glusterfs_disk_util', 'df')
 
         mox.StubOutWithMock(drv, '_get_mount_point_for_share')
         drv._get_mount_point_for_share(self.TEST_EXPORT1).\
@@ -225,7 +225,7 @@ class GlusterFsDriverTestCase(test.TestCase):
 
         mox.VerifyAll()
 
-        delattr(glusterfs.FLAGS, 'glusterfs_disk_util')
+        delattr(glusterfs.CONF, 'glusterfs_disk_util')
 
     def test_get_available_capacity_with_du(self):
         """_get_available_capacity should calculate correct value."""
@@ -368,7 +368,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         """do_setup should throw error if shares config is not configured."""
         drv = self._driver
 
-        glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+        glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
 
         self.assertRaises(exception.GlusterfsException,
                           drv.do_setup, IsA(context.RequestContext))
@@ -378,7 +378,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         mox = self._mox
         drv = self._driver
 
-        glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+        glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
 
         mox.StubOutWithMock(os.path, 'exists')
         os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
@@ -457,7 +457,7 @@ class GlusterFsDriverTestCase(test.TestCase):
         drv = self._driver
         volume = self._simple_volume()
 
-        setattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes', True)
+        setattr(glusterfs.CONF, 'glusterfs_sparsed_volumes', True)
 
         mox.StubOutWithMock(drv, '_create_sparsed_file')
         mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
@@ -471,7 +471,7 @@ class GlusterFsDriverTestCase(test.TestCase):
 
         mox.VerifyAll()
 
-        delattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes')
+        delattr(glusterfs.CONF, 'glusterfs_sparsed_volumes')
 
     def test_create_nonsparsed_volume(self):
         mox = self._mox
index f32261cc86c77becbb698cded2e6447ac1e2e781..ec1ed0501c3240f56e6cb7b511a896d0bf783d58 100644 (file)
@@ -77,9 +77,9 @@ class ScalityDriverTestCase(test.TestCase):
                 raise e
 
     def _configure_driver(self):
-        scality.FLAGS.scality_sofs_config = self.TEST_CONFIG
-        scality.FLAGS.scality_sofs_mount_point = self.TEST_MOUNT
-        scality.FLAGS.scality_sofs_volume_dir = self.TEST_VOLDIR
+        scality.CONF.scality_sofs_config = self.TEST_CONFIG
+        scality.CONF.scality_sofs_mount_point = self.TEST_MOUNT
+        scality.CONF.scality_sofs_volume_dir = self.TEST_VOLDIR
 
     def _execute_wrapper(self, cmd, *args, **kwargs):
         try:
@@ -116,13 +116,13 @@ class ScalityDriverTestCase(test.TestCase):
 
     def test_setup_no_config(self):
         """Missing SOFS configuration shall raise an error."""
-        scality.FLAGS.scality_sofs_config = None
+        scality.CONF.scality_sofs_config = None
         self.assertRaises(exception.VolumeBackendAPIException,
                           self._driver.do_setup, None)
 
     def test_setup_missing_config(self):
         """Non-existent SOFS configuration file shall raise an error."""
-        scality.FLAGS.scality_sofs_config = 'nonexistent.conf'
+        scality.CONF.scality_sofs_config = 'nonexistent.conf'
         self.assertRaises(exception.VolumeBackendAPIException,
                           self._driver.do_setup, None)
 
index 2ee09b73db52753c996c32a9da9eae7d96ee431b..709e2f5f2b2e4da3b22b63cc49c9af0084743a92 100644 (file)
@@ -20,13 +20,14 @@ Unit tests for Windows Server 2012 OpenStack Cinder volume driver
 """
 import sys
 
-import cinder.flags
+from oslo.config import cfg
+
 from cinder.tests.windows import basetestcase
 from cinder.tests.windows import db_fakes
 from cinder.tests.windows import windowsutils
 from cinder.volume.drivers import windows
 
-FLAGS = cinder.flags.FLAGS
+CONF = cfg.CONF
 
 
 class TestWindowsDriver(basetestcase.BaseTestCase):
@@ -88,19 +89,19 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
                 self._wutils.delete_snapshot(self._snapshot_data['name'])
             if (self._connector_data and
                     self._wutils.initiator_id_exists(
-                        "%s%s" % (FLAGS.iscsi_target_prefix,
+                        "%s%s" % (CONF.iscsi_target_prefix,
                                   self._volume_data['name']),
                         self._connector_data['initiator'])):
-                target_name = "%s%s" % (FLAGS.iscsi_target_prefix,
+                target_name = "%s%s" % (CONF.iscsi_target_prefix,
                                         self._volume_data['name'])
                 initiator_name = self._connector_data['initiator']
                 self._wutils.delete_initiator_id(target_name, initiator_name)
             if (self._volume_data and
                     self._wutils.export_exists("%s%s" %
-                                               (FLAGS.iscsi_target_prefix,
+                                               (CONF.iscsi_target_prefix,
                                                 self._volume_data['name']))):
                 self._wutils.delete_export(
-                    "%s%s" % (FLAGS.iscsi_target_prefix,
+                    "%s%s" % (CONF.iscsi_target_prefix,
                               self._volume_data['name']))
 
         finally:
@@ -182,7 +183,7 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
         volume_name = self._volume_data['name']
         self.assertEquals(
             retval,
-            {'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix,
+            {'provider_location': "%s%s" % (CONF.iscsi_target_prefix,
                                             volume_name)})
 
     def test_initialize_connection(self):
index be1c34d465baaad19042c8cf47f9472ad8879200..950bdca9d5fb1018ee6fe92e54a448c4a58d1f02 100644 (file)
@@ -54,7 +54,7 @@ def get_configured_driver(server='ignore_server', path='ignore_path'):
 class DriverTestCase(test.TestCase):
 
     def assert_flag(self, flagname):
-        self.assertTrue(hasattr(driver.FLAGS, flagname))
+        self.assertTrue(hasattr(driver.CONF, flagname))
 
     def test_config_options(self):
         self.assert_flag('xenapi_connection_url')
@@ -210,10 +210,10 @@ class DriverTestCase(test.TestCase):
         drv.nfs_ops = ops
         drv.db = db
 
-        mock.StubOutWithMock(driver, 'FLAGS')
-        driver.FLAGS.xenapi_nfs_server = server
-        driver.FLAGS.xenapi_nfs_serverpath = serverpath
-        driver.FLAGS.xenapi_sr_base_path = sr_base_path
+        mock.StubOutWithMock(driver, 'CONF')
+        driver.CONF.xenapi_nfs_server = server
+        driver.CONF.xenapi_nfs_serverpath = serverpath
+        driver.CONF.xenapi_sr_base_path = sr_base_path
 
         return mock, drv
 
index 0c251304bc241bc9d9bff22f39cc9e58ccd00dd2..95ebe5aaaffde60b03c3c67f36890d1b8239715a 100644 (file)
@@ -22,15 +22,11 @@ Contrib : Larry Matter <support@coraid.com>
 """
 
 import cookielib
-import os
 import time
 import urllib2
 
 from oslo.config import cfg
 
-from cinder import context
-from cinder import exception
-from cinder import flags
 from cinder.openstack.common import jsonutils
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
@@ -38,7 +34,6 @@ from cinder.volume import volume_types
 
 LOG = logging.getLogger(__name__)
 
-FLAGS = flags.FLAGS
 coraid_opts = [
     cfg.StrOpt('coraid_esm_address',
                default='',
@@ -57,7 +52,9 @@ coraid_opts = [
                default='coraid_repository',
                help='Volume Type key name to store ESM Repository Name'),
 ]
-FLAGS.register_opts(coraid_opts)
+
+CONF = cfg.CONF
+CONF.register_opts(coraid_opts)
 
 
 class CoraidException(Exception):
@@ -325,11 +322,11 @@ class CoraidDriver(driver.VolumeDriver):
 
     def create_snapshot(self, snapshot):
         """Create a Snapshot."""
+        volume_name = (self.configuration.volume_name_template
+                       % snapshot['volume_id'])
+        snapshot_name = (self.configuration.snapshot_name_template
+                         % snapshot['id'])
         try:
-            volume_name = (FLAGS.volume_name_template
-                           % snapshot['volume_id'])
-            snapshot_name = (FLAGS.snapshot_name_template
-                             % snapshot['id'])
             self.esm.create_snapshot(volume_name, snapshot_name)
         except Exception, e:
             msg = _('Failed to Create Snapshot %(snapname)s')
@@ -339,9 +336,9 @@ class CoraidDriver(driver.VolumeDriver):
 
     def delete_snapshot(self, snapshot):
         """Delete a Snapshot."""
+        snapshot_name = (self.configuration.snapshot_name_template
+                         % snapshot['id'])
         try:
-            snapshot_name = (FLAGS.snapshot_name_template
-                             % snapshot['id'])
             self.esm.delete_snapshot(snapshot_name)
         except Exception:
             msg = _('Failed to Delete Snapshot %(snapname)s')
@@ -351,10 +348,10 @@ class CoraidDriver(driver.VolumeDriver):
 
     def create_volume_from_snapshot(self, volume, snapshot):
         """Create a Volume from a Snapshot."""
+        snapshot_name = (self.configuration.snapshot_name_template
+                         % snapshot['id'])
+        repository = self._get_repository(volume['volume_type'])
         try:
-            snapshot_name = (FLAGS.snapshot_name_template
-                             % snapshot['id'])
-            repository = self._get_repository(volume['volume_type'])
             self.esm.create_volume_from_snapshot(snapshot_name,
                                                  volume['name'],
                                                  repository)
index 9f686bca3f5757e2d191f65c4de0489d79be0c61..11d96989edb867b492224f4856a2286777cf4658 100644 (file)
@@ -29,12 +29,11 @@ from oslo.config import cfg
 from xml.dom.minidom import parseString
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 
 LOG = logging.getLogger(__name__)
 
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
 
 try:
     import pywbem
@@ -62,7 +61,7 @@ class EMCSMISCommon():
                          default=CINDER_EMC_CONFIG_FILE,
                          help='use this file for cinder emc plugin '
                          'config data')
-        FLAGS.register_opt(opt)
+        CONF.register_opt(opt)
         self.protocol = prtcl
         self.configuration = configuration
         self.configuration.append_config_values([opt])
index 25308c53a30fae25f6c5690544dbb2e16a85e375..53682f9bfb928f20330339ec402a1fe1fc166feb 100644 (file)
@@ -20,20 +20,14 @@ ISCSI Drivers for EMC VNX and VMAX arrays based on SMI-S.
 
 """
 
-import os
-import time
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
-from cinder import utils
 from cinder.volume import driver
 from cinder.volume.drivers.emc import emc_smis_common
 
 LOG = logging.getLogger(__name__)
 
-FLAGS = flags.FLAGS
-
 
 class EMCSMISISCSIDriver(driver.ISCSIDriver):
     """EMC ISCSI Drivers for VMAX and VNX using SMI-S."""
@@ -42,8 +36,8 @@ class EMCSMISISCSIDriver(driver.ISCSIDriver):
 
         super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
         self.common = emc_smis_common.EMCSMISCommon(
-                                        'iSCSI',
-                                        configuration=self.configuration)
+            'iSCSI',
+            configuration=self.configuration)
 
     def check_for_setup_error(self):
         pass
index 0f1dfd7be66b37ebafbad12c21d1bd867898cae6..5b3fa0cd3a99ac4c71d478c87fcc657fe342eb6f 100644 (file)
@@ -21,7 +21,6 @@ import os
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers import nfs
 
@@ -44,8 +43,8 @@ volume_opts = [
                       'In such case volume creation takes a lot of time.'))]
 VERSION = '1.0'
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
 
 
 class GlusterfsDriver(nfs.RemoteFsDriver):
index 869552d0304f2839ce8d7524e16c4d09412bfea5..4c868668b147253b2653ed87c088abe129bc6248 100644 (file)
@@ -48,6 +48,10 @@ VOL_AND_SNAP_NAME_PREFIX = 'OpenStack_'
 READBUFFERSIZE = 8192
 
 
+CONF = cfg.CONF
+CONF.register_opts(huawei_opt)
+
+
 class SSHConn(utils.SSHPool):
     """Define a new class inherited to SSHPool.
 
index 7317c2901b9f9e2f5ad140cd1036037fdb7bd454..f9fc569022abbfec8426cb14ad5a4b3278324ef9 100644 (file)
@@ -28,7 +28,6 @@ from oslo.config import cfg
 
 from cinder.brick.iscsi import iscsi
 from cinder import exception
-from cinder import flags
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder import utils
@@ -60,8 +59,8 @@ volume_opts = [
                     'this requires lvm_mirrors + 2 pvs with available space'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
 
 
 class LVMVolumeDriver(driver.VolumeDriver):
@@ -266,7 +265,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
         LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
-        volume_name = FLAGS.volume_name_template % src_vref['id']
+        volume_name = self.configuration.volume_name_template % src_vref['id']
         temp_id = 'tmp-snap-%s' % src_vref['id']
         temp_snapshot = {'volume_name': volume_name,
                          'size': src_vref['size'],
@@ -346,8 +345,10 @@ class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
                            "provision for volume: %s"), volume['id'])
                 return
 
-            iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
-            volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
+            iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
+                                   volume['name'])
+            volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
+                                          volume['name'])
             iscsi_target = 1
 
             self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
@@ -611,17 +612,22 @@ class ThinLVMVolumeDriver(LVMISCSIDriver):
         out, err = self._execute('lvs', '--option',
                                  'name', '--noheadings',
                                  run_as_root=True)
-        pool_name = "%s-pool" % FLAGS.volume_group
+        pool_name = "%s-pool" % self.configuration.volume_group
         if pool_name not in out:
-            if not FLAGS.pool_size:
-                out, err = self._execute('vgs', FLAGS.volume_group,
-                                         '--noheadings', '--options',
-                                         'name,size', run_as_root=True)
+            if not self.configuration.pool_size:
+                out, err = self._execute('vgs',
+                                         self.configuration.volume_group,
+                                         '--noheadings',
+                                         '--options',
+                                         'name,size',
+                                         run_as_root=True)
+
                 size = re.sub(r'[\.][\d][\d]', '', out.split()[1])
             else:
-                size = "%s" % FLAGS.pool_size
+                size = "%s" % self.configuration.pool_size
 
-            pool_path = '%s/%s' % (FLAGS.volume_group, pool_name)
+            pool_path = '%s/%s' % (self.configuration.volume_group,
+                                   pool_name)
             out, err = self._execute('lvcreate', '-T', '-L', size,
                                      pool_path, run_as_root=True)
 
@@ -638,7 +644,8 @@ class ThinLVMVolumeDriver(LVMISCSIDriver):
         """Creates a logical volume. Can optionally return a Dictionary of
         changes to the volume object to be persisted."""
         sizestr = self._sizestr(volume['size'])
-        vg_name = ("%s/%s-pool" % (FLAGS.volume_group, FLAGS.volume_group))
+        vg_name = ("%s/%s-pool" % (self.configuration.volume_group,
+                                   self.configuration.volume_group))
         self._try_execute('lvcreate', '-T', '-V', sizestr, '-n',
                           volume['name'], vg_name, run_as_root=True)
 
@@ -647,19 +654,21 @@ class ThinLVMVolumeDriver(LVMISCSIDriver):
         if self._volume_not_present(volume['name']):
             return True
         self._try_execute('lvremove', '-f', "%s/%s" %
-                          (FLAGS.volume_group,
+                          (self.configuration.volume_group,
                            self._escape_snapshot(volume['name'])),
                           run_as_root=True)
 
     def create_cloned_volume(self, volume, src_vref):
         """Creates a clone of the specified volume."""
         LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
-        orig_lv_name = "%s/%s" % (FLAGS.volume_group, src_vref['name'])
+        orig_lv_name = "%s/%s" % (self.configuration.volume_group,
+                                  src_vref['name'])
         self._do_lvm_snapshot(orig_lv_name, volume, False)
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot of a volume."""
-        orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
+        orig_lv_name = "%s/%s" % (self.configuration.volume_group,
+                                  snapshot['volume_name'])
         self._do_lvm_snapshot(orig_lv_name, snapshot)
 
     def get_volume_stats(self, refresh=False):
index 7fe24b6f798eb23a41af84c802ec74d45f03f5d1..0e6b2e95bd25f8bcf39fd412735b15ddcdaa62de 100644 (file)
@@ -85,6 +85,10 @@ netapp_opts = [
                     ' 7 mode'), ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(netapp_opts)
+
+
 class DfmDataset(object):
     def __init__(self, id, name, project, type):
         self.id = id
index bb3890aae24c6b8afa93555cfa211fff4ff05f55..2110adeb95cfef46142086157a3fda5ddf02b1e8 100644 (file)
@@ -42,6 +42,10 @@ netapp_nfs_opts = [
                help='Does snapshot creation call returns immediately')]
 
 
+CONF = cfg.CONF
+CONF.register_opts(netapp_nfs_opts)
+
+
 class NetAppNFSDriver(nfs.NfsDriver):
     """Executes commands relating to Volumes."""
     def __init__(self, *args, **kwargs):
index 94b73fd3aa51246718e6e967f3cfd43096cd6191..2444bc2ffb77ee0ecb303e55dd5205e514c1f27f 100644 (file)
@@ -25,7 +25,6 @@
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 from cinder.volume.drivers import nexenta
@@ -33,7 +32,6 @@ from cinder.volume.drivers.nexenta import jsonrpc
 
 VERSION = '1.0'
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
 
 nexenta_opts = [
     cfg.StrOpt('nexenta_host',
@@ -71,7 +69,9 @@ nexenta_opts = [
                 default=False,
                 help='flag to create sparse volumes'),
 ]
-FLAGS.register_opts(nexenta_opts)
+
+CONF = cfg.CONF
+CONF.register_opts(nexenta_opts)
 
 
 class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
@@ -81,38 +81,38 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         super(NexentaDriver, self).__init__(*args, **kwargs)
 
     def do_setup(self, context):
-        protocol = FLAGS.nexenta_rest_protocol
+        protocol = CONF.nexenta_rest_protocol
         auto = protocol == 'auto'
         if auto:
             protocol = 'http'
         self.nms = jsonrpc.NexentaJSONProxy(
-            '%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
-                                      FLAGS.nexenta_rest_port),
-            FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
+            '%s://%s:%s/rest/nms/' % (protocol, CONF.nexenta_host,
+                                      CONF.nexenta_rest_port),
+            CONF.nexenta_user, CONF.nexenta_password, auto=auto)
 
     def check_for_setup_error(self):
         """Verify that the volume for our zvols exists.
 
         :raise: :py:exc:`LookupError`
         """
-        if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
+        if not self.nms.volume.object_exists(CONF.nexenta_volume):
             raise LookupError(_("Volume %s does not exist in Nexenta SA"),
-                              FLAGS.nexenta_volume)
+                              CONF.nexenta_volume)
 
     @staticmethod
     def _get_zvol_name(volume_name):
         """Return zvol name that corresponds given volume name."""
-        return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
+        return '%s/%s' % (CONF.nexenta_volume, volume_name)
 
     @staticmethod
     def _get_target_name(volume_name):
         """Return iSCSI target name to access volume."""
-        return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
+        return '%s%s' % (CONF.nexenta_target_prefix, volume_name)
 
     @staticmethod
     def _get_target_group_name(volume_name):
         """Return Nexenta iSCSI target group name for volume."""
-        return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
+        return '%s%s' % (CONF.nexenta_target_group_prefix, volume_name)
 
     def create_volume(self, volume):
         """Create a zvol on appliance.
@@ -122,7 +122,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         self.nms.zvol.create(
             self._get_zvol_name(volume['name']),
             '%sG' % (volume['size'],),
-            FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
+            CONF.nexenta_blocksize, CONF.nexenta_sparse)
 
     def delete_volume(self, volume):
         """Destroy a zvol on appliance.
@@ -237,8 +237,8 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
             else:
                 LOG.info(_('Ignored LUN mapping entry addition error "%s"'
                            ' while ensuring export'), exc)
-        return '%s:%s,1 %s 0' % (FLAGS.nexenta_host,
-                                 FLAGS.nexenta_iscsi_target_portal_port,
+        return '%s:%s,1 %s 0' % (CONF.nexenta_host,
+                                 CONF.nexenta_iscsi_target_portal_port,
                                  target_name)
 
     def create_export(self, _ctx, volume):
@@ -324,7 +324,7 @@ class NexentaDriver(driver.ISCSIDriver):  # pylint: disable=R0921
         data["driver_version"] = VERSION
         data["storage_protocol"] = 'iSCSI'
 
-        stats = self.nms.volume.get_child_props(FLAGS.nexenta_volume,
+        stats = self.nms.volume.get_child_props(CONF.nexenta_volume,
                                                 'health|size|used|available')
         total_unit = stats['size'][-1]
         total_amount = float(stats['size'][:-1])
index b6909168c34fd719cfed6da8d7ea2136995133f9..4ba7ec44c269e129c2f0964e4dc78168e12db8a1 100644 (file)
@@ -59,6 +59,9 @@ volume_opts = [
 
 VERSION = '1.1'
 
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
 
 class RemoteFsDriver(driver.VolumeDriver):
     """Common base for drivers that work like NFS."""
index 53efc2955715dd6c36228866e5805d0c96675be8..617b90477df3f9bfc66d235c3d68d85490a3358c 100644 (file)
@@ -128,6 +128,9 @@ class RADOSClient(object):
     def __exit__(self, type_, value, traceback):
         self.driver._disconnect_from_rados(self.cluster, self.ioctx)
 
+CONF = cfg.CONF
+CONF.register_opts(rbd_opts)
+
 
 class RBDDriver(driver.VolumeDriver):
     """Implements RADOS block device (RBD) volume commands"""
index e99f94388cb436169d88e3cf0a49c84b5fe1e6c6..dc3c43ba83eae956c9de8ca6ad403efcbd762f9f 100644 (file)
@@ -94,6 +94,10 @@ hp3par_opts = [
 ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(hp3par_opts)
+
+
 class HP3PARCommon(object):
 
     stats = {}
@@ -834,7 +838,7 @@ exit
                 # use the wwn to see if we can find the hostname
                 hostname = self._get_3par_hostname_from_wwn_iqn(wwn_iqn)
                 # no 3par host, re-throw
-                if (hostname == None):
+                if (hostname is None):
                     raise
             else:
             # not a 'host does not exist' HTTPNotFound exception, re-throw
index fbd9c2ead874d0f724806766d47b09bc02fc9fea..c0ac8ac772523cf477d745af3c4168c0076b0815 100644 (file)
@@ -27,7 +27,6 @@ from eventlet import greenthread
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder import utils
 from cinder.volume.driver import ISCSIDriver
@@ -72,8 +71,8 @@ san_opts = [
                help='Maximum ssh connections in the pool'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(san_opts)
+CONF = cfg.CONF
+CONF.register_opts(san_opts)
 
 
 class SanISCSIDriver(ISCSIDriver):
index ca478cd186d3becd341475cc5bac1e22b9d8ca12..f6537ddae8606816f01f72b52fba82721e7f55a5 100644 (file)
@@ -15,7 +15,6 @@
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.san.san import SanISCSIDriver
 
@@ -26,8 +25,8 @@ solaris_opts = [
                default='rpool/',
                help='The ZFS path under which to create zvols for volumes.'), ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(solaris_opts)
+CONF = cfg.CONF
+CONF.register_opts(solaris_opts)
 
 
 class SolarisISCSIDriver(SanISCSIDriver):
@@ -61,6 +60,7 @@ class SolarisISCSIDriver(SanISCSIDriver):
         super(SolarisISCSIDriver, self).__init__(*cmd,
                                                  execute=self._execute,
                                                  **kwargs)
+        self.configuration.append_config_values(solaris_opts)
 
     def _execute(self, *cmd, **kwargs):
         new_cmd = ['pfexec']
@@ -123,7 +123,8 @@ class SolarisISCSIDriver(SanISCSIDriver):
         return iscsi_target_name in self._get_iscsi_targets()
 
     def _build_zfs_poolname(self, volume):
-        zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name'])
+        zfs_poolname = '%s%s' % (self.configuration.san_zfs_volume_base,
+                                 volume['name'])
         return zfs_poolname
 
     def create_volume(self, volume):
@@ -137,7 +138,7 @@ class SolarisISCSIDriver(SanISCSIDriver):
 
         # Create a zfs volume
         cmd = ['/usr/sbin/zfs', 'create']
-        if FLAGS.san_thin_provision:
+        if self.configuration.san_thin_provision:
             cmd.append('-s')
         cmd.extend(['-V', sizestr])
         cmd.append(zfs_poolname)
@@ -186,7 +187,7 @@ class SolarisISCSIDriver(SanISCSIDriver):
 
     def local_path(self, volume):
         # TODO(justinsb): Is this needed here?
-        escaped_group = FLAGS.volume_group.replace('-', '--')
+        escaped_group = self.configuration.volume_group.replace('-', '--')
         escaped_name = volume['name'].replace('-', '--')
         return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
 
@@ -233,7 +234,8 @@ class SolarisISCSIDriver(SanISCSIDriver):
 
         #TODO(justinsb): Is this always 1? Does it matter?
         iscsi_portal_interface = '1'
-        iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+        iscsi_portal = \
+            self.configuration.san_ip + ":3260," + iscsi_portal_interface
 
         db_update = {}
         db_update['provider_location'] = ("%s %s" %
index b59fe26be42c032e5df9eacdb2222fcabd66b09c..bf16339d1b0a328b581bb2e61095fc42b165cf47 100644 (file)
@@ -24,7 +24,6 @@ import urlparse
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
@@ -43,8 +42,8 @@ volume_opts = [
                help='Path from Scality SOFS root to volume dir'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
 
 
 class ScalityDriver(driver.VolumeDriver):
@@ -58,7 +57,7 @@ class ScalityDriver(driver.VolumeDriver):
         """Sanity checks before attempting to mount SOFS."""
 
         # config is mandatory
-        config = FLAGS.scality_sofs_config
+        config = CONF.scality_sofs_config
         if not config:
             msg = _("Value required for 'scality_sofs_config'")
             LOG.warn(msg)
@@ -89,8 +88,8 @@ class ScalityDriver(driver.VolumeDriver):
                 raise e
 
     def _mount_sofs(self):
-        config = FLAGS.scality_sofs_config
-        mount_path = FLAGS.scality_sofs_mount_point
+        config = CONF.scality_sofs_config
+        mount_path = CONF.scality_sofs_mount_point
         sysdir = os.path.join(mount_path, 'sys')
 
         self._makedirs(mount_path)
@@ -121,16 +120,16 @@ class ScalityDriver(driver.VolumeDriver):
         """Any initialization the volume driver does while starting."""
         self._check_prerequisites()
         self._mount_sofs()
-        voldir = os.path.join(FLAGS.scality_sofs_mount_point,
-                              FLAGS.scality_sofs_volume_dir)
+        voldir = os.path.join(CONF.scality_sofs_mount_point,
+                              CONF.scality_sofs_volume_dir)
         if not os.path.isdir(voldir):
             self._makedirs(voldir)
 
     def check_for_setup_error(self):
         """Returns an error if prerequisites aren't met."""
         self._check_prerequisites()
-        voldir = os.path.join(FLAGS.scality_sofs_mount_point,
-                              FLAGS.scality_sofs_volume_dir)
+        voldir = os.path.join(CONF.scality_sofs_mount_point,
+                              CONF.scality_sofs_volume_dir)
         if not os.path.isdir(voldir):
             msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
             LOG.warn(msg)
@@ -160,8 +159,8 @@ class ScalityDriver(driver.VolumeDriver):
 
     def create_snapshot(self, snapshot):
         """Creates a snapshot."""
-        volume_path = os.path.join(FLAGS.scality_sofs_mount_point,
-                                   FLAGS.scality_sofs_volume_dir,
+        volume_path = os.path.join(CONF.scality_sofs_mount_point,
+                                   CONF.scality_sofs_volume_dir,
                                    snapshot['volume_name'])
         snapshot_path = self.local_path(snapshot)
         self._create_file(snapshot_path,
@@ -173,11 +172,11 @@ class ScalityDriver(driver.VolumeDriver):
         os.remove(self.local_path(snapshot))
 
     def _sofs_path(self, volume):
-        return os.path.join(FLAGS.scality_sofs_volume_dir,
+        return os.path.join(CONF.scality_sofs_volume_dir,
                             volume['name'])
 
     def local_path(self, volume):
-        return os.path.join(FLAGS.scality_sofs_mount_point,
+        return os.path.join(CONF.scality_sofs_mount_point,
                             self._sofs_path(volume))
 
     def ensure_export(self, context, volume):
index a539ce1815791fe8ddb21fa0b3270eb84a6c54da..ea4d296e81200f3a01b327a99050f608052728ba 100644 (file)
@@ -18,14 +18,14 @@ SheepDog Volume Driver.
 """
 import re
 
+from oslo.config import cfg
+
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import log as logging
 from cinder.volume import driver
 
 
 LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
 
 
 class SheepdogDriver(driver.VolumeDriver):
index f29c807ce9fa7462925e93ee12f854357188bc80..c12c3efa2aded4b93089499e9aa7e57a7002dafb 100644 (file)
@@ -50,6 +50,10 @@ sf_opts = [
                help='Create SolidFire accounts with this prefix'), ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(sf_opts)
+
+
 class SolidFire(SanISCSIDriver):
     """OpenStack driver to enable SolidFire cluster.
 
index 8f44477fa5ac4aeb0e1a0d547d5be7315d3caf4d..c29533b5bf27cd0cb2cf41ade7bda91902078529 100755 (executable)
@@ -101,6 +101,10 @@ storwize_svc_opts = [
 ]
 
 
+CONF = cfg.CONF
+CONF.register_opts(storwize_svc_opts)
+
+
 class StorwizeSVCDriver(san.SanISCSIDriver):
     """IBM Storwize V7000 and SVC iSCSI/FC volume driver.
 
index 6fedfa300a37d246ea520a2d4fc23dcf98767f1f..2b11c89b4fa6c5ab3ecf8f373ab1782e186746f6 100644 (file)
@@ -19,7 +19,6 @@
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.image import glance
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
@@ -53,9 +52,9 @@ xenapi_nfs_opts = [
                help='Path of exported NFS, used by XenAPINFSDriver'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_opts)
-FLAGS.register_opts(xenapi_nfs_opts)
+CONF = cfg.CONF
+CONF.register_opts(xenapi_opts)
+CONF.register_opts(xenapi_nfs_opts)
 
 
 class XenAPINFSDriver(driver.VolumeDriver):
@@ -166,8 +165,8 @@ class XenAPINFSDriver(driver.VolumeDriver):
     def _use_image_utils_to_pipe_bytes_to_volume(self, context, volume,
                                                  image_service, image_id):
         sr_uuid, vdi_uuid = volume['provider_location'].split('/')
-        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
-                                               FLAGS.xenapi_nfs_serverpath,
+        with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+                                               CONF.xenapi_nfs_serverpath,
                                                sr_uuid, vdi_uuid,
                                                False) as device:
             image_utils.fetch_to_raw(context,
@@ -184,22 +183,22 @@ class XenAPINFSDriver(driver.VolumeDriver):
         auth_token = context.auth_token
 
         overwrite_result = self.nfs_ops.use_glance_plugin_to_overwrite_volume(
-            FLAGS.xenapi_nfs_server,
-            FLAGS.xenapi_nfs_serverpath,
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
             sr_uuid,
             vdi_uuid,
             glance_server,
             image_id,
             auth_token,
-            FLAGS.xenapi_sr_base_path)
+            CONF.xenapi_sr_base_path)
 
         if overwrite_result is False:
             raise exception.ImageCopyFailure(reason='Overwriting volume '
                                                     'failed.')
 
         self.nfs_ops.resize_volume(
-            FLAGS.xenapi_nfs_server,
-            FLAGS.xenapi_nfs_serverpath,
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
             sr_uuid,
             vdi_uuid,
             volume['size'])
@@ -215,8 +214,8 @@ class XenAPINFSDriver(driver.VolumeDriver):
     def _use_image_utils_to_upload_volume(self, context, volume, image_service,
                                           image_meta):
         sr_uuid, vdi_uuid = volume['provider_location'].split('/')
-        with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
-                                               FLAGS.xenapi_nfs_serverpath,
+        with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+                                               CONF.xenapi_nfs_serverpath,
                                                sr_uuid, vdi_uuid,
                                                True) as device:
             image_utils.upload_volume(context,
@@ -235,14 +234,14 @@ class XenAPINFSDriver(driver.VolumeDriver):
         auth_token = context.auth_token
 
         self.nfs_ops.use_glance_plugin_to_upload_volume(
-            FLAGS.xenapi_nfs_server,
-            FLAGS.xenapi_nfs_serverpath,
+            CONF.xenapi_nfs_server,
+            CONF.xenapi_nfs_serverpath,
             sr_uuid,
             vdi_uuid,
             glance_server,
             image_id,
             auth_token,
-            FLAGS.xenapi_sr_base_path)
+            CONF.xenapi_sr_base_path)
 
     def get_volume_stats(self, refresh=False):
         if refresh or not self._stats:
index 2b60497e8f594111743c90680a2282b55defc1c6..cecc18b8ba252d46ce83ed98ebea9a52b93ae175 100644 (file)
@@ -27,7 +27,6 @@ Volume driver for IBM XIV storage systems.
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.openstack.common import importutils
 from cinder.openstack.common import log as logging
 from cinder.volume.drivers.san import san
@@ -38,8 +37,8 @@ ibm_xiv_opts = [
                help='Proxy driver'),
 ]
 
-FLAGS = flags.FLAGS
-FLAGS.register_opts(ibm_xiv_opts)
+CONF = cfg.CONF
+CONF.register_opts(ibm_xiv_opts)
 
 LOG = logging.getLogger('cinder.volume.xiv')
 
@@ -50,12 +49,12 @@ class XIVDriver(san.SanISCSIDriver):
     def __init__(self, *args, **kwargs):
         """Initialize the driver."""
 
-        proxy = importutils.import_class(FLAGS.xiv_proxy)
+        proxy = importutils.import_class(CONF.xiv_proxy)
 
-        self.xiv_proxy = proxy({"xiv_user": FLAGS.san_login,
-                                "xiv_pass": FLAGS.san_password,
-                                "xiv_address": FLAGS.san_ip,
-                                "xiv_vol_pool": FLAGS.san_clustername},
+        self.xiv_proxy = proxy({"xiv_user": CONF.san_login,
+                                "xiv_pass": CONF.san_password,
+                                "xiv_address": CONF.san_ip,
+                                "xiv_vol_pool": CONF.san_clustername},
                                LOG,
                                exception)
         san.SanISCSIDriver.__init__(self, *args, **kwargs)