configuration.coraid_user = fake_esm_username
configuration.coraid_group = fake_esm_group
configuration.coraid_password = fake_esm_password
+ configuration.volume_name_template = "volume-%s"
+ configuration.snapshot_name_template = "snapshot-%s"
self.drv = CoraidDriver(configuration=configuration)
self.drv.do_setup({})
def test_local_path(self):
"""local_path common use case."""
- glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
+ glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
drv = self._driver
volume = DumbVolume()
"""_get_mount_point_for_share should calculate correct value."""
drv = self._driver
- glusterfs.FLAGS.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
+ glusterfs.CONF.glusterfs_mount_point_base = self.TEST_MNT_POINT_BASE
self.assertEqual('/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc',
drv._get_mount_point_for_share(
(df_total_size, df_avail)
df_output = df_head + df_data
- setattr(glusterfs.FLAGS, 'glusterfs_disk_util', 'df')
+ setattr(glusterfs.CONF, 'glusterfs_disk_util', 'df')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
drv._get_mount_point_for_share(self.TEST_EXPORT1).\
mox.VerifyAll()
- delattr(glusterfs.FLAGS, 'glusterfs_disk_util')
+ delattr(glusterfs.CONF, 'glusterfs_disk_util')
def test_get_available_capacity_with_du(self):
"""_get_available_capacity should calculate correct value."""
"""do_setup should throw error if shares config is not configured."""
drv = self._driver
- glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+ glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
self.assertRaises(exception.GlusterfsException,
drv.do_setup, IsA(context.RequestContext))
mox = self._mox
drv = self._driver
- glusterfs.FLAGS.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
+ glusterfs.CONF.glusterfs_shares_config = self.TEST_SHARES_CONFIG_FILE
mox.StubOutWithMock(os.path, 'exists')
os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
drv = self._driver
volume = self._simple_volume()
- setattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes', True)
+ setattr(glusterfs.CONF, 'glusterfs_sparsed_volumes', True)
mox.StubOutWithMock(drv, '_create_sparsed_file')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.VerifyAll()
- delattr(glusterfs.FLAGS, 'glusterfs_sparsed_volumes')
+ delattr(glusterfs.CONF, 'glusterfs_sparsed_volumes')
def test_create_nonsparsed_volume(self):
mox = self._mox
raise e
def _configure_driver(self):
- scality.FLAGS.scality_sofs_config = self.TEST_CONFIG
- scality.FLAGS.scality_sofs_mount_point = self.TEST_MOUNT
- scality.FLAGS.scality_sofs_volume_dir = self.TEST_VOLDIR
+ scality.CONF.scality_sofs_config = self.TEST_CONFIG
+ scality.CONF.scality_sofs_mount_point = self.TEST_MOUNT
+ scality.CONF.scality_sofs_volume_dir = self.TEST_VOLDIR
def _execute_wrapper(self, cmd, *args, **kwargs):
try:
def test_setup_no_config(self):
"""Missing SOFS configuration shall raise an error."""
- scality.FLAGS.scality_sofs_config = None
+ scality.CONF.scality_sofs_config = None
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.do_setup, None)
def test_setup_missing_config(self):
"""Non-existent SOFS configuration file shall raise an error."""
- scality.FLAGS.scality_sofs_config = 'nonexistent.conf'
+ scality.CONF.scality_sofs_config = 'nonexistent.conf'
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.do_setup, None)
"""
import sys
-import cinder.flags
+from oslo.config import cfg
+
from cinder.tests.windows import basetestcase
from cinder.tests.windows import db_fakes
from cinder.tests.windows import windowsutils
from cinder.volume.drivers import windows
-FLAGS = cinder.flags.FLAGS
+CONF = cfg.CONF
class TestWindowsDriver(basetestcase.BaseTestCase):
self._wutils.delete_snapshot(self._snapshot_data['name'])
if (self._connector_data and
self._wutils.initiator_id_exists(
- "%s%s" % (FLAGS.iscsi_target_prefix,
+ "%s%s" % (CONF.iscsi_target_prefix,
self._volume_data['name']),
self._connector_data['initiator'])):
- target_name = "%s%s" % (FLAGS.iscsi_target_prefix,
+ target_name = "%s%s" % (CONF.iscsi_target_prefix,
self._volume_data['name'])
initiator_name = self._connector_data['initiator']
self._wutils.delete_initiator_id(target_name, initiator_name)
if (self._volume_data and
self._wutils.export_exists("%s%s" %
- (FLAGS.iscsi_target_prefix,
+ (CONF.iscsi_target_prefix,
self._volume_data['name']))):
self._wutils.delete_export(
- "%s%s" % (FLAGS.iscsi_target_prefix,
+ "%s%s" % (CONF.iscsi_target_prefix,
self._volume_data['name']))
finally:
volume_name = self._volume_data['name']
self.assertEquals(
retval,
- {'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix,
+ {'provider_location': "%s%s" % (CONF.iscsi_target_prefix,
volume_name)})
def test_initialize_connection(self):
class DriverTestCase(test.TestCase):
def assert_flag(self, flagname):
- self.assertTrue(hasattr(driver.FLAGS, flagname))
+ self.assertTrue(hasattr(driver.CONF, flagname))
def test_config_options(self):
self.assert_flag('xenapi_connection_url')
drv.nfs_ops = ops
drv.db = db
- mock.StubOutWithMock(driver, 'FLAGS')
- driver.FLAGS.xenapi_nfs_server = server
- driver.FLAGS.xenapi_nfs_serverpath = serverpath
- driver.FLAGS.xenapi_sr_base_path = sr_base_path
+ mock.StubOutWithMock(driver, 'CONF')
+ driver.CONF.xenapi_nfs_server = server
+ driver.CONF.xenapi_nfs_serverpath = serverpath
+ driver.CONF.xenapi_sr_base_path = sr_base_path
return mock, drv
"""
import cookielib
-import os
import time
import urllib2
from oslo.config import cfg
-from cinder import context
-from cinder import exception
-from cinder import flags
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.volume import driver
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
coraid_opts = [
cfg.StrOpt('coraid_esm_address',
default='',
default='coraid_repository',
help='Volume Type key name to store ESM Repository Name'),
]
-FLAGS.register_opts(coraid_opts)
+
+CONF = cfg.CONF
+CONF.register_opts(coraid_opts)
class CoraidException(Exception):
def create_snapshot(self, snapshot):
"""Create a Snapshot."""
+ volume_name = (self.configuration.volume_name_template
+ % snapshot['volume_id'])
+ snapshot_name = (self.configuration.snapshot_name_template
+ % snapshot['id'])
try:
- volume_name = (FLAGS.volume_name_template
- % snapshot['volume_id'])
- snapshot_name = (FLAGS.snapshot_name_template
- % snapshot['id'])
self.esm.create_snapshot(volume_name, snapshot_name)
except Exception, e:
msg = _('Failed to Create Snapshot %(snapname)s')
def delete_snapshot(self, snapshot):
"""Delete a Snapshot."""
+ snapshot_name = (self.configuration.snapshot_name_template
+ % snapshot['id'])
try:
- snapshot_name = (FLAGS.snapshot_name_template
- % snapshot['id'])
self.esm.delete_snapshot(snapshot_name)
except Exception:
msg = _('Failed to Delete Snapshot %(snapname)s')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a Volume from a Snapshot."""
+ snapshot_name = (self.configuration.snapshot_name_template
+ % snapshot['id'])
+ repository = self._get_repository(volume['volume_type'])
try:
- snapshot_name = (FLAGS.snapshot_name_template
- % snapshot['id'])
- repository = self._get_repository(volume['volume_type'])
self.esm.create_volume_from_snapshot(snapshot_name,
volume['name'],
repository)
from xml.dom.minidom import parseString
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
try:
import pywbem
default=CINDER_EMC_CONFIG_FILE,
help='use this file for cinder emc plugin '
'config data')
- FLAGS.register_opt(opt)
+ CONF.register_opt(opt)
self.protocol = prtcl
self.configuration = configuration
self.configuration.append_config_values([opt])
"""
-import os
-import time
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
-from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_smis_common
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
class EMCSMISISCSIDriver(driver.ISCSIDriver):
"""EMC ISCSI Drivers for VMAX and VNX using SMI-S."""
super(EMCSMISISCSIDriver, self).__init__(*args, **kwargs)
self.common = emc_smis_common.EMCSMISCommon(
- 'iSCSI',
- configuration=self.configuration)
+ 'iSCSI',
+ configuration=self.configuration)
def check_for_setup_error(self):
pass
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder.volume.drivers import nfs
'In such a case volume creation takes a lot of time.'))]
VERSION = '1.0'
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
class GlusterfsDriver(nfs.RemoteFsDriver):
READBUFFERSIZE = 8192
+CONF = cfg.CONF
+CONF.register_opts(huawei_opt)
+
+
class SSHConn(utils.SSHPool):
"""Define a new class inherited to SSHPool.
from cinder.brick.iscsi import iscsi
from cinder import exception
-from cinder import flags
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import utils
'this requires lvm_mirrors + 2 pvs with available space'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
- volume_name = FLAGS.volume_name_template % src_vref['id']
+ volume_name = self.configuration.volume_name_template % src_vref['id']
temp_id = 'tmp-snap-%s' % src_vref['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
"provision for volume: %s"), volume['id'])
return
- iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
- volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
+ iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
+ volume['name'])
+ volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
+ volume['name'])
iscsi_target = 1
self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
out, err = self._execute('lvs', '--option',
'name', '--noheadings',
run_as_root=True)
- pool_name = "%s-pool" % FLAGS.volume_group
+ pool_name = "%s-pool" % self.configuration.volume_group
if pool_name not in out:
- if not FLAGS.pool_size:
- out, err = self._execute('vgs', FLAGS.volume_group,
- '--noheadings', '--options',
- 'name,size', run_as_root=True)
+ if not self.configuration.pool_size:
+ out, err = self._execute('vgs',
+ self.configuration.volume_group,
+ '--noheadings',
+ '--options',
+ 'name,size',
+ run_as_root=True)
+
size = re.sub(r'[\.][\d][\d]', '', out.split()[1])
else:
- size = "%s" % FLAGS.pool_size
+ size = "%s" % self.configuration.pool_size
- pool_path = '%s/%s' % (FLAGS.volume_group, pool_name)
+ pool_path = '%s/%s' % (self.configuration.volume_group,
+ pool_name)
out, err = self._execute('lvcreate', '-T', '-L', size,
pool_path, run_as_root=True)
"""Creates a logical volume. Can optionally return a Dictionary of
changes to the volume object to be persisted."""
sizestr = self._sizestr(volume['size'])
- vg_name = ("%s/%s-pool" % (FLAGS.volume_group, FLAGS.volume_group))
+ vg_name = ("%s/%s-pool" % (self.configuration.volume_group,
+ self.configuration.volume_group))
self._try_execute('lvcreate', '-T', '-V', sizestr, '-n',
volume['name'], vg_name, run_as_root=True)
if self._volume_not_present(volume['name']):
return True
self._try_execute('lvremove', '-f', "%s/%s" %
- (FLAGS.volume_group,
+ (self.configuration.volume_group,
self._escape_snapshot(volume['name'])),
run_as_root=True)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
- orig_lv_name = "%s/%s" % (FLAGS.volume_group, src_vref['name'])
+ orig_lv_name = "%s/%s" % (self.configuration.volume_group,
+ src_vref['name'])
self._do_lvm_snapshot(orig_lv_name, volume, False)
def create_snapshot(self, snapshot):
"""Creates a snapshot of a volume."""
- orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
+ orig_lv_name = "%s/%s" % (self.configuration.volume_group,
+ snapshot['volume_name'])
self._do_lvm_snapshot(orig_lv_name, snapshot)
def get_volume_stats(self, refresh=False):
' 7 mode'), ]
+CONF = cfg.CONF
+CONF.register_opts(netapp_opts)
+
+
class DfmDataset(object):
def __init__(self, id, name, project, type):
self.id = id
help='Does snapshot creation call return immediately')]
+CONF = cfg.CONF
+CONF.register_opts(netapp_nfs_opts)
+
+
class NetAppNFSDriver(nfs.NfsDriver):
"""Executes commands relating to Volumes."""
def __init__(self, *args, **kwargs):
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers import nexenta
VERSION = '1.0'
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
nexenta_opts = [
cfg.StrOpt('nexenta_host',
default=False,
help='flag to create sparse volumes'),
]
-FLAGS.register_opts(nexenta_opts)
+
+CONF = cfg.CONF
+CONF.register_opts(nexenta_opts)
class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921
super(NexentaDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
- protocol = FLAGS.nexenta_rest_protocol
+ protocol = CONF.nexenta_rest_protocol
auto = protocol == 'auto'
if auto:
protocol = 'http'
self.nms = jsonrpc.NexentaJSONProxy(
- '%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
- FLAGS.nexenta_rest_port),
- FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
+ '%s://%s:%s/rest/nms/' % (protocol, CONF.nexenta_host,
+ CONF.nexenta_rest_port),
+ CONF.nexenta_user, CONF.nexenta_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
- if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
+ if not self.nms.volume.object_exists(CONF.nexenta_volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA"),
- FLAGS.nexenta_volume)
+ CONF.nexenta_volume)
@staticmethod
def _get_zvol_name(volume_name):
"""Return zvol name that corresponds given volume name."""
- return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
+ return '%s/%s' % (CONF.nexenta_volume, volume_name)
@staticmethod
def _get_target_name(volume_name):
"""Return iSCSI target name to access volume."""
- return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
+ return '%s%s' % (CONF.nexenta_target_prefix, volume_name)
@staticmethod
def _get_target_group_name(volume_name):
"""Return Nexenta iSCSI target group name for volume."""
- return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
+ return '%s%s' % (CONF.nexenta_target_group_prefix, volume_name)
def create_volume(self, volume):
"""Create a zvol on appliance.
self.nms.zvol.create(
self._get_zvol_name(volume['name']),
'%sG' % (volume['size'],),
- FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
+ CONF.nexenta_blocksize, CONF.nexenta_sparse)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
else:
LOG.info(_('Ignored LUN mapping entry addition error "%s"'
' while ensuring export'), exc)
- return '%s:%s,1 %s 0' % (FLAGS.nexenta_host,
- FLAGS.nexenta_iscsi_target_portal_port,
+ return '%s:%s,1 %s 0' % (CONF.nexenta_host,
+ CONF.nexenta_iscsi_target_portal_port,
target_name)
def create_export(self, _ctx, volume):
data["driver_version"] = VERSION
data["storage_protocol"] = 'iSCSI'
- stats = self.nms.volume.get_child_props(FLAGS.nexenta_volume,
+ stats = self.nms.volume.get_child_props(CONF.nexenta_volume,
'health|size|used|available')
total_unit = stats['size'][-1]
total_amount = float(stats['size'][:-1])
VERSION = '1.1'
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
+
class RemoteFsDriver(driver.VolumeDriver):
"""Common base for drivers that work like NFS."""
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
+CONF = cfg.CONF
+CONF.register_opts(rbd_opts)
+
class RBDDriver(driver.VolumeDriver):
"""Implements RADOS block device (RBD) volume commands"""
]
+CONF = cfg.CONF
+CONF.register_opts(hp3par_opts)
+
+
class HP3PARCommon(object):
stats = {}
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn_iqn)
# no 3par host, re-throw
- if (hostname == None):
+ if (hostname is None):
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.driver import ISCSIDriver
help='Maximum ssh connections in the pool'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(san_opts)
+CONF = cfg.CONF
+CONF.register_opts(san_opts)
class SanISCSIDriver(ISCSIDriver):
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san.san import SanISCSIDriver
default='rpool/',
help='The ZFS path under which to create zvols for volumes.'), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(solaris_opts)
+CONF = cfg.CONF
+CONF.register_opts(solaris_opts)
class SolarisISCSIDriver(SanISCSIDriver):
super(SolarisISCSIDriver, self).__init__(*cmd,
execute=self._execute,
**kwargs)
+ self.configuration.append_config_values(solaris_opts)
def _execute(self, *cmd, **kwargs):
new_cmd = ['pfexec']
return iscsi_target_name in self._get_iscsi_targets()
def _build_zfs_poolname(self, volume):
- zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name'])
+ zfs_poolname = '%s%s' % (self.configuration.san_zfs_volume_base,
+ volume['name'])
return zfs_poolname
def create_volume(self, volume):
# Create a zfs volume
cmd = ['/usr/sbin/zfs', 'create']
- if FLAGS.san_thin_provision:
+ if self.configuration.san_thin_provision:
cmd.append('-s')
cmd.extend(['-V', sizestr])
cmd.append(zfs_poolname)
def local_path(self, volume):
# TODO(justinsb): Is this needed here?
- escaped_group = FLAGS.volume_group.replace('-', '--')
+ escaped_group = self.configuration.volume_group.replace('-', '--')
escaped_name = volume['name'].replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
#TODO(justinsb): Is this always 1? Does it matter?
iscsi_portal_interface = '1'
- iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+ iscsi_portal = \
+ self.configuration.san_ip + ":3260," + iscsi_portal_interface
db_update = {}
db_update['provider_location'] = ("%s %s" %
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume import driver
help='Path from Scality SOFS root to volume dir'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = cfg.CONF
+CONF.register_opts(volume_opts)
class ScalityDriver(driver.VolumeDriver):
"""Sanity checks before attempting to mount SOFS."""
# config is mandatory
- config = FLAGS.scality_sofs_config
+ config = CONF.scality_sofs_config
if not config:
msg = _("Value required for 'scality_sofs_config'")
LOG.warn(msg)
raise e
def _mount_sofs(self):
- config = FLAGS.scality_sofs_config
- mount_path = FLAGS.scality_sofs_mount_point
+ config = CONF.scality_sofs_config
+ mount_path = CONF.scality_sofs_mount_point
sysdir = os.path.join(mount_path, 'sys')
self._makedirs(mount_path)
"""Any initialization the volume driver does while starting."""
self._check_prerequisites()
self._mount_sofs()
- voldir = os.path.join(FLAGS.scality_sofs_mount_point,
- FLAGS.scality_sofs_volume_dir)
+ voldir = os.path.join(CONF.scality_sofs_mount_point,
+ CONF.scality_sofs_volume_dir)
if not os.path.isdir(voldir):
self._makedirs(voldir)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self._check_prerequisites()
- voldir = os.path.join(FLAGS.scality_sofs_mount_point,
- FLAGS.scality_sofs_volume_dir)
+ voldir = os.path.join(CONF.scality_sofs_mount_point,
+ CONF.scality_sofs_volume_dir)
if not os.path.isdir(voldir):
msg = _("Cannot find volume dir for Scality SOFS at '%s'") % voldir
LOG.warn(msg)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
- volume_path = os.path.join(FLAGS.scality_sofs_mount_point,
- FLAGS.scality_sofs_volume_dir,
+ volume_path = os.path.join(CONF.scality_sofs_mount_point,
+ CONF.scality_sofs_volume_dir,
snapshot['volume_name'])
snapshot_path = self.local_path(snapshot)
self._create_file(snapshot_path,
os.remove(self.local_path(snapshot))
def _sofs_path(self, volume):
- return os.path.join(FLAGS.scality_sofs_volume_dir,
+ return os.path.join(CONF.scality_sofs_volume_dir,
volume['name'])
def local_path(self, volume):
- return os.path.join(FLAGS.scality_sofs_mount_point,
+ return os.path.join(CONF.scality_sofs_mount_point,
self._sofs_path(volume))
def ensure_export(self, context, volume):
"""
import re
+from oslo.config import cfg
+
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder.volume import driver
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class SheepdogDriver(driver.VolumeDriver):
help='Create SolidFire accounts with this prefix'), ]
+CONF = cfg.CONF
+CONF.register_opts(sf_opts)
+
+
class SolidFire(SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
]
+CONF = cfg.CONF
+CONF.register_opts(storwize_svc_opts)
+
+
class StorwizeSVCDriver(san.SanISCSIDriver):
"""IBM Storwize V7000 and SVC iSCSI/FC volume driver.
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.image import glance
from cinder.image import image_utils
from cinder.openstack.common import log as logging
help='Path of exported NFS, used by XenAPINFSDriver'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_opts)
-FLAGS.register_opts(xenapi_nfs_opts)
+CONF = cfg.CONF
+CONF.register_opts(xenapi_opts)
+CONF.register_opts(xenapi_nfs_opts)
class XenAPINFSDriver(driver.VolumeDriver):
def _use_image_utils_to_pipe_bytes_to_volume(self, context, volume,
image_service, image_id):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
- with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
- FLAGS.xenapi_nfs_serverpath,
+ with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+ CONF.xenapi_nfs_serverpath,
sr_uuid, vdi_uuid,
False) as device:
image_utils.fetch_to_raw(context,
auth_token = context.auth_token
overwrite_result = self.nfs_ops.use_glance_plugin_to_overwrite_volume(
- FLAGS.xenapi_nfs_server,
- FLAGS.xenapi_nfs_serverpath,
+ CONF.xenapi_nfs_server,
+ CONF.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid,
glance_server,
image_id,
auth_token,
- FLAGS.xenapi_sr_base_path)
+ CONF.xenapi_sr_base_path)
if overwrite_result is False:
raise exception.ImageCopyFailure(reason='Overwriting volume '
'failed.')
self.nfs_ops.resize_volume(
- FLAGS.xenapi_nfs_server,
- FLAGS.xenapi_nfs_serverpath,
+ CONF.xenapi_nfs_server,
+ CONF.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid,
volume['size'])
def _use_image_utils_to_upload_volume(self, context, volume, image_service,
image_meta):
sr_uuid, vdi_uuid = volume['provider_location'].split('/')
- with self.nfs_ops.volume_attached_here(FLAGS.xenapi_nfs_server,
- FLAGS.xenapi_nfs_serverpath,
+ with self.nfs_ops.volume_attached_here(CONF.xenapi_nfs_server,
+ CONF.xenapi_nfs_serverpath,
sr_uuid, vdi_uuid,
True) as device:
image_utils.upload_volume(context,
auth_token = context.auth_token
self.nfs_ops.use_glance_plugin_to_upload_volume(
- FLAGS.xenapi_nfs_server,
- FLAGS.xenapi_nfs_serverpath,
+ CONF.xenapi_nfs_server,
+ CONF.xenapi_nfs_serverpath,
sr_uuid,
vdi_uuid,
glance_server,
image_id,
auth_token,
- FLAGS.xenapi_sr_base_path)
+ CONF.xenapi_sr_base_path)
def get_volume_stats(self, refresh=False):
if refresh or not self._stats:
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san import san
help='Proxy driver'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(ibm_xiv_opts)
+CONF = cfg.CONF
+CONF.register_opts(ibm_xiv_opts)
LOG = logging.getLogger('cinder.volume.xiv')
def __init__(self, *args, **kwargs):
"""Initialize the driver."""
- proxy = importutils.import_class(FLAGS.xiv_proxy)
+ proxy = importutils.import_class(CONF.xiv_proxy)
- self.xiv_proxy = proxy({"xiv_user": FLAGS.san_login,
- "xiv_pass": FLAGS.san_password,
- "xiv_address": FLAGS.san_ip,
- "xiv_vol_pool": FLAGS.san_clustername},
+ self.xiv_proxy = proxy({"xiv_user": CONF.san_login,
+ "xiv_pass": CONF.san_password,
+ "xiv_address": CONF.san_ip,
+ "xiv_vol_pool": CONF.san_clustername},
LOG,
exception)
san.SanISCSIDriver.__init__(self, *args, **kwargs)