Handles all requests relating to volumes.
"""
+
import functools
from oslo.config import cfg
from cinder import context
from cinder.db import base
from cinder import exception
-from cinder import flags
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
+from cinder import units
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_types
+
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(volume_host_opt)
-flags.DECLARE('storage_availability_zone', 'cinder.volume.manager')
+CONF = cfg.CONF
+CONF.register_opt(volume_host_opt)
+CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)
-GB = 1048576 * 1024
+GB = units.GiB
QUOTAS = quota.QUOTAS
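# A minimal sketch of the oslo.config pattern this change adopts, reusing the
# option names that appear above (the standalone module below is only an
# illustration, not part of the patch):
#
#     from oslo.config import cfg
#
#     CONF = cfg.CONF
#     volume_host_opt = cfg.BoolOpt('snapshot_same_host', default=True,
#                                   help='Create volume from snapshot at the '
#                                        'host where snapshot resides')
#     CONF.register_opt(volume_host_opt)             # option defined here
#     CONF.import_opt('storage_availability_zone',   # option defined in
#                     'cinder.volume.manager')       # another module
#
#     if CONF.snapshot_same_host:                    # values read as attributes
#         zone = CONF.storage_availability_zone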
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
if availability_zone is None:
- availability_zone = FLAGS.storage_availability_zone
+ availability_zone = CONF.storage_availability_zone
else:
self._check_availabilty_zone(availability_zone)
snapshot_id = request_spec['snapshot_id']
image_id = request_spec['image_id']
- if snapshot_id and FLAGS.snapshot_same_host:
+ if snapshot_id and CONF.snapshot_same_host:
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
source_volume_ref = self.db.volume_get(context,
snapshot_ref['volume_id'])
else:
self.scheduler_rpcapi.create_volume(
context,
- FLAGS.volume_topic,
+ CONF.volume_topic,
volume_id,
snapshot_id,
image_id,
return
ctxt = context.get_admin_context()
- topic = FLAGS.volume_topic
+ topic = CONF.volume_topic
volume_services = self.db.service_get_all_by_topic(ctxt, topic)
# NOTE(haomai): In case the volume services aren't initialized or
raise exception.InvalidVolume(reason=msg)
try:
- if FLAGS.no_snapshot_gb_quota:
+ if CONF.no_snapshot_gb_quota:
reservations = QUOTAS.reserve(context, snapshots=1)
else:
reservations = QUOTAS.reserve(context, snapshots=1,
and registered in the group in which they are used.
"""
+
from oslo.config import cfg
-from cinder import flags
from cinder.openstack.common import log as logging
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# set the local conf so that __call__ knows what to use
if self.config_group:
self._ensure_config_values(volume_opts)
- self.local_conf = FLAGS._get(self.config_group)
+ self.local_conf = CONF._get(self.config_group)
else:
- self.local_conf = FLAGS
+ self.local_conf = CONF
def _ensure_config_values(self, volume_opts):
- FLAGS.register_opts(volume_opts,
- group=self.config_group)
+        CONF.register_opts(volume_opts,
+                           group=self.config_group)
def append_config_values(self, volume_opts):
self._ensure_config_values(volume_opts)
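# A hedged sketch of what the group-scoped branch above does with oslo.config
# (the group name 'backend1' is only an example, not a group defined in this
# patch): options registered under a group are read through that group, while
# the ungrouped case falls back to the global CONF object.
#
#     CONF.register_opts(volume_opts, group='backend1')
#     local_conf = CONF._get('backend1')   # group view of the options
#     local_conf.iscsi_target_prefix       # == CONF.backend1.iscsi_target_prefix
#     # with config_group unset, local_conf is CONF and the same attribute
#     # resolves at the top level: CONF.iscsi_target_prefix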
iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS) platform.
"""
+
from oslo.config import cfg
from xml.etree import ElementTree as ETree
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import driver
-
from cinder.volume.drivers.hds.hus_backend import HusBackend
default='/opt/hds/hus/cinder_hus_conf.xml',
help='configuration file for HDS cinder plugin for HUS'), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(HUS_OPTS)
+CONF = cfg.CONF
+CONF.register_opts(HUS_OPTS)
HI_IQN = 'iqn.1994-04.jp.co.hitachi:' # fixed string, for now.
This driver requires the iSCSI target role to be installed
"""
+
+
import os
-import sys
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
from cinder.volume import driver
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
windows_opts = [
cfg.StrOpt('windows_iscsi_lun_path',
default='C:\iSCSIVirtualDisks',
help='Path to store VHD backed volumes'),
]
-FLAGS.register_opts(windows_opts)
+CONF = cfg.CONF
+CONF.register_opts(windows_opts)
class WindowsDriver(driver.ISCSIDriver):
SizeInMB=volume['size'] * 1024)
def _get_vhd_path(self, volume):
- base_vhd_folder = FLAGS.windows_iscsi_lun_path
+ base_vhd_folder = CONF.windows_iscsi_lun_path
if not os.path.exists(base_vhd_folder):
LOG.debug(_('Creating folder %s '), base_vhd_folder)
os.makedirs(base_vhd_folder)
resources
:return: iscsiadm-formatted provider location string
"""
- target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+ target_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
# ISCSI target creation
try:
cl = self._conn_wmi.__getattr__("WT_Host")
def remove_export(self, context, volume):
"""Driver exntry point to remove an export for a volume.
"""
- target_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
+ target_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
# Get ISCSI target
wt_host = self._conn_wmi.WT_Host(HostName=target_name)[0]
This driver requires VPSA with API ver.12.06 or higher.
"""
+
import httplib
from lxml import etree
from oslo.config import cfg
from cinder import exception
-from cinder import flags
from cinder.openstack.common import log as logging
-from cinder import utils
from cinder.volume import driver
+
LOG = logging.getLogger("cinder.volume.driver")
zadara_opts = [
default=True,
help="Don't halt on deletion of non-existing volumes"), ]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(zadara_opts)
+CONF = cfg.CONF
+CONF.register_opts(zadara_opts)
class ZadaraVPSAConnection(object):
'/api/volumes.xml',
{'display_name': kwargs.get('name'),
'virtual_capacity': kwargs.get('size'),
- 'raid_group_name[]': FLAGS.zadara_vpsa_poolname,
+ 'raid_group_name[]': CONF.zadara_vpsa_poolname,
'quantity': 1,
- 'cache': FLAGS.zadara_default_cache_policy,
- 'crypt': FLAGS.zadara_default_encryption,
- 'mode': FLAGS.zadara_default_striping_mode,
- 'stripesize': FLAGS.zadara_default_stripesize,
+ 'cache': CONF.zadara_default_cache_policy,
+ 'crypt': CONF.zadara_default_encryption,
+ 'mode': CONF.zadara_default_striping_mode,
+ 'stripesize': CONF.zadara_default_stripesize,
'force': 'NO'}),
'delete_volume': ('DELETE',
'/api/volumes/%s.xml' % kwargs.get('vpsa_vol'),
Any initialization the volume driver does while starting.
Establishes initial connection with VPSA and retrieves access_key.
"""
- self.vpsa = ZadaraVPSAConnection(FLAGS.zadara_vpsa_ip,
- FLAGS.zadara_vpsa_port,
- FLAGS.zadara_vpsa_use_ssl,
- FLAGS.zadara_user,
- FLAGS.zadara_password)
+ self.vpsa = ZadaraVPSAConnection(CONF.zadara_vpsa_ip,
+ CONF.zadara_vpsa_port,
+ CONF.zadara_vpsa_use_ssl,
+ CONF.zadara_user,
+ CONF.zadara_password)
def check_for_setup_error(self):
"""Returns an error (exception) if prerequisites aren't met."""
"""Create volume."""
self.vpsa.send_cmd(
'create_volume',
- name=FLAGS.zadara_vol_name_template % volume['name'],
+ name=CONF.zadara_vol_name_template % volume['name'],
size=volume['size'])
def delete_volume(self, volume):
Return OK if it doesn't exist. Auto-detach from all servers.
"""
# Get volume name
- name = FLAGS.zadara_vol_name_template % volume['name']
+ name = CONF.zadara_vol_name_template % volume['name']
vpsa_vol = self._get_vpsa_volume_name(name)
if not vpsa_vol:
msg = _('Volume %(name)s could not be found. '
'It might be already deleted') % locals()
LOG.warning(msg)
- if FLAGS.zadara_vpsa_allow_nonexistent_delete:
+ if CONF.zadara_vpsa_allow_nonexistent_delete:
return
else:
raise exception.VolumeNotFound(volume_id=name)
servers = self._xml_parse_helper(xml_tree, 'servers',
('iqn', None), first=False)
if servers:
- if not FLAGS.zadara_vpsa_auto_detach_on_delete:
+ if not CONF.zadara_vpsa_auto_detach_on_delete:
raise exception.VolumeAttached(volume_id=name)
for server in servers:
raise exception.ZadaraServerCreateFailure(name=initiator_name)
# Get volume name
- name = FLAGS.zadara_vol_name_template % volume['name']
+ name = CONF.zadara_vol_name_template % volume['name']
vpsa_vol = self._get_vpsa_volume_name(name)
if not vpsa_vol:
raise exception.VolumeNotFound(volume_id=name)
raise exception.ZadaraServerNotFound(name=initiator_name)
# Get volume name
- name = FLAGS.zadara_vol_name_template % volume['name']
+ name = CONF.zadara_vol_name_template % volume['name']
vpsa_vol = self._get_vpsa_volume_name(name)
if not vpsa_vol:
raise exception.VolumeNotFound(volume_id=name)
Client side of the volume RPC API.
"""
-from cinder import exception
-from cinder import flags
+
+from oslo.config import cfg
+
from cinder.openstack.common import rpc
import cinder.openstack.common.rpc.proxy
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
def __init__(self, topic=None):
super(VolumeAPI, self).__init__(
- topic=topic or FLAGS.volume_topic,
+ topic=topic or CONF.volume_topic,
default_version=self.BASE_RPC_API_VERSION)
def create_volume(self, ctxt, volume, host,
"""Volume-related Utilities and helpers."""
+
import os
import stat
-from cinder import flags
+from oslo.config import cfg
+
from cinder.openstack.common import log as logging
from cinder.openstack.common.notifier import api as notifier_api
from cinder.openstack.common import timeutils
from cinder import utils
-FLAGS = flags.FLAGS
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
- host = FLAGS.host
+ host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
- host = FLAGS.host
+ host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
"""Built-in volume type properties."""
+
+from oslo.config import cfg
+
from cinder import context
from cinder import db
from cinder import exception
-from cinder import flags
from cinder.openstack.common.db import exception as db_exc
from cinder.openstack.common import log as logging
-FLAGS = flags.FLAGS
+
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def get_default_volume_type():
"""Get the default volume type."""
- name = FLAGS.default_volume_type
+ name = CONF.default_volume_type
vol_type = {}
if name is not None: